diff --git a/0029-LoongArch-sync-patch-from-gcc-upstream.patch b/0029-LoongArch-sync-patch-from-gcc-upstream.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ff46202d185b318af1d59c69ca53964d5e66e7ee
--- /dev/null
+++ b/0029-LoongArch-sync-patch-from-gcc-upstream.patch
@@ -0,0 +1,217696 @@
+From 751793c5b04db2cc75bcb30c6a14c656171aeec7 Mon Sep 17 00:00:00 2001
+From: ticat_fp
+Date: Wed, 28 Feb 2024 15:10:08 +0800
+Subject: [PATCH 1/3] LoongArch: sync patch from gcc upstream
+
+Signed-off-by: ticat_fp
+---
+ config-ml.in | 10 +
+ gcc/ada/Makefile.rtl | 49 +
+ gcc/ada/libgnarl/s-linux__loongarch.ads | 134 +
+ gcc/ada/libgnat/system-linux-loongarch.ads | 145 +
+ .../config/loongarch/loongarch-common.cc | 3 +
+ gcc/config.gcc | 410 +-
+ gcc/config.in | 8 +-
+ gcc/config/host-linux.cc | 2 +-
+ gcc/config/loongarch/constraints.md | 211 +-
+ gcc/config/loongarch/elf.h | 51 +
+ .../loongarch/genopts/loongarch-strings | 9 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in | 77 +-
+ gcc/config/loongarch/gnu-user.h | 15 +-
+ gcc/config/loongarch/la464.md | 32 +-
+ gcc/config/loongarch/lasx.md | 5104 +++++++++++++
+ gcc/config/loongarch/lasxintrin.h | 5338 ++++++++++++++
+ gcc/config/loongarch/linux.h | 3 +
+ gcc/config/loongarch/loongarch-builtins.cc | 2686 ++++++-
+ gcc/config/loongarch/loongarch-c.cc | 22 +-
+ gcc/config/loongarch/loongarch-cpu.cc | 263 +-
+ gcc/config/loongarch/loongarch-cpu.h | 3 +-
+ gcc/config/loongarch/loongarch-def.c | 84 +-
+ gcc/config/loongarch/loongarch-def.h | 65 +-
+ gcc/config/loongarch/loongarch-driver.cc | 208 +-
+ gcc/config/loongarch/loongarch-driver.h | 40 +-
+ gcc/config/loongarch/loongarch-ftypes.def | 664 +-
+ gcc/config/loongarch/loongarch-modes.def | 39 +
+ gcc/config/loongarch/loongarch-opts.cc | 391 +-
+ gcc/config/loongarch/loongarch-opts.h | 60 +-
+ gcc/config/loongarch/loongarch-protos.h | 56 +-
+ gcc/config/loongarch/loongarch-str.h | 8 +-
+ gcc/config/loongarch/loongarch-tune.h | 9 +
+ gcc/config/loongarch/loongarch.cc | 6469 +++++++++++++++--
+ gcc/config/loongarch/loongarch.h | 169 +-
+ gcc/config/loongarch/loongarch.md | 1171 +--
+ gcc/config/loongarch/loongarch.opt | 75 +-
+ gcc/config/loongarch/lsx.md | 4467 ++++++++++++
+ gcc/config/loongarch/lsxintrin.h | 5181 +++++++++++++
+ gcc/config/loongarch/predicates.md | 441 +-
+ gcc/config/loongarch/t-linux | 32 +-
+ gcc/config/loongarch/t-loongarch | 4 +
+ gcc/configure | 67 +-
+ gcc/configure.ac | 15 +-
+ gcc/doc/extend.texi | 197 +-
+ gcc/doc/install.texi | 56 +-
+ gcc/doc/invoke.texi | 81 +-
+ gcc/doc/md.texi | 11 +
+ .../g++.target/loongarch/bstrins-compile.C | 22 +
+ .../g++.target/loongarch/bstrins-run.C | 65 +
+ gcc/testsuite/g++.target/loongarch/bytepick.C | 32 +
+ gcc/testsuite/g++.target/loongarch/pr106828.C | 4 +
+ .../gcc.target/loongarch/add-const.c | 45 +
+ .../gcc.target/loongarch/attr-model-1.c | 6 +
+ .../gcc.target/loongarch/attr-model-2.c | 6 +
+ .../gcc.target/loongarch/attr-model-diag.c | 7 +
+ .../gcc.target/loongarch/attr-model-test.c | 25 +
+ gcc/testsuite/gcc.target/loongarch/cmov_ii.c | 15 +
+ .../loongarch/const-double-zero-stx.c | 18 +
+ .../gcc.target/loongarch/direct-extern-1.c | 6 +
+ .../gcc.target/loongarch/direct-extern-2.c | 6 +
+ gcc/testsuite/gcc.target/loongarch/div-1.c | 9 +
+ gcc/testsuite/gcc.target/loongarch/div-2.c | 9 +
+ gcc/testsuite/gcc.target/loongarch/div-3.c | 9 +
+ gcc/testsuite/gcc.target/loongarch/div-4.c | 9 +
+ .../gcc.target/loongarch/fcopysign.c | 16 +
+ .../gcc.target/loongarch/float-load.c | 11 +
+
gcc/testsuite/gcc.target/loongarch/flogb.c | 18 + + .../gcc.target/loongarch/fmax-fmin.c | 30 + + gcc/testsuite/gcc.target/loongarch/frint.c | 16 + + gcc/testsuite/gcc.target/loongarch/fscaleb.c | 48 + + .../gcc.target/loongarch/ftint-no-inexact.c | 44 + + gcc/testsuite/gcc.target/loongarch/ftint.c | 44 + + .../gcc.target/loongarch/func-call-1.c | 32 + + .../gcc.target/loongarch/func-call-2.c | 32 + + .../gcc.target/loongarch/func-call-3.c | 32 + + .../gcc.target/loongarch/func-call-4.c | 32 + + .../gcc.target/loongarch/func-call-5.c | 33 + + .../gcc.target/loongarch/func-call-6.c | 33 + + .../gcc.target/loongarch/func-call-7.c | 34 + + .../gcc.target/loongarch/func-call-8.c | 33 + + .../loongarch/func-call-extreme-1.c | 32 + + .../loongarch/func-call-extreme-2.c | 32 + + .../gcc.target/loongarch/func-call-medium-1.c | 41 + + .../gcc.target/loongarch/func-call-medium-2.c | 41 + + .../gcc.target/loongarch/func-call-medium-3.c | 41 + + .../gcc.target/loongarch/func-call-medium-4.c | 41 + + .../gcc.target/loongarch/func-call-medium-5.c | 42 + + .../gcc.target/loongarch/func-call-medium-6.c | 42 + + .../gcc.target/loongarch/func-call-medium-7.c | 43 + + .../gcc.target/loongarch/func-call-medium-8.c | 43 + + gcc/testsuite/gcc.target/loongarch/imm-load.c | 10 + + .../gcc.target/loongarch/imm-load1.c | 26 + + .../gcc.target/loongarch/memcpy-vec-1.c | 11 + + .../gcc.target/loongarch/memcpy-vec-2.c | 12 + + .../gcc.target/loongarch/memcpy-vec-3.c | 6 + + .../gcc.target/loongarch/mulw_d_wu.c | 9 + + .../loongarch/{tst-asm-const.c => pr107731.c} | 6 +- + .../gcc.target/loongarch/pr109465-1.c | 9 + + .../gcc.target/loongarch/pr109465-2.c | 9 + + .../gcc.target/loongarch/pr109465-3.c | 12 + + gcc/testsuite/gcc.target/loongarch/pr111334.c | 39 + + .../gcc.target/loongarch/prolog-opt.c | 2 +- + .../loongarch/relocs-symbol-noaddend.c | 23 + + .../gcc.target/loongarch/shrink-wrap.c | 19 + + .../gcc.target/loongarch/sign-extend.c | 25 + + .../gcc.target/loongarch/slt-sign-extend.c | 14 + + .../gcc.target/loongarch/smuldi3_highpart.c | 13 + + .../gcc.target/loongarch/smulsi3_highpart.c | 15 + + .../loongarch/stack-check-alloca-1.c | 15 + + .../loongarch/stack-check-alloca-2.c | 12 + + .../loongarch/stack-check-alloca-3.c | 12 + + .../loongarch/stack-check-alloca-4.c | 12 + + .../loongarch/stack-check-alloca-5.c | 13 + + .../loongarch/stack-check-alloca-6.c | 13 + + .../gcc.target/loongarch/stack-check-alloca.h | 15 + + .../gcc.target/loongarch/stack-check-cfa-1.c | 12 + + .../gcc.target/loongarch/stack-check-cfa-2.c | 12 + + .../loongarch/stack-check-prologue-1.c | 11 + + .../loongarch/stack-check-prologue-2.c | 11 + + .../loongarch/stack-check-prologue-3.c | 11 + + .../loongarch/stack-check-prologue-4.c | 11 + + .../loongarch/stack-check-prologue-5.c | 12 + + .../loongarch/stack-check-prologue-6.c | 11 + + .../loongarch/stack-check-prologue-7.c | 12 + + .../loongarch/stack-check-prologue.h | 5 + + .../gcc.target/loongarch/strict-align.c | 12 + + .../gcc.target/loongarch/switch-qi.c | 16 + + .../gcc.target/loongarch/tls-gd-noplt.c | 12 + + .../gcc.target/loongarch/umulsi3_highpart.c | 14 + + gcc/testsuite/gcc.target/loongarch/va_arg.c | 24 + + .../loongarch/vector/lasx/lasx-builtin.c | 4460 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvabsd-1.c | 485 ++ + .../loongarch/vector/lasx/lasx-xvabsd-2.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvadd.c | 725 ++ + .../loongarch/vector/lasx/lasx-xvadda.c | 785 ++ + .../loongarch/vector/lasx/lasx-xvaddi.c | 427 ++ + .../loongarch/vector/lasx/lasx-xvaddwev-1.c | 740 
++ + .../loongarch/vector/lasx/lasx-xvaddwev-2.c | 485 ++ + .../loongarch/vector/lasx/lasx-xvaddwev-3.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvaddwod-1.c | 530 ++ + .../loongarch/vector/lasx/lasx-xvaddwod-2.c | 560 ++ + .../loongarch/vector/lasx/lasx-xvaddwod-3.c | 485 ++ + .../loongarch/vector/lasx/lasx-xvand.c | 155 + + .../loongarch/vector/lasx/lasx-xvandi.c | 196 + + .../loongarch/vector/lasx/lasx-xvandn.c | 125 + + .../loongarch/vector/lasx/lasx-xvavg-1.c | 680 ++ + .../loongarch/vector/lasx/lasx-xvavg-2.c | 560 ++ + .../loongarch/vector/lasx/lasx-xvavgr-1.c | 770 ++ + .../loongarch/vector/lasx/lasx-xvavgr-2.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvbitclr.c | 635 ++ + .../loongarch/vector/lasx/lasx-xvbitclri.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvbitrev.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvbitrevi.c | 317 + + .../loongarch/vector/lasx/lasx-xvbitsel.c | 134 + + .../loongarch/vector/lasx/lasx-xvbitseli.c | 185 + + .../loongarch/vector/lasx/lasx-xvbitset.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvbitseti.c | 405 ++ + .../loongarch/vector/lasx/lasx-xvbsll_v.c | 130 + + .../loongarch/vector/lasx/lasx-xvbsrl_v.c | 64 + + .../loongarch/vector/lasx/lasx-xvclo.c | 449 ++ + .../loongarch/vector/lasx/lasx-xvclz.c | 504 ++ + .../loongarch/vector/lasx/lasx-xvdiv-1.c | 485 ++ + .../loongarch/vector/lasx/lasx-xvdiv-2.c | 500 ++ + .../loongarch/vector/lasx/lasx-xvext2xv-1.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvext2xv-2.c | 669 ++ + .../loongarch/vector/lasx/lasx-xvexth-1.c | 350 + + .../loongarch/vector/lasx/lasx-xvexth-2.c | 592 ++ + .../loongarch/vector/lasx/lasx-xvextl-1.c | 86 + + .../loongarch/vector/lasx/lasx-xvextl-2.c | 163 + + .../loongarch/vector/lasx/lasx-xvextrins.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvfadd_d.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvfadd_s.c | 911 +++ + .../loongarch/vector/lasx/lasx-xvfclass_d.c | 152 + + .../loongarch/vector/lasx/lasx-xvfclass_s.c | 95 + + .../loongarch/vector/lasx/lasx-xvfcmp_caf_s.c | 446 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c | 977 +++ + .../loongarch/vector/lasx/lasx-xvfcmp_cle_s.c | 759 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_clt_s.c | 675 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_cne_s.c | 872 +++ + .../loongarch/vector/lasx/lasx-xvfcmp_cor_s.c | 340 + + .../loongarch/vector/lasx/lasx-xvfcmp_cun_s.c | 361 + + .../loongarch/vector/lasx/lasx-xvfcmp_saf_s.c | 424 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_seq_s.c | 924 +++ + .../loongarch/vector/lasx/lasx-xvfcmp_sle_s.c | 627 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_slt_s.c | 1212 +++ + .../loongarch/vector/lasx/lasx-xvfcmp_sne_s.c | 756 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_sor_s.c | 438 ++ + .../loongarch/vector/lasx/lasx-xvfcmp_sun_s.c | 363 + + .../loongarch/vector/lasx/lasx-xvfcvt.c | 528 ++ + .../loongarch/vector/lasx/lasx-xvfcvth.c | 485 ++ + .../loongarch/vector/lasx/lasx-xvffint-1.c | 375 + + .../loongarch/vector/lasx/lasx-xvffint-2.c | 246 + + .../loongarch/vector/lasx/lasx-xvffinth.c | 262 + + .../loongarch/vector/lasx/lasx-xvflogb_d.c | 86 + + .../loongarch/vector/lasx/lasx-xvflogb_s.c | 115 + + .../loongarch/vector/lasx/lasx-xvfmadd_d.c | 382 + + .../loongarch/vector/lasx/lasx-xvfmadd_s.c | 720 ++ + .../loongarch/vector/lasx/lasx-xvfmax_d.c | 230 + + .../loongarch/vector/lasx/lasx-xvfmax_s.c | 560 ++ + .../loongarch/vector/lasx/lasx-xvfmaxa_d.c | 230 + + .../loongarch/vector/lasx/lasx-xvfmaxa_s.c | 506 ++ + .../loongarch/vector/lasx/lasx-xvfnmadd_d.c | 324 + + .../loongarch/vector/lasx/lasx-xvfnmadd_s.c | 895 +++ + 
.../loongarch/vector/lasx/lasx-xvfrint_d.c | 429 ++ + .../loongarch/vector/lasx/lasx-xvfrint_s.c | 723 ++ + .../loongarch/vector/lasx/lasx-xvfrstp.c | 381 + + .../loongarch/vector/lasx/lasx-xvfrstpi.c | 350 + + .../loongarch/vector/lasx/lasx-xvfsqrt_d.c | 482 ++ + .../loongarch/vector/lasx/lasx-xvfsqrt_s.c | 457 ++ + .../loongarch/vector/lasx/lasx-xvftint-1.c | 471 ++ + .../loongarch/vector/lasx/lasx-xvftint-2.c | 1565 ++++ + .../loongarch/vector/lasx/lasx-xvftint-3.c | 511 ++ + .../loongarch/vector/lasx/lasx-xvftintl.c | 1580 ++++ + .../loongarch/vector/lasx/lasx-xvhaddw-1.c | 560 ++ + .../loongarch/vector/lasx/lasx-xvhaddw-2.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvhsubw-1.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvhsubw-2.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvilvh.c | 530 ++ + .../loongarch/vector/lasx/lasx-xvilvl.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvinsgr2vr.c | 272 + + .../loongarch/vector/lasx/lasx-xvinsve0.c | 380 + + .../loongarch/vector/lasx/lasx-xvld.c | 86 + + .../loongarch/vector/lasx/lasx-xvldi.c | 83 + + .../loongarch/vector/lasx/lasx-xvldrepl.c | 16 + + .../loongarch/vector/lasx/lasx-xvmadd.c | 742 ++ + .../loongarch/vector/lasx/lasx-xvmaddwev-1.c | 856 +++ + .../loongarch/vector/lasx/lasx-xvmaddwev-2.c | 723 ++ + .../loongarch/vector/lasx/lasx-xvmaddwev-3.c | 940 +++ + .../loongarch/vector/lasx/lasx-xvmaddwod-1.c | 742 ++ + .../loongarch/vector/lasx/lasx-xvmaddwod-2.c | 799 ++ + .../loongarch/vector/lasx/lasx-xvmaddwod-3.c | 820 +++ + .../loongarch/vector/lasx/lasx-xvmax-1.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvmax-2.c | 560 ++ + .../loongarch/vector/lasx/lasx-xvmaxi-1.c | 471 ++ + .../loongarch/vector/lasx/lasx-xvmaxi-2.c | 504 ++ + .../loongarch/vector/lasx/lasx-xvmin-1.c | 575 ++ + .../loongarch/vector/lasx/lasx-xvmin-2.c | 680 ++ + .../loongarch/vector/lasx/lasx-xvmini-1.c | 416 ++ + .../loongarch/vector/lasx/lasx-xvmini-2.c | 284 + + .../loongarch/vector/lasx/lasx-xvmod-1.c | 395 + + .../loongarch/vector/lasx/lasx-xvmod-2.c | 410 ++ + .../loongarch/vector/lasx/lasx-xvmskgez.c | 86 + + .../loongarch/vector/lasx/lasx-xvmskltz.c | 373 + + .../loongarch/vector/lasx/lasx-xvmsknz.c | 163 + + .../loongarch/vector/lasx/lasx-xvmsub.c | 647 ++ + .../loongarch/vector/lasx/lasx-xvmuh-1.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvmuh-2.c | 635 ++ + .../loongarch/vector/lasx/lasx-xvmul.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvmulwev-1.c | 590 ++ + .../loongarch/vector/lasx/lasx-xvmulwev-2.c | 590 ++ + .../loongarch/vector/lasx/lasx-xvmulwev-3.c | 605 ++ + .../loongarch/vector/lasx/lasx-xvmulwod-1.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvmulwod-2.c | 470 ++ + .../loongarch/vector/lasx/lasx-xvmulwod-3.c | 440 ++ + .../loongarch/vector/lasx/lasx-xvneg.c | 526 ++ + .../loongarch/vector/lasx/lasx-xvnor.c | 170 + + .../loongarch/vector/lasx/lasx-xvnori.c | 152 + + .../loongarch/vector/lasx/lasx-xvor.c | 215 + + .../loongarch/vector/lasx/lasx-xvori.c | 141 + + .../loongarch/vector/lasx/lasx-xvorn.c | 245 + + .../loongarch/vector/lasx/lasx-xvpackev.c | 501 ++ + .../loongarch/vector/lasx/lasx-xvpackod.c | 575 ++ + .../loongarch/vector/lasx/lasx-xvpcnt.c | 526 ++ + .../loongarch/vector/lasx/lasx-xvpickev.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvpickod.c | 530 ++ + .../loongarch/vector/lasx/lasx-xvpickve.c | 130 + + .../loongarch/vector/lasx/lasx-xvpickve2gr.c | 388 + + .../loongarch/vector/lasx/lasx-xvprem.c | 20 + + .../loongarch/vector/lasx/lasx-xvpremi.c | 20 + + .../loongarch/vector/lasx/lasx-xvreplgr2vr.c | 380 + + 
.../loongarch/vector/lasx/lasx-xvreplve.c | 536 ++ + .../loongarch/vector/lasx/lasx-xvreplve0.c | 471 ++ + .../loongarch/vector/lasx/lasx-xvreplvei.c | 20 + + .../loongarch/vector/lasx/lasx-xvrotr.c | 530 ++ + .../loongarch/vector/lasx/lasx-xvrotri.c | 394 + + .../loongarch/vector/lasx/lasx-xvsadd-1.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvsadd-2.c | 350 + + .../loongarch/vector/lasx/lasx-xvsat-1.c | 537 ++ + .../loongarch/vector/lasx/lasx-xvsat-2.c | 427 ++ + .../loongarch/vector/lasx/lasx-xvseq.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvseqi.c | 449 ++ + .../loongarch/vector/lasx/lasx-xvshuf4i_b.c | 430 ++ + .../loongarch/vector/lasx/lasx-xvshuf_b.c | 761 ++ + .../loongarch/vector/lasx/lasx-xvsigncov.c | 665 ++ + .../loongarch/vector/lasx/lasx-xvsle-1.c | 575 ++ + .../loongarch/vector/lasx/lasx-xvsle-2.c | 590 ++ + .../loongarch/vector/lasx/lasx-xvslei-1.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvslei-2.c | 438 ++ + .../loongarch/vector/lasx/lasx-xvsll.c | 425 ++ + .../loongarch/vector/lasx/lasx-xvslli.c | 416 ++ + .../loongarch/vector/lasx/lasx-xvsllwil-1.c | 339 + + .../loongarch/vector/lasx/lasx-xvsllwil-2.c | 350 + + .../loongarch/vector/lasx/lasx-xvslt-1.c | 455 ++ + .../loongarch/vector/lasx/lasx-xvslt-2.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvslti-1.c | 548 ++ + .../loongarch/vector/lasx/lasx-xvslti-2.c | 416 ++ + .../loongarch/vector/lasx/lasx-xvsra.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvsrai.c | 504 ++ + .../loongarch/vector/lasx/lasx-xvsran.c | 455 ++ + .../loongarch/vector/lasx/lasx-xvsrani.c | 545 ++ + .../loongarch/vector/lasx/lasx-xvsrar.c | 725 ++ + .../loongarch/vector/lasx/lasx-xvsrari.c | 471 ++ + .../loongarch/vector/lasx/lasx-xvsrarn.c | 500 ++ + .../loongarch/vector/lasx/lasx-xvsrarni.c | 636 ++ + .../loongarch/vector/lasx/lasx-xvsrl.c | 650 ++ + .../loongarch/vector/lasx/lasx-xvsrli.c | 405 ++ + .../loongarch/vector/lasx/lasx-xvsrln.c | 425 ++ + .../loongarch/vector/lasx/lasx-xvsrlni.c | 680 ++ + .../loongarch/vector/lasx/lasx-xvsrlr.c | 515 ++ + .../loongarch/vector/lasx/lasx-xvsrlri.c | 416 ++ + .../loongarch/vector/lasx/lasx-xvsrlrn.c | 410 ++ + .../loongarch/vector/lasx/lasx-xvsrlrni.c | 455 ++ + .../loongarch/vector/lasx/lasx-xvssran.c | 905 +++ + .../loongarch/vector/lasx/lasx-xvssrani.c | 1235 ++++ + .../loongarch/vector/lasx/lasx-xvssrarn.c | 905 +++ + .../loongarch/vector/lasx/lasx-xvssrarni.c | 1160 +++ + .../loongarch/vector/lasx/lasx-xvssrln.c | 965 +++ + .../loongarch/vector/lasx/lasx-xvssrlni.c | 1130 +++ + .../loongarch/vector/lasx/lasx-xvssrlrn.c | 815 +++ + .../loongarch/vector/lasx/lasx-xvssrlrni.c | 1160 +++ + .../loongarch/vector/lasx/lasx-xvssub-1.c | 425 ++ + .../loongarch/vector/lasx/lasx-xvssub-2.c | 695 ++ + .../loongarch/vector/lasx/lasx-xvst.c | 102 + + .../loongarch/vector/lasx/lasx-xvstelm.c | 14 + + .../loongarch/vector/lasx/lasx-xvsub.c | 590 ++ + .../loongarch/vector/lasx/lasx-xvsubi.c | 482 ++ + .../loongarch/vector/lasx/lasx-xvsubwev-1.c | 530 ++ + .../loongarch/vector/lasx/lasx-xvsubwev-2.c | 440 ++ + .../loongarch/vector/lasx/lasx-xvsubwod-1.c | 695 ++ + .../loongarch/vector/lasx/lasx-xvsubwod-2.c | 620 ++ + .../loongarch/vector/lasx/lasx-xvxor.c | 185 + + .../loongarch/vector/lasx/lasx-xvxori.c | 163 + + .../loongarch/vector/loongarch-vector.exp | 42 + + .../loongarch/vector/lsx/lsx-builtin.c | 4328 +++++++++++ + .../loongarch/vector/lsx/lsx-vabsd-1.c | 272 + + .../loongarch/vector/lsx/lsx-vabsd-2.c | 398 + + .../loongarch/vector/lsx/lsx-vadd.c | 416 ++ + .../loongarch/vector/lsx/lsx-vadda.c | 344 + + 
.../loongarch/vector/lsx/lsx-vaddi.c | 251 + + .../loongarch/vector/lsx/lsx-vaddwev-1.c | 335 + + .../loongarch/vector/lsx/lsx-vaddwev-2.c | 344 + + .../loongarch/vector/lsx/lsx-vaddwev-3.c | 425 ++ + .../loongarch/vector/lsx/lsx-vaddwod-1.c | 408 ++ + .../loongarch/vector/lsx/lsx-vaddwod-2.c | 344 + + .../loongarch/vector/lsx/lsx-vaddwod-3.c | 237 + + .../loongarch/vector/lsx/lsx-vand.c | 159 + + .../loongarch/vector/lsx/lsx-vandi.c | 67 + + .../loongarch/vector/lsx/lsx-vandn.c | 129 + + .../loongarch/vector/lsx/lsx-vavg-1.c | 398 + + .../loongarch/vector/lsx/lsx-vavg-2.c | 308 + + .../loongarch/vector/lsx/lsx-vavgr-1.c | 299 + + .../loongarch/vector/lsx/lsx-vavgr-2.c | 317 + + .../loongarch/vector/lsx/lsx-vbitclr.c | 461 ++ + .../loongarch/vector/lsx/lsx-vbitclri.c | 279 + + .../loongarch/vector/lsx/lsx-vbitrev.c | 407 ++ + .../loongarch/vector/lsx/lsx-vbitrevi.c | 336 + + .../loongarch/vector/lsx/lsx-vbitsel.c | 109 + + .../loongarch/vector/lsx/lsx-vbitseli.c | 84 + + .../loongarch/vector/lsx/lsx-vbitset.c | 371 + + .../loongarch/vector/lsx/lsx-vbitseti.c | 279 + + .../loongarch/vector/lsx/lsx-vbsll.c | 83 + + .../loongarch/vector/lsx/lsx-vbsrl.c | 55 + + .../loongarch/vector/lsx/lsx-vclo.c | 266 + + .../loongarch/vector/lsx/lsx-vclz.c | 265 + + .../loongarch/vector/lsx/lsx-vdiv-1.c | 299 + + .../loongarch/vector/lsx/lsx-vdiv-2.c | 254 + + .../loongarch/vector/lsx/lsx-vexth-1.c | 342 + + .../loongarch/vector/lsx/lsx-vexth-2.c | 182 + + .../loongarch/vector/lsx/lsx-vextl-1.c | 83 + + .../loongarch/vector/lsx/lsx-vextl-2.c | 83 + + .../loongarch/vector/lsx/lsx-vextrins.c | 479 ++ + .../loongarch/vector/lsx/lsx-vfadd_d.c | 407 ++ + .../loongarch/vector/lsx/lsx-vfadd_s.c | 470 ++ + .../loongarch/vector/lsx/lsx-vfclass_d.c | 83 + + .../loongarch/vector/lsx/lsx-vfclass_s.c | 74 + + .../loongarch/vector/lsx/lsx-vfcmp_caf.c | 244 + + .../loongarch/vector/lsx/lsx-vfcmp_ceq.c | 516 ++ + .../loongarch/vector/lsx/lsx-vfcmp_cle.c | 530 ++ + .../loongarch/vector/lsx/lsx-vfcmp_clt.c | 476 ++ + .../loongarch/vector/lsx/lsx-vfcmp_cne.c | 378 + + .../loongarch/vector/lsx/lsx-vfcmp_cor.c | 170 + + .../loongarch/vector/lsx/lsx-vfcmp_cun.c | 253 + + .../loongarch/vector/lsx/lsx-vfcmp_saf.c | 214 + + .../loongarch/vector/lsx/lsx-vfcmp_seq.c | 450 ++ + .../loongarch/vector/lsx/lsx-vfcmp_sle.c | 407 ++ + .../loongarch/vector/lsx/lsx-vfcmp_slt.c | 512 ++ + .../loongarch/vector/lsx/lsx-vfcmp_sne.c | 398 + + .../loongarch/vector/lsx/lsx-vfcmp_sor.c | 269 + + .../loongarch/vector/lsx/lsx-vfcmp_sun.c | 335 + + .../loongarch/vector/lsx/lsx-vfcvt-1.c | 398 + + .../loongarch/vector/lsx/lsx-vfcvt-2.c | 278 + + .../loongarch/vector/lsx/lsx-vffint-1.c | 161 + + .../loongarch/vector/lsx/lsx-vffint-2.c | 264 + + .../loongarch/vector/lsx/lsx-vffint-3.c | 102 + + .../loongarch/vector/lsx/lsx-vflogb_d.c | 76 + + .../loongarch/vector/lsx/lsx-vflogb_s.c | 185 + + .../loongarch/vector/lsx/lsx-vfmadd_d.c | 251 + + .../loongarch/vector/lsx/lsx-vfmadd_s.c | 381 + + .../loongarch/vector/lsx/lsx-vfmax_d.c | 200 + + .../loongarch/vector/lsx/lsx-vfmax_s.c | 335 + + .../loongarch/vector/lsx/lsx-vfmaxa_d.c | 155 + + .../loongarch/vector/lsx/lsx-vfmaxa_s.c | 230 + + .../loongarch/vector/lsx/lsx-vfnmadd_d.c | 196 + + .../loongarch/vector/lsx/lsx-vfnmadd_s.c | 381 + + .../loongarch/vector/lsx/lsx-vfrint_d.c | 230 + + .../loongarch/vector/lsx/lsx-vfrint_s.c | 350 + + .../loongarch/vector/lsx/lsx-vfrstp.c | 218 + + .../loongarch/vector/lsx/lsx-vfrstpi.c | 209 + + .../loongarch/vector/lsx/lsx-vfsqrt_d.c | 216 + + 
.../loongarch/vector/lsx/lsx-vfsqrt_s.c | 372 + + .../loongarch/vector/lsx/lsx-vftint-1.c | 349 + + .../loongarch/vector/lsx/lsx-vftint-2.c | 695 ++ + .../loongarch/vector/lsx/lsx-vftint-3.c | 1028 +++ + .../loongarch/vector/lsx/lsx-vftint-4.c | 345 + + .../loongarch/vector/lsx/lsx-vhaddw-1.c | 488 ++ + .../loongarch/vector/lsx/lsx-vhaddw-2.c | 452 ++ + .../loongarch/vector/lsx/lsx-vhsubw-1.c | 327 + + .../loongarch/vector/lsx/lsx-vhsubw-2.c | 353 + + .../loongarch/vector/lsx/lsx-vilvh.c | 353 + + .../loongarch/vector/lsx/lsx-vilvl.c | 327 + + .../loongarch/vector/lsx/lsx-vinsgr2vr.c | 278 + + .../gcc.target/loongarch/vector/lsx/lsx-vld.c | 62 + + .../loongarch/vector/lsx/lsx-vldi.c | 61 + + .../loongarch/vector/lsx/lsx-vmadd.c | 450 ++ + .../loongarch/vector/lsx/lsx-vmaddwev-1.c | 472 ++ + .../loongarch/vector/lsx/lsx-vmaddwev-2.c | 383 + + .../loongarch/vector/lsx/lsx-vmaddwev-3.c | 383 + + .../loongarch/vector/lsx/lsx-vmaddwod-1.c | 372 + + .../loongarch/vector/lsx/lsx-vmaddwod-2.c | 438 ++ + .../loongarch/vector/lsx/lsx-vmaddwod-3.c | 460 ++ + .../loongarch/vector/lsx/lsx-vmax-1.c | 317 + + .../loongarch/vector/lsx/lsx-vmax-2.c | 362 + + .../loongarch/vector/lsx/lsx-vmaxi-1.c | 279 + + .../loongarch/vector/lsx/lsx-vmaxi-2.c | 223 + + .../loongarch/vector/lsx/lsx-vmin-1.c | 434 ++ + .../loongarch/vector/lsx/lsx-vmin-2.c | 344 + + .../loongarch/vector/lsx/lsx-vmini-1.c | 314 + + .../loongarch/vector/lsx/lsx-vmini-2.c | 216 + + .../loongarch/vector/lsx/lsx-vmod-1.c | 254 + + .../loongarch/vector/lsx/lsx-vmod-2.c | 254 + + .../loongarch/vector/lsx/lsx-vmskgez.c | 119 + + .../loongarch/vector/lsx/lsx-vmskltz.c | 321 + + .../loongarch/vector/lsx/lsx-vmsknz.c | 104 + + .../loongarch/vector/lsx/lsx-vmsub.c | 461 ++ + .../loongarch/vector/lsx/lsx-vmuh-1.c | 353 + + .../loongarch/vector/lsx/lsx-vmuh-2.c | 372 + + .../loongarch/vector/lsx/lsx-vmul.c | 282 + + .../loongarch/vector/lsx/lsx-vmulwev-1.c | 434 ++ + .../loongarch/vector/lsx/lsx-vmulwev-2.c | 344 + + .../loongarch/vector/lsx/lsx-vmulwev-3.c | 245 + + .../loongarch/vector/lsx/lsx-vmulwod-1.c | 272 + + .../loongarch/vector/lsx/lsx-vmulwod-2.c | 282 + + .../loongarch/vector/lsx/lsx-vmulwod-3.c | 308 + + .../loongarch/vector/lsx/lsx-vneg.c | 321 + + .../loongarch/vector/lsx/lsx-vnor.c | 109 + + .../loongarch/vector/lsx/lsx-vnori.c | 91 + + .../gcc.target/loongarch/vector/lsx/lsx-vor.c | 169 + + .../loongarch/vector/lsx/lsx-vori.c | 123 + + .../loongarch/vector/lsx/lsx-vorn.c | 109 + + .../loongarch/vector/lsx/lsx-vpackev.c | 452 ++ + .../loongarch/vector/lsx/lsx-vpackod.c | 461 ++ + .../loongarch/vector/lsx/lsx-vpcnt.c | 350 + + .../loongarch/vector/lsx/lsx-vpickev.c | 362 + + .../loongarch/vector/lsx/lsx-vpickod.c | 336 + + .../loongarch/vector/lsx/lsx-vpickve2gr.c | 488 ++ + .../loongarch/vector/lsx/lsx-vpremi.c | 20 + + .../loongarch/vector/lsx/lsx-vreplgr2vr.c | 212 + + .../loongarch/vector/lsx/lsx-vreplve.c | 300 + + .../loongarch/vector/lsx/lsx-vreplvei.c | 293 + + .../loongarch/vector/lsx/lsx-vrotr.c | 381 + + .../loongarch/vector/lsx/lsx-vrotri.c | 294 + + .../loongarch/vector/lsx/lsx-vsadd-1.c | 335 + + .../loongarch/vector/lsx/lsx-vsadd-2.c | 345 + + .../loongarch/vector/lsx/lsx-vsat-1.c | 231 + + .../loongarch/vector/lsx/lsx-vsat-2.c | 272 + + .../loongarch/vector/lsx/lsx-vseq.c | 470 ++ + .../loongarch/vector/lsx/lsx-vseqi.c | 328 + + .../loongarch/vector/lsx/lsx-vshuf.c | 394 + + .../loongarch/vector/lsx/lsx-vshuf4i.c | 348 + + .../loongarch/vector/lsx/lsx-vsigncov.c | 425 ++ + .../loongarch/vector/lsx/lsx-vsle-1.c | 290 + + 
.../loongarch/vector/lsx/lsx-vsle-2.c | 444 ++ + .../loongarch/vector/lsx/lsx-vslei-1.c | 258 + + .../loongarch/vector/lsx/lsx-vslei-2.c | 293 + + .../loongarch/vector/lsx/lsx-vsll.c | 254 + + .../loongarch/vector/lsx/lsx-vslli.c | 293 + + .../loongarch/vector/lsx/lsx-vsllwil-1.c | 244 + + .../loongarch/vector/lsx/lsx-vsllwil-2.c | 189 + + .../loongarch/vector/lsx/lsx-vslt-1.c | 434 ++ + .../loongarch/vector/lsx/lsx-vslt-2.c | 236 + + .../loongarch/vector/lsx/lsx-vslti-1.c | 328 + + .../loongarch/vector/lsx/lsx-vslti-2.c | 293 + + .../loongarch/vector/lsx/lsx-vsra.c | 344 + + .../loongarch/vector/lsx/lsx-vsrai.c | 258 + + .../loongarch/vector/lsx/lsx-vsran.c | 290 + + .../loongarch/vector/lsx/lsx-vsrani.c | 246 + + .../loongarch/vector/lsx/lsx-vsrar.c | 354 + + .../loongarch/vector/lsx/lsx-vsrari.c | 265 + + .../loongarch/vector/lsx/lsx-vsrarn.c | 236 + + .../loongarch/vector/lsx/lsx-vsrarni.c | 398 + + .../loongarch/vector/lsx/lsx-vsrl.c | 389 + + .../loongarch/vector/lsx/lsx-vsrli.c | 328 + + .../loongarch/vector/lsx/lsx-vsrln.c | 335 + + .../loongarch/vector/lsx/lsx-vsrlni.c | 281 + + .../loongarch/vector/lsx/lsx-vsrlr.c | 434 ++ + .../loongarch/vector/lsx/lsx-vsrlri.c | 300 + + .../loongarch/vector/lsx/lsx-vsrlrn.c | 164 + + .../loongarch/vector/lsx/lsx-vsrlrni.c | 686 ++ + .../loongarch/vector/lsx/lsx-vssran.c | 390 + + .../loongarch/vector/lsx/lsx-vssrani.c | 679 ++ + .../loongarch/vector/lsx/lsx-vssrarn.c | 669 ++ + .../loongarch/vector/lsx/lsx-vssrarni.c | 848 +++ + .../loongarch/vector/lsx/lsx-vssrln.c | 543 ++ + .../loongarch/vector/lsx/lsx-vssrlni.c | 668 ++ + .../loongarch/vector/lsx/lsx-vssrlrn.c | 470 ++ + .../loongarch/vector/lsx/lsx-vssrlrni.c | 597 ++ + .../loongarch/vector/lsx/lsx-vssub-1.c | 398 + + .../loongarch/vector/lsx/lsx-vssub-2.c | 408 ++ + .../gcc.target/loongarch/vector/lsx/lsx-vst.c | 70 + + .../loongarch/vector/lsx/lsx-vsub.c | 381 + + .../loongarch/vector/lsx/lsx-vsubi.c | 329 + + .../loongarch/vector/lsx/lsx-vsubwev-1.c | 326 + + .../loongarch/vector/lsx/lsx-vsubwev-2.c | 417 ++ + .../loongarch/vector/lsx/lsx-vsubwod-1.c | 326 + + .../loongarch/vector/lsx/lsx-vsubwod-2.c | 308 + + .../loongarch/vector/lsx/lsx-vxor.c | 79 + + .../loongarch/vector/lsx/lsx-vxori.c | 67 + + .../loongarch/vector/simd_correctness_check.h | 54 + + gcc/testsuite/lib/target-supports.exp | 7 +- + include/longlong.h | 12 + + include/vtv-change-permission.h | 4 + + libffi/Makefile.am | 4 +- + libffi/Makefile.in | 25 +- + libffi/configure.host | 5 + + libffi/src/loongarch64/ffi.c | 621 ++ + libffi/src/loongarch64/ffitarget.h | 82 + + libffi/src/loongarch64/sysv.S | 327 + + libgcc/config.host | 9 +- + libitm/config/loongarch/asm.h | 54 + + libitm/config/loongarch/sjlj.S | 130 + + libitm/config/loongarch/target.h | 50 + + libitm/configure.tgt | 2 + + libstdc++-v3/config/cpu/loongarch/t-loongarch | 1 + + libstdc++-v3/configure.host | 5 +- + libvtv/configure.tgt | 3 + + 545 files changed, 208379 insertions(+), 1724 deletions(-) + create mode 100644 gcc/ada/libgnarl/s-linux__loongarch.ads + create mode 100644 gcc/ada/libgnat/system-linux-loongarch.ads + create mode 100644 gcc/config/loongarch/elf.h + create mode 100644 gcc/config/loongarch/lasx.md + create mode 100644 gcc/config/loongarch/lasxintrin.h + create mode 100644 gcc/config/loongarch/lsx.md + create mode 100644 gcc/config/loongarch/lsxintrin.h + create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-compile.C + create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-run.C + create mode 100644 
gcc/testsuite/g++.target/loongarch/bytepick.C + create mode 100644 gcc/testsuite/g++.target/loongarch/pr106828.C + create mode 100644 gcc/testsuite/gcc.target/loongarch/add-const.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-diag.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-test.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_ii.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/div-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/div-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/div-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/div-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/fcopysign.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/float-load.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/flogb.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/fmax-fmin.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/frint.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/fscaleb.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-8.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c + rename gcc/testsuite/gcc.target/loongarch/{tst-asm-const.c => pr107731.c} (78%) + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-2.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/pr109465-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr111334.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/shrink-wrap.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/sign-extend.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h + create mode 100644 gcc/testsuite/gcc.target/loongarch/strict-align.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/switch-qi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/va_arg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h + create mode 100644 libffi/src/loongarch64/ffi.c + create mode 100644 libffi/src/loongarch64/ffitarget.h + create 
mode 100644 libffi/src/loongarch64/sysv.S + create mode 100644 libitm/config/loongarch/asm.h + create mode 100644 libitm/config/loongarch/sjlj.S + create mode 100644 libitm/config/loongarch/target.h + create mode 100644 libstdc++-v3/config/cpu/loongarch/t-loongarch + +diff --git a/config-ml.in b/config-ml.in +index 68854a4f1..ad0db0781 100644 +--- a/config-ml.in ++++ b/config-ml.in +@@ -301,6 +301,16 @@ arm-*-*) + done + fi + ;; ++loongarch*-*) ++ old_multidirs="${multidirs}" ++ multidirs="" ++ for x in ${old_multidirs}; do ++ case "$x" in ++ `${CC-gcc} --print-multi-directory`) : ;; ++ *) multidirs="${multidirs} ${x}" ;; ++ esac ++ done ++ ;; + m68*-*-*) + if [ x$enable_softfloat = xno ] + then +diff --git a/gcc/ada/Makefile.rtl b/gcc/ada/Makefile.rtl +index aaf853e3a..579645d95 100644 +--- a/gcc/ada/Makefile.rtl ++++ b/gcc/ada/Makefile.rtl +@@ -2195,6 +2195,55 @@ ifeq ($(strip $(filter-out cygwin% mingw32% pe,$(target_os))),) + LIBRARY_VERSION := $(LIB_VERSION) + endif + ++# LoongArch Linux ++ifeq ($(strip $(filter-out loongarch% linux%,$(target_cpu) $(target_os))),) ++ LIBGNAT_TARGET_PAIRS = \ ++ a-exetim.adb. -- ++-- -- ++------------------------------------------------------------------------------ ++ ++-- This is the LoongArch version of this package ++ ++-- This package encapsulates cpu specific differences between implementations ++-- of GNU/Linux, in order to share s-osinte-linux.ads. ++ ++-- PLEASE DO NOT add any with-clauses to this package or remove the pragma ++-- Preelaborate. This package is designed to be a bottom-level (leaf) package ++ ++with Interfaces.C; ++with System.Parameters; ++ ++package System.Linux is ++ pragma Preelaborate; ++ ++ ---------- ++ -- Time -- ++ ---------- ++ ++ subtype int is Interfaces.C.int; ++ subtype long is Interfaces.C.long; ++ subtype suseconds_t is Interfaces.C.long; ++ type time_t is range -2 ** (System.Parameters.time_t_bits - 1) ++ .. 
2 ** (System.Parameters.time_t_bits - 1) - 1; ++ subtype clockid_t is Interfaces.C.int; ++ ++ type timespec is record ++ tv_sec : time_t; ++ tv_nsec : long; ++ end record; ++ pragma Convention (C, timespec); ++ ++ type timeval is record ++ tv_sec : time_t; ++ tv_usec : suseconds_t; ++ end record; ++ pragma Convention (C, timeval); ++ ++ ----------- ++ -- Errno -- ++ ----------- ++ ++ EAGAIN : constant := 11; ++ EINTR : constant := 4; ++ EINVAL : constant := 22; ++ ENOMEM : constant := 12; ++ EPERM : constant := 1; ++ ETIMEDOUT : constant := 110; ++ ++ ------------- ++ -- Signals -- ++ ------------- ++ ++ SIGHUP : constant := 1; -- hangup ++ SIGINT : constant := 2; -- interrupt (rubout) ++ SIGQUIT : constant := 3; -- quit (ASCD FS) ++ SIGILL : constant := 4; -- illegal instruction (not reset) ++ SIGTRAP : constant := 5; -- trace trap (not reset) ++ SIGIOT : constant := 6; -- IOT instruction ++ SIGABRT : constant := 6; -- used by abort, replace SIGIOT in the future ++ SIGBUS : constant := 7; -- bus error ++ SIGFPE : constant := 8; -- floating point exception ++ SIGKILL : constant := 9; -- kill (cannot be caught or ignored) ++ SIGUSR1 : constant := 10; -- user defined signal 1 ++ SIGSEGV : constant := 11; -- segmentation violation ++ SIGUSR2 : constant := 12; -- user defined signal 2 ++ SIGPIPE : constant := 13; -- write on a pipe with no one to read it ++ SIGALRM : constant := 14; -- alarm clock ++ SIGTERM : constant := 15; -- software termination signal from kill ++ SIGSTKFLT : constant := 16; -- coprocessor stack fault (Linux) ++ SIGCLD : constant := 17; -- alias for SIGCHLD ++ SIGCHLD : constant := 17; -- child status change ++ SIGCONT : constant := 18; -- stopped process has been continued ++ SIGSTOP : constant := 19; -- stop (cannot be caught or ignored) ++ SIGTSTP : constant := 20; -- user stop requested from tty ++ SIGTTIN : constant := 21; -- background tty read attempted ++ SIGTTOU : constant := 22; -- background tty write attempted ++ SIGURG : constant := 23; -- urgent condition on IO channel ++ SIGXCPU : constant := 24; -- CPU time limit exceeded ++ SIGXFSZ : constant := 25; -- filesize limit exceeded ++ SIGVTALRM : constant := 26; -- virtual timer expired ++ SIGPROF : constant := 27; -- profiling timer expired ++ SIGWINCH : constant := 28; -- window size change ++ SIGPOLL : constant := 29; -- pollable event occurred ++ SIGIO : constant := 29; -- I/O now possible (4.2 BSD) ++ SIGPWR : constant := 30; -- power-fail restart ++ SIGSYS : constant := 31; -- bad system call ++ SIG32 : constant := 32; -- glibc internal signal ++ SIG33 : constant := 33; -- glibc internal signal ++ SIG34 : constant := 34; -- glibc internal signal ++ ++ -- These don't exist for Linux/LoongArch. The constants are present ++ -- so that we can continue to use a-intnam-linux.ads. 
++ SIGLOST : constant := 0; -- File lock lost ++ SIGUNUSED : constant := 0; -- unused signal (GNU/Linux) ++ SIGEMT : constant := 0; -- EMT ++ ++ -- struct_sigaction offsets ++ ++ sa_handler_pos : constant := 0; ++ sa_mask_pos : constant := Standard'Address_Size / 8; ++ sa_flags_pos : constant := 128 + sa_mask_pos; ++ ++ SA_SIGINFO : constant := 16#04#; ++ SA_ONSTACK : constant := 16#08000000#; ++ ++end System.Linux; +diff --git a/gcc/ada/libgnat/system-linux-loongarch.ads b/gcc/ada/libgnat/system-linux-loongarch.ads +new file mode 100644 +index 000000000..735f67041 +--- /dev/null ++++ b/gcc/ada/libgnat/system-linux-loongarch.ads +@@ -0,0 +1,145 @@ ++------------------------------------------------------------------------------ ++-- -- ++-- GNAT RUN-TIME COMPONENTS -- ++-- -- ++-- S Y S T E M -- ++-- -- ++-- S p e c -- ++-- (GNU-Linux/LoongArch Version) -- ++-- -- ++-- Copyright (C) 1992-2023, Free Software Foundation, Inc. -- ++-- -- ++-- This specification is derived from the Ada Reference Manual for use with -- ++-- GNAT. The copyright notice above, and the license provisions that follow -- ++-- apply solely to the contents of the part following the private keyword. -- ++-- -- ++-- GNAT is free software; you can redistribute it and/or modify it under -- ++-- terms of the GNU General Public License as published by the Free Soft- -- ++-- ware Foundation; either version 3, or (at your option) any later ver- -- ++-- sion. GNAT is distributed in the hope that it will be useful, but WITH- -- ++-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- ++-- or FITNESS FOR A PARTICULAR PURPOSE. -- ++-- -- ++-- As a special exception under Section 7 of GPL version 3, you are granted -- ++-- additional permissions described in the GCC Runtime Library Exception, -- ++-- version 3.1, as published by the Free Software Foundation. -- ++-- -- ++-- You should have received a copy of the GNU General Public License and -- ++-- a copy of the GCC Runtime Library Exception along with this program; -- ++-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- ++-- . -- ++-- -- ++-- GNAT was originally developed by the GNAT team at New York University. -- ++-- Extensive contributions were provided by Ada Core Technologies Inc. -- ++-- -- ++------------------------------------------------------------------------------ ++ ++package System is ++ pragma Pure; ++ -- Note that we take advantage of the implementation permission to make ++ -- this unit Pure instead of Preelaborable; see RM 13.7.1(15). In Ada ++ -- 2005, this is Pure in any case (AI-362). 
++ ++ pragma No_Elaboration_Code_All; ++ -- Allow the use of that restriction in units that WITH this unit ++ ++ type Name is (SYSTEM_NAME_GNAT); ++ System_Name : constant Name := SYSTEM_NAME_GNAT; ++ ++ -- System-Dependent Named Numbers ++ ++ Min_Int : constant := -2 ** (Standard'Max_Integer_Size - 1); ++ Max_Int : constant := 2 ** (Standard'Max_Integer_Size - 1) - 1; ++ ++ Max_Binary_Modulus : constant := 2 ** Standard'Max_Integer_Size; ++ Max_Nonbinary_Modulus : constant := 2 ** Integer'Size - 1; ++ ++ Max_Base_Digits : constant := Long_Long_Float'Digits; ++ Max_Digits : constant := Long_Long_Float'Digits; ++ ++ Max_Mantissa : constant := Standard'Max_Integer_Size - 1; ++ Fine_Delta : constant := 2.0 ** (-Max_Mantissa); ++ ++ Tick : constant := 0.000_001; ++ ++ -- Storage-related Declarations ++ ++ type Address is private; ++ pragma Preelaborable_Initialization (Address); ++ Null_Address : constant Address; ++ ++ Storage_Unit : constant := 8; ++ Word_Size : constant := Standard'Word_Size; ++ Memory_Size : constant := 2 ** Word_Size; ++ ++ -- Address comparison ++ ++ function "<" (Left, Right : Address) return Boolean; ++ function "<=" (Left, Right : Address) return Boolean; ++ function ">" (Left, Right : Address) return Boolean; ++ function ">=" (Left, Right : Address) return Boolean; ++ function "=" (Left, Right : Address) return Boolean; ++ ++ pragma Import (Intrinsic, "<"); ++ pragma Import (Intrinsic, "<="); ++ pragma Import (Intrinsic, ">"); ++ pragma Import (Intrinsic, ">="); ++ pragma Import (Intrinsic, "="); ++ ++ -- Other System-Dependent Declarations ++ ++ type Bit_Order is (High_Order_First, Low_Order_First); ++ Default_Bit_Order : constant Bit_Order := Low_Order_First; ++ pragma Warnings (Off, Default_Bit_Order); -- kill constant condition warning ++ ++ -- Priority-related Declarations (RM D.1) ++ ++ Max_Priority : constant Positive := 30; ++ Max_Interrupt_Priority : constant Positive := 31; ++ ++ subtype Any_Priority is Integer range 0 .. 31; ++ subtype Priority is Any_Priority range 0 .. 30; ++ subtype Interrupt_Priority is Any_Priority range 31 .. 31; ++ ++ Default_Priority : constant Priority := 15; ++ ++private ++ ++ type Address is mod Memory_Size; ++ Null_Address : constant Address := 0; ++ ++ -------------------------------------- ++ -- System Implementation Parameters -- ++ -------------------------------------- ++ ++ -- These parameters provide information about the target that is used ++ -- by the compiler. They are in the private part of System, where they ++ -- can be accessed using the special circuitry in the Targparm unit ++ -- whose source should be consulted for more detailed descriptions ++ -- of the individual switch values. 
++ ++ Backend_Divide_Checks : constant Boolean := False; ++ Backend_Overflow_Checks : constant Boolean := True; ++ Command_Line_Args : constant Boolean := True; ++ Configurable_Run_Time : constant Boolean := False; ++ Denorm : constant Boolean := True; ++ Duration_32_Bits : constant Boolean := False; ++ Exit_Status_Supported : constant Boolean := True; ++ Machine_Overflows : constant Boolean := False; ++ Machine_Rounds : constant Boolean := True; ++ Preallocated_Stacks : constant Boolean := False; ++ Signed_Zeros : constant Boolean := True; ++ Stack_Check_Default : constant Boolean := False; ++ Stack_Check_Probes : constant Boolean := True; ++ Stack_Check_Limits : constant Boolean := False; ++ Support_Aggregates : constant Boolean := True; ++ Support_Composite_Assign : constant Boolean := True; ++ Support_Composite_Compare : constant Boolean := True; ++ Support_Long_Shifts : constant Boolean := True; ++ Always_Compatible_Rep : constant Boolean := False; ++ Suppress_Standard_Library : constant Boolean := False; ++ Use_Ada_Main_Program_Name : constant Boolean := False; ++ Frontend_Exceptions : constant Boolean := False; ++ ZCX_By_Default : constant Boolean := True; ++ ++end System; +diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc +index ed3730fce..c8bc5718d 100644 +--- a/gcc/common/config/loongarch/loongarch-common.cc ++++ b/gcc/common/config/loongarch/loongarch-common.cc +@@ -34,6 +34,9 @@ along with GCC; see the file COPYING3. If not see + static const struct default_options loongarch_option_optimization_table[] = + { + { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, ++ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 }, ++ { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 }, ++ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 }, + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 5c378c698..57e724080 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -456,7 +456,7 @@ mips*-*-*) + ;; + loongarch*-*-*) + cpu_type=loongarch +- extra_headers="larchintrin.h" ++ extra_headers="larchintrin.h lsxintrin.h lasxintrin.h" + extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_options="${extra_options} g.opt fused-madd.opt" +@@ -2519,6 +2519,18 @@ loongarch*-*-linux*) + gcc_cv_initfini_array=yes + ;; + ++loongarch*-*-elf*) ++ tm_file="elfos.h newlib-stdint.h ${tm_file}" ++ tm_file="${tm_file} loongarch/elf.h loongarch/linux.h" ++ tmake_file="${tmake_file} loongarch/t-linux" ++ gnu_ld=yes ++ gas=yes ++ ++ # For .init_array support. The configure script cannot always ++ # automatically detect that GAS supports it, yet we require it. ++ gcc_cv_initfini_array=yes ++ ;; ++ + mips*-*-netbsd*) # NetBSD/mips, either endian. 
+ target_cpu_default="MASK_ABICALLS" + tm_file="elfos.h ${tm_file} mips/elf.h ${nbsd_tm_file} mips/netbsd.h" +@@ -4965,87 +4977,73 @@ case "${target}" in + esac + ;; + +- loongarch*-*-*) +- supported_defaults="abi arch tune fpu" ++ loongarch*-*) ++ supported_defaults="abi arch tune fpu simd multilib-default strict-align-lib" + + # Local variables + unset \ +- abi_pattern abi_default \ +- abiext_pattern abiext_default \ +- arch_pattern arch_default \ +- fpu_pattern fpu_default \ +- tune_pattern tune_default \ +- triplet_os triplet_abi ++ abi_base abi_ext \ ++ arch_pattern arch_default \ ++ fpu_pattern fpu_default \ ++ triplet_os triplet_abi \ ++ strict_align_opt ++ ++ # --with-abi is now obsolete, emit a warning if given. ++ case ${with_abi} in ++ "") ;; ++ *) ++ echo "warning: --with-abi= is now obsolete," \ ++ "the default ABI is derived from your target" \ ++ "triplet ${target}" 1>&2 ++ ;; ++ esac + + # Infer ABI from the triplet. + case ${target} in +- loongarch64-*-*-*f64) +- abi_pattern="lp64d" +- ;; +- loongarch64-*-*-*f32) +- abi_pattern="lp64f" +- ;; +- loongarch64-*-*-*sf) +- abi_pattern="lp64s" +- ;; +- loongarch64-*-*-*) +- abi_pattern="lp64[dfs]" +- abi_default="lp64d" +- ;; ++ loongarch64-*f64) abi_base="lp64d"; abi_ext="base" ;; ++ loongarch64-*f32) abi_base="lp64f"; abi_ext="base" ;; ++ loongarch64-*sf) abi_base="lp64s"; abi_ext="base" ;; ++ loongarch64-*) abi_base="lp64d"; abi_ext="base" ;; + *) + echo "Unsupported target ${target}." 1>&2 + exit 1 + ;; + esac + +- abiext_pattern="*" +- abiext_default="base" +- + # Get the canonical triplet (multiarch specifier). ++ case ${abi_base},${abi_ext} in ++ lp64d,base) triplet_abi="";; ++ lp64f,base) triplet_abi="f32";; ++ lp64s,base) triplet_abi="sf";; ++ esac ++ + case ${target} in +- *-linux-gnu*) triplet_os="linux-gnu";; ++ *-linux-gnu*) triplet_os="linux-gnu";; + *-linux-musl*) triplet_os="linux-musl";; ++ *-elf*) triplet_os="elf";; + *) + echo "Unsupported target ${target}." 1>&2 + exit 1 + ;; + esac ++ la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" ++ + + # Perform initial sanity checks on --with-* options. + case ${with_arch} in +- "" | loongarch64 | la464) ;; # OK, append here. ++ "" | abi-default | loongarch64 | la464) ;; # OK, append here. + native) + if test x${host} != x${target}; then + echo "--with-arch=native is illegal for cross-compiler." 1>&2 + exit 1 + fi + ;; +- "") +- echo "Please set a default value for \${with_arch}" \ +- "according to your target triplet \"${target}\"." 1>&2 +- exit 1 +- ;; + *) + echo "Unknown arch in --with-arch=$with_arch" 1>&2 + exit 1 + ;; + esac + +- case ${with_abi} in +- "" | lp64d | lp64f | lp64s) ;; # OK, append here. +- *) +- echo "Unsupported ABI given in --with-abi=$with_abi" 1>&2 +- exit 1 +- ;; +- esac +- +- case ${with_abiext} in +- "" | base) ;; # OK, append here. +- *) +- echo "Unsupported ABI extention type $with_abiext" 1>&2 +- exit 1 +- ;; +- esac + + case ${with_fpu} in + "" | none | 32 | 64) ;; # OK, append here. +@@ -5059,73 +5057,41 @@ case "${target}" in + ;; + esac + +- +- # Set default value for with_abi. +- case ${with_abi} in +- "") +- if test x${abi_default} != x; then +- with_abi=${abi_default} +- else +- with_abi=${abi_pattern} +- fi +- ;; +- +- *) +- if echo "${with_abi}" | grep -E "^${abi_pattern}$" > /dev/null; then +- : # OK +- else +- echo "Incompatible options:" \ +- "--with-abi=${with_abi} and --target=${target}." 1>&2 ++ case ${with_simd} in ++ "" | none) ;; ++ lsx | lasx) # OK, append here. 
++ case ${with_fpu} in ++ 64) ;; ++ "") with_fpu=64 ;; ++ *) ++ echo "--with-simd=${with_simd} conflicts with --with-fpu=${with_fpu}" 1>&2 + exit 1 +- fi +- ;; +- esac +- +- case ${with_abi} in +- "lp64d") triplet_abi="";; +- "lp64f") triplet_abi="f32";; +- "lp64s") triplet_abi="sf";; +- esac +- la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" +- +- # Set default value for with_abiext (internal) +- case ${with_abiext} in +- "") +- if test x${abiext_default} != x; then +- with_abiext=${abiext_default} +- else +- with_abiext=${abiext_pattern} +- fi ++ ;; ++ esac + ;; + + *) +- if echo "${with_abiext}" | grep -E "^${abiext_pattern}$" > /dev/null; then +- : # OK +- else +- echo "The ABI extension type \"${with_abiext}\"" \ +- "is incompatible with --target=${target}." 1>&2 +- exit 1 +- fi +- ++ echo "Unknown SIMD extension in --with-simd=$with_simd" 1>&2 ++ exit 1 + ;; + esac + + # Infer ISA-related default options from the ABI: pass 1 +- case ${with_abi}/${with_abiext} in ++ case ${abi_base}/${abi_ext} in + lp64*/base) + # architectures that support lp64* ABI +- arch_pattern="native|loongarch64|la464" ++ arch_pattern="native|abi-default|loongarch64|la464" + # default architecture for lp64* ABI +- arch_default="loongarch64" ++ arch_default="abi-default" + ;; + *) +- echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 ++ echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2 + exit 1 + ;; + esac + + # Infer ISA-related default options from the ABI: pass 2 +- case ${with_abi}/${with_abiext} in ++ case ${abi_base}/${abi_ext} in + lp64d/base) + fpu_pattern="64" + ;; +@@ -5138,7 +5104,7 @@ case "${target}" in + fpu_default="none" + ;; + *) +- echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 ++ echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2 + exit 1 + ;; + esac +@@ -5157,7 +5123,7 @@ case "${target}" in + if echo "${with_arch}" | grep -E "^${arch_pattern}$" > /dev/null; then + : # OK + else +- echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \ + "--with-arch=${with_arch}." 1>&2 + exit 1 + fi +@@ -5178,7 +5144,7 @@ case "${target}" in + if echo "${with_fpu}" | grep -E "^${fpu_pattern}$" > /dev/null; then + : # OK + else +- echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \ + "--with-fpu=${with_fpu}." 1>&2 + exit 1 + fi +@@ -5186,32 +5152,19 @@ case "${target}" in + esac + + +- # Infer default with_tune from with_arch: pass 1 ++ # Check default with_tune configuration using with_arch. + case ${with_arch} in +- native) +- tune_pattern="*" +- tune_default="native" +- ;; + loongarch64) +- tune_pattern="loongarch64|la464" +- tune_default="la464" ++ tune_pattern="native|abi-default|loongarch64|la464" + ;; + *) + # By default, $with_tune == $with_arch +- tune_pattern="$with_arch" ++ tune_pattern="*" + ;; + esac + +- ## Set default value for with_tune. + case ${with_tune} in +- "") +- if test x${tune_default} != x; then +- with_tune=${tune_default} +- else +- with_tune=${tune_pattern} +- fi +- ;; +- ++ "") ;; # OK + *) + if echo "${with_tune}" | grep -E "^${tune_pattern}$" > /dev/null; then + : # OK +@@ -5223,13 +5176,64 @@ case "${target}" in + ;; + esac + ++ # Build libraries with -mstrict-align if --with-strict-align-lib is given. 
++ case ${with_strict_align_lib} in ++ yes) strict_align_opt="/mstrict-align" ;; ++ ""|no) ;; ++ *) ++ echo "Unknown option: --with-strict-align-lib=${with_strict_align_lib}" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ ++ # Handle --with-multilib-default ++ if echo "${with_multilib_default}" \ ++ | grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then ++ echo "Invalid argument to --with-multilib-default." 1>&2 ++ exit 1 ++ fi ++ ++ if test x${with_multilib_default} = x; then ++ # Use -march=abi-default by default when building libraries. ++ with_multilib_default="/march=abi-default" ++ else ++ unset parse_state component ++ parse_state=arch ++ for component in $(echo "${with_multilib_default}" | tr '/' ' '); do ++ case ${parse_state},${component} in ++ arch,|arch,abi-default) ++ # ABI-default: use the ABI's default ARCH configuration for ++ # multilib library builds, unless otherwise specified ++ # in --with-multilib-list. ++ with_multilib_default="/march=abi-default" ;; ++ arch,fixed) ++ # Fixed: use the default gcc configuration for all multilib ++ # builds by default. ++ with_multilib_default="" ;; ++ arch,native|arch,loongarch64|arch,la464) # OK, append here. ++ with_multilib_default="/march=${component}" ;; ++ arch,*) ++ with_multilib_default="/march=abi-default" ++ with_multilib_default="${with_multilib_default}/${component}" ;; ++ opts,*) ++ with_multilib_default="${with_multilib_default}/${component}" ;; ++ esac ++ ++ if test x${parse_state} = xarch; then ++ parse_state=opt; ++ fi ++ done ++ unset parse_state component ++ fi ++ + # Handle --with-multilib-list. + if test x"${with_multilib_list}" = x \ + || test x"${with_multilib_list}" = xno \ + || test x"${with_multilib_list}" = xdefault \ + || test x"${enable_multilib}" != xyes; then + +- with_multilib_list="${with_abi}/${with_abiext}" ++ with_multilib_list="${abi_base}/${abi_ext}" + fi + + # Check if the configured default ABI combination is included in +@@ -5245,25 +5249,21 @@ case "${target}" in + # ${with_multilib_list} should not contain whitespaces, + # consecutive commas or slashes. + if echo "${with_multilib_list}" \ +- | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null; then ++ | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null 2>&1; then + echo "Invalid argument to --with-multilib-list." 1>&2 + exit 1 + fi + +- unset component idx elem_abi_base elem_abi_ext elem_tmp ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis + for elem in $(echo "${with_multilib_list}" | tr ',' ' '); do +- idx=0 +- while true; do +- idx=$((idx + 1)) +- component=$(echo "${elem}" | awk -F'/' '{print $'"${idx}"'}') +- +- case ${idx} in +- 1) +- # Component 1: Base ABI type ++ unset elem_abi_base elem_abi_ext ++ parse_state="abi-base" ++ ++ for component in $(echo "${elem}" | tr '/' ' '); do ++ if test x${parse_state} = x"abi-base"; then ++ # Base ABI type + case ${component} in +- lp64d) elem_tmp="ABI_BASE_LP64D,";; +- lp64f) elem_tmp="ABI_BASE_LP64F,";; +- lp64s) elem_tmp="ABI_BASE_LP64S,";; ++ lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(echo ${component} | tr a-z A-Z),";; + *) + echo "Unknown base ABI \"${component}\" in --with-multilib-list." 
1>&2 + exit 1 +@@ -5272,57 +5272,114 @@ case "${target}" in + loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" + loongarch_multilib_list_make="${loongarch_multilib_list_make}mabi=${component}" + elem_abi_base="${component}" +- ;; + +- 2) +- # Component 2: ABI extension type ++ parse_state="abi-ext" ++ continue ++ fi ++ ++ if test x${parse_state} = x"abi-ext"; then ++ # ABI extension type + case ${component} in +- "" | base) +- component="base" +- elem_tmp="ABI_EXT_BASE," +- ;; +- *) +- echo "Unknown ABI extension \"${component}\" in --with-multilib-list." 1>&2 +- exit 1 ++ base) ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ parse_state="arch" ++ continue; + ;; + esac +- loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" ++ ++ # The default ABI extension is "base" if unspecified. ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," + loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. +- elem_abi_ext="${component}" +- ;; ++ parse_state="arch" ++ fi + +- *) +- # Component 3 and on: optional stuff ++ if test x${parse_state} = x"arch"; then ++ # -march option + case ${component} in +- "") +- # End of component list. +- break ++ native | abi-default | loongarch64 | la464) # OK, append here. ++ # Append -march spec for each multilib variant. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}" ++ parse_state="opts" ++ continue ++ ;; ++ ++ default) ++ # "/default" is equivalent to --with-multilib-default=fixed ++ parse_state="opts" ++ continue + ;; ++ esac ++ ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ parse_state="opts" ++ fi ++ ++ if test x${parse_state} = x"opts"; then ++ # Other compiler options for building libraries. ++ # (no static sanity check performed) ++ case ${component} in + *) +- echo "Unknown ABI \"${elem}\" in --with-multilib-list." 1>&2 +- exit 1 ++ # Append other components as additional build options ++ # (without the prepending dash). ++ # Their validity should be examined by the compiler. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}" + ;; + esac +- ;; +- esac ++ fi + done + +- if test x${elem_abi_base} = x${with_abi} \ +- && test x${elem_abi_ext} = x${with_abiext}; then ++ case ${parse_state} in ++ "abi-ext") ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;; ++ "arch") ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;; ++ "opts") ++ : ++ ;; ++ esac ++ ++ # Use mstrict-align for building libraries if --with-strict-align-lib is given. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${strict_align_opt}" ++ ++ # Check for repeated configuration of the same multilib variant. 
++ if echo "${elem_abi_base}/${elem_abi_ext}" \ ++ | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then ++ echo "Repeated multilib config of \"${elem_abi_base}/${elem_abi_ext}\" in --with-multilib-list." ++ exit 1 ++ fi ++ all_abis="${all_abis}${elem_abi_base}/${elem_abi_ext}|" ++ ++ ++ # Check if the default ABI configuration of the GCC binary ++ # is included in the enabled multilib variants. ++ if test x${elem_abi_base} = x${abi_base} \ ++ && test x${elem_abi_ext} = x${abi_ext}; then + loongarch_multilib_list_sane=yes + fi + loongarch_multilib_list_make="${loongarch_multilib_list_make}," + done ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis ++ + + # Check if the default ABI combination is in the default list. + if test x${loongarch_multilib_list_sane} = xno; then +- if test x${with_abiext} = xbase; then +- with_abiext="" ++ if test x${abi_ext} = xbase; then ++ abi_ext="" + else +- with_abiext="/${with_abiext}" ++ abi_ext="/${abi_ext}" + fi + +- echo "Default ABI combination (${with_abi}${with_abiext})" \ ++ echo "Default ABI combination (${abi_base}${abi_ext})" \ + "not found in --with-multilib-list." 1>&2 + exit 1 + fi +@@ -5783,34 +5840,37 @@ case ${target} in + + # Let --with- flags initialize the enum variables from loongarch.opt. + # See macro definitions from loongarch-opts.h and loongarch-cpu.h. +- case ${with_arch} in +- native) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_NATIVE" ;; +- la464) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LA464" ;; +- loongarch64) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LOONGARCH64" ;; +- esac + +- case ${with_tune} in +- native) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_NATIVE" ;; +- la464) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LA464" ;; +- loongarch64) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LOONGARCH64" ;; +- esac ++ # Architecture ++ tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(echo ${with_arch} | tr a-z- A-Z_)" + +- case ${with_abi} in +- lp64d) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64D" ;; +- lp64f) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64F" ;; +- lp64s) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64S" ;; +- esac ++ # Base ABI type ++ tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(echo ${abi_base} | tr a-z- A-Z_)" + +- case ${with_abiext} in ++ # ABI Extension ++ case ${abi_ext} in + base) tm_defines="${tm_defines} DEFAULT_ABI_EXT=ABI_EXT_BASE" ;; + esac + ++ # Microarchitecture ++ if test x${with_tune} != x; then ++ tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(echo ${with_tune} | tr a-z- A-Z_)" ++ fi ++ ++ # FPU adjustment + case ${with_fpu} in +- none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NOFPU" ;; ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NONE" ;; + 32) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU32" ;; + 64) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64" ;; + esac + ++ # SIMD extensions ++ case ${with_simd} in ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_NONE" ;; ++ lsx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX" ;; ++ lasx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;; ++ esac ++ + tmake_file="loongarch/t-loongarch $tmake_file" + ;; + +diff --git a/gcc/config.in b/gcc/config.in +index 64c27c9cf..67ce422f2 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -404,13 +404,19 @@ + #endif + + ++/* Define if your assembler supports eh_frame pcrel encoding. 
*/ ++#ifndef USED_FOR_TARGET ++#undef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT ++#endif ++ ++ + /* Define if your assembler supports the R_PPC64_ENTRY relocation. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_ENTRY_MARKERS + #endif + + +-/* Define if your assembler supports explicit relocations. */ ++/* Define if your assembler supports explicit relocation. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_EXPLICIT_RELOCS + #endif +diff --git a/gcc/config/host-linux.cc b/gcc/config/host-linux.cc +index 817d3c087..d93cfc064 100644 +--- a/gcc/config/host-linux.cc ++++ b/gcc/config/host-linux.cc +@@ -99,7 +99,7 @@ + #elif defined(__riscv) && defined (__LP64__) + # define TRY_EMPTY_VM_SPACE 0x1000000000 + #elif defined(__loongarch__) && defined(__LP64__) +-# define TRY_EMPTY_VM_SPACE 0x8000000000 ++# define TRY_EMPTY_VM_SPACE 0x1000000000 + #else + # define TRY_EMPTY_VM_SPACE 0 + #endif +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index d0bfddbd5..cec5d8857 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -20,14 +20,14 @@ + + ;; Register constraints + +-;; "a" "A constant call global and noplt address." +-;; "b" <-----unused ++;; "a" <-----unused ++;; "b" "A constant call not local address." + ;; "c" "A constant call local address." + ;; "d" <-----unused + ;; "e" JIRL_REGS + ;; "f" FP_REGS + ;; "g" <-----unused +-;; "h" "A constant call plt address." ++;; "h" <-----unused + ;; "i" "Matches a general integer constant." (Global non-architectural) + ;; "j" SIBCALL_REGS + ;; "k" "A memory operand whose address is formed by a base register and +@@ -42,7 +42,7 @@ + ;; "q" CSR_REGS + ;; "r" GENERAL_REGS (Global non-architectural) + ;; "s" "Matches a symbolic integer constant." (Global non-architectural) +-;; "t" "A constant call weak address" ++;; "t" <-----unused + ;; "u" "A signed 52bit constant and low 32-bit is zero (for logic instructions)" + ;; "v" "A signed 64-bit constant and low 44-bit is zero (for logic instructions)." + ;; "w" "Matches any valid memory." +@@ -60,13 +60,29 @@ + ;; "I" "A signed 12-bit constant (for arithmetic instructions)." + ;; "J" "Integer zero." + ;; "K" "An unsigned 12-bit constant (for logic instructions)." +-;; "L" <-----unused +-;; "M" <-----unused +-;; "N" <-----unused +-;; "O" <-----unused +-;; "P" <-----unused ++;; "L" - ++;; "La" ++;; "A signed constant in [-4096, 2048) or (2047, 4094]." ++;; "Lb" ++;; "A signed 32-bit constant and low 16-bit is zero, which can be ++;; added onto a register with addu16i.d. It matches nothing if ++;; the addu16i.d instruction is not available." ++;; "Lc" ++;; "A signed 64-bit constant can be expressed as Lb + I, but not a ++;; single Lb or I." ++;; "Ld" ++;; "A signed 64-bit constant can be expressed as Lb + Lb, but not a ++;; single Lb." ++;; "Le" ++;; "A signed 32-bit constant can be expressed as Lb + I, but not a ++;; single Lb or I." ++;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} ++;; or @code{ori}." ++;; "N" "A constant in the range -65535 to -1 (inclusive)." ++;; "O" "A signed 15-bit constant." ++;; "P" "A constant in the range 1 to 65535 (inclusive)." + ;; "Q" <-----unused +-;; "R" <-----unused ++;; "R" "An address that can be used in a non-macro load or store." + ;; "S" <-----unused + ;; "T" <-----unused + ;; "U" <-----unused +@@ -86,13 +102,17 @@ + ;; "ZB" + ;; "An address that is held in a general-purpose register. 
+ ;; The offset is zero" ++;; "ZD" ++;; "An address operand whose address is formed by a base register ++;; and offset that is suitable for use in instructions with the same ++;; addressing mode as @code{preld}." + ;; "<" "Matches a pre-dec or post-dec operand." (Global non-architectural) + ;; ">" "Matches a pre-inc or post-inc operand." (Global non-architectural) + +-(define_constraint "a" ++(define_constraint "b" + "@internal +- A constant call global and noplt address." +- (match_operand 0 "is_const_call_global_noplt_symbol")) ++ A constant call no local address." ++ (match_operand 0 "is_const_call_no_local_symbol")) + + (define_constraint "c" + "@internal +@@ -105,11 +125,6 @@ + (define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" + "A floating-point register (if available).") + +-(define_constraint "h" +- "@internal +- A constant call plt address." +- (match_operand 0 "is_const_call_plt_symbol")) +- + (define_register_constraint "j" "SIBCALL_REGS" + "@internal") + +@@ -134,11 +149,6 @@ + (define_register_constraint "q" "CSR_REGS" + "A general-purpose register except for $r0 and $r1 for lcsr.") + +-(define_constraint "t" +- "@internal +- A constant call weak address." +- (match_operand 0 "is_const_call_weak_symbol")) +- + (define_constraint "u" + "A signed 52bit constant and low 32-bit is zero (for logic instructions)." + (and (match_code "const_int") +@@ -176,6 +186,92 @@ + (and (match_code "const_int") + (match_test "IMM12_OPERAND_UNSIGNED (ival)"))) + ++(define_constraint "La" ++ "A signed constant in [-4096, 2048) or (2047, 4094]." ++ (and (match_code "const_int") ++ (match_test "DUAL_IMM12_OPERAND (ival)"))) ++ ++(define_constraint "Lb" ++ "A signed 32-bit constant and low 16-bit is zero, which can be added ++ onto a register with addu16i.d." ++ (and (match_code "const_int") ++ (match_test "ADDU16I_OPERAND (ival)"))) ++ ++(define_constraint "Lc" ++ "A signed 64-bit constant can be expressed as Lb + I, but not a single Lb ++ or I." ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (ival, DImode)"))) ++ ++(define_constraint "Ld" ++ "A signed 64-bit constant can be expressed as Lb + Lb, but not a single ++ Lb." ++ (and (match_code "const_int") ++ (match_test "DUAL_ADDU16I_OPERAND (ival)"))) ++ ++(define_constraint "Le" ++ "A signed 32-bit constant can be expressed as Lb + I, but not a single Lb ++ or I." ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (ival, SImode)"))) ++ ++(define_constraint "M" ++ "A constant that cannot be loaded using @code{lui}, @code{addiu} ++ or @code{ori}." ++ (and (match_code "const_int") ++ (not (match_test "IMM12_OPERAND (ival)")) ++ (not (match_test "IMM12_OPERAND_UNSIGNED (ival)")) ++ (not (match_test "LU12I_OPERAND (ival)")))) ++ ++(define_constraint "N" ++ "A constant in the range -65535 to -1 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival >= -0xffff && ival < 0"))) ++ ++(define_constraint "O" ++ "A signed 15-bit constant." ++ (and (match_code "const_int") ++ (match_test "ival >= -0x4000 && ival < 0x4000"))) ++ ++(define_constraint "P" ++ "A constant in the range 1 to 65535 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival > 0 && ival < 0x10000"))) ++ ++;; General constraints ++ ++(define_memory_constraint "R" ++ "An address that can be used in a non-macro load or store." 
++ (and (match_code "mem") ++ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) ++(define_constraint "S" ++ "@internal ++ A constant call address." ++ (and (match_operand 0 "call_insn_operand") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_constraint "YG" ++ "@internal ++ A vector zero." ++ (and (match_code "const_vector") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++(define_constraint "YA" ++ "@internal ++ An unsigned 6-bit constant." ++ (and (match_code "const_int") ++ (match_test "UIMM6_OPERAND (ival)"))) ++ ++(define_constraint "YB" ++ "@internal ++ A signed 10-bit constant." ++ (and (match_code "const_int") ++ (match_test "IMM10_OPERAND (ival)"))) ++ ++(define_constraint "Yb" ++ "@internal" ++ (match_operand 0 "qi_mask_operand")) ++ + (define_constraint "Yd" + "@internal + A constant @code{move_operand} that can be safely loaded using +@@ -183,10 +279,73 @@ + (and (match_operand 0 "move_operand") + (match_test "CONSTANT_P (op)"))) + ++(define_constraint "Yh" ++ "@internal" ++ (match_operand 0 "hi_mask_operand")) ++ ++(define_constraint "Yw" ++ "@internal" ++ (match_operand 0 "si_mask_operand")) ++ + (define_constraint "Yx" + "@internal" + (match_operand 0 "low_bitmask_operand")) + ++(define_constraint "YI" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-512,511]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -512, 511)"))) ++ ++(define_constraint "YC" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit set." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_set_p (op, mode)"))) ++ ++(define_constraint "YZ" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit clear." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_clr_p (op, mode)"))) ++ ++(define_constraint "Unv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-31,0]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -31, 0)"))) ++ ++(define_constraint "Uuv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,31]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 31)"))) ++ ++(define_constraint "Usv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-16,15]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -16, 15)"))) ++ ++(define_constraint "Uuv6" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,63]." 
++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 63)"))) ++ ++(define_constraint "Urv8" ++ "@internal ++ A replicated vector const with replicated byte values as well as elements" ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) ++ + (define_memory_constraint "ZC" + "A memory operand whose address is formed by a base register and offset + that is suitable for use in instructions with the same addressing mode +@@ -200,3 +359,9 @@ + The offset is zero" + (and (match_code "mem") + (match_test "REG_P (XEXP (op, 0))"))) ++ ++(define_address_constraint "ZD" ++ "An address operand whose address is formed by a base register ++ and offset that is suitable for use in instructions with the same ++ addressing mode as @code{preld}." ++ (match_test "loongarch_12bit_offset_address_p (op, mode)")) +diff --git a/gcc/config/loongarch/elf.h b/gcc/config/loongarch/elf.h +new file mode 100644 +index 000000000..523d5c756 +--- /dev/null ++++ b/gcc/config/loongarch/elf.h +@@ -0,0 +1,51 @@ ++/* Definitions for LoongArch ELF-based systems. ++ Copyright (C) 2023 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Define the size of the wide character type. */ ++#undef WCHAR_TYPE ++#define WCHAR_TYPE "int" ++ ++#undef WCHAR_TYPE_SIZE ++#define WCHAR_TYPE_SIZE 32 ++ ++ ++/* GNU-specific SPEC definitions. */ ++#define GNU_USER_LINK_EMULATION "elf" ABI_GRLEN_SPEC "loongarch" ++ ++#undef GNU_USER_TARGET_LINK_SPEC ++#define GNU_USER_TARGET_LINK_SPEC \ ++ "%{shared} -m " GNU_USER_LINK_EMULATION ++ ++ ++/* Link against Newlib libraries, because the ELF backend assumes Newlib. ++ Handle the circular dependence between libc and libgloss. 
*/ ++#undef LIB_SPEC ++#define LIB_SPEC "--start-group -lc %{!specs=nosys.specs:-lgloss} --end-group" ++ ++#undef LINK_SPEC ++#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC ++ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s" ++ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC "crtend%O%s" ++ ++#undef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC "%{profile:-p}" +diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings +index cb88ed56b..21245f52a 100644 +--- a/gcc/config/loongarch/genopts/loongarch-strings ++++ b/gcc/config/loongarch/genopts/loongarch-strings +@@ -23,6 +23,7 @@ OPTSTR_ARCH arch + OPTSTR_TUNE tune + + STR_CPU_NATIVE native ++STR_CPU_ABI_DEFAULT abi-default + STR_CPU_LOONGARCH64 loongarch64 + STR_CPU_LA464 la464 + +@@ -31,7 +32,7 @@ STR_ISA_BASE_LA64V100 la64 + + # -mfpu + OPTSTR_ISA_EXT_FPU fpu +-STR_ISA_EXT_NOFPU none ++STR_NONE none + STR_ISA_EXT_FPU0 0 + STR_ISA_EXT_FPU32 32 + STR_ISA_EXT_FPU64 64 +@@ -40,6 +41,11 @@ OPTSTR_SOFT_FLOAT soft-float + OPTSTR_SINGLE_FLOAT single-float + OPTSTR_DOUBLE_FLOAT double-float + ++# SIMD extensions ++OPTSTR_ISA_EXT_SIMD simd ++STR_ISA_EXT_LSX lsx ++STR_ISA_EXT_LASX lasx ++ + # -mabi= + OPTSTR_ABI_BASE abi + STR_ABI_BASE_LP64D lp64d +@@ -54,5 +60,6 @@ OPTSTR_CMODEL cmodel + STR_CMODEL_NORMAL normal + STR_CMODEL_TINY tiny + STR_CMODEL_TS tiny-static ++STR_CMODEL_MEDIUM medium + STR_CMODEL_LARGE large + STR_CMODEL_EXTREME extreme +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 61e7d72a0..c53785a37 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -17,22 +17,12 @@ + ; . + ; + +-; Variables (macros) that should be exported by loongarch.opt: +-; la_opt_switches, +-; la_opt_abi_base, la_opt_abi_ext, +-; la_opt_cpu_arch, la_opt_cpu_tune, +-; la_opt_fpu, +-; la_cmodel. +- + HeaderInclude + config/loongarch/loongarch-opts.h + + HeaderInclude + config/loongarch/loongarch-str.h + +-Variable +-HOST_WIDE_INT la_opt_switches = 0 +- + ; ISA related options + ;; Base ISA + Enum +@@ -42,14 +32,13 @@ Basic ISAs of LoongArch: + EnumValue + Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100) + +- + ;; ISA extensions / adjustments + Enum + Name(isa_ext_fpu) Type(int) + FPU types of LoongArch: + + EnumValue +-Enum(isa_ext_fpu) String(@@STR_ISA_EXT_NOFPU@@) Value(ISA_EXT_NOFPU) ++Enum(isa_ext_fpu) String(@@STR_NONE@@) Value(ISA_EXT_NONE) + + EnumValue + Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU32@@) Value(ISA_EXT_FPU32) +@@ -58,24 +47,48 @@ EnumValue + Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64) + + m@@OPTSTR_ISA_EXT_FPU@@= +-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) + -m@@OPTSTR_ISA_EXT_FPU@@=FPU Generate code for the given FPU. + + m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@ +-Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_ISA_EXT_NOFPU@@) ++Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_NONE@@) + + m@@OPTSTR_SOFT_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_SOFTF) Negative(m@@OPTSTR_SINGLE_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SINGLE_FLOAT@@) + Prevent the use of all hardware floating-point instructions. 
+ + m@@OPTSTR_SINGLE_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F32) Negative(m@@OPTSTR_DOUBLE_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_DOUBLE_FLOAT@@) + Restrict the use of hardware floating-point instructions to 32-bit operations. + + m@@OPTSTR_DOUBLE_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F64) Negative(m@@OPTSTR_SOFT_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SOFT_FLOAT@@) + Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. + ++Enum ++Name(isa_ext_simd) Type(int) ++SIMD extension levels of LoongArch: ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_NONE@@) Value(ISA_EXT_NONE) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LSX@@) Value(ISA_EXT_SIMD_LSX) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX) ++ ++m@@OPTSTR_ISA_EXT_SIMD@@= ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++-m@@OPTSTR_ISA_EXT_SIMD@@=SIMD Generate code for the given SIMD extension. ++ ++m@@STR_ISA_EXT_LSX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch SIMD Extension (LSX, 128-bit). ++ ++m@@STR_ISA_EXT_LASX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). + + ;; Base target models (implies ISA & tune parameters) + Enum +@@ -85,6 +98,9 @@ LoongArch CPU types: + EnumValue + Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE) + ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT) ++ + EnumValue + Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64) + +@@ -92,11 +108,11 @@ EnumValue + Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464) + + m@@OPTSTR_ARCH@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) + -m@@OPTSTR_ARCH@@=PROCESSOR Generate code for the given PROCESSOR ISA. + + m@@OPTSTR_TUNE@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) + -m@@OPTSTR_TUNE@@=PROCESSOR Generate optimized code for PROCESSOR. + + +@@ -118,18 +134,22 @@ EnumValue + Enum(abi_base) String(@@STR_ABI_BASE_LP64S@@) Value(ABI_BASE_LP64S) + + m@@OPTSTR_ABI_BASE@@= +-Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) + -m@@OPTSTR_ABI_BASE@@=BASEABI Generate code that conforms to the given BASEABI. + ++ + ;; ABI Extension + Variable +-int la_opt_abi_ext = M_OPTION_NOT_SEEN +- ++int la_opt_abi_ext = M_OPT_UNSET + + mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + ++mmemvec-cost= ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++mmemvec-cost=COST Set the cost of vector memory access instructions. ++ + mcheck-zero-division + Target Mask(CHECK_ZERO_DIV) + Trap on integer divide by zero. 
+@@ -154,6 +174,10 @@ mmax-inline-memcpy-size= + Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + ++mexplicit-relocs ++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS) ++Use %reloc() assembly operators. ++ + ; The code model option names for -mcmodel. + Enum + Name(cmodel) Type(int) +@@ -168,6 +192,9 @@ Enum(cmodel) String(@@STR_CMODEL_TINY@@) Value(CMODEL_TINY) + EnumValue + Enum(cmodel) String(@@STR_CMODEL_TS@@) Value(CMODEL_TINY_STATIC) + ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_MEDIUM@@) Value(CMODEL_MEDIUM) ++ + EnumValue + Enum(cmodel) String(@@STR_CMODEL_LARGE@@) Value(CMODEL_LARGE) + +@@ -175,5 +202,9 @@ EnumValue + Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) + + mcmodel= +-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL) ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) + Specify the code model. ++ ++mdirect-extern-access ++Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) ++Avoid using the GOT to access external symbols. +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 664dc9206..44e4f2575 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -33,21 +33,28 @@ along with GCC; see the file COPYING3. If not see + #define GLIBC_DYNAMIC_LINKER \ + "/lib" ABI_GRLEN_SPEC "/ld-linux-loongarch-" ABI_SPEC ".so.1" + ++#define MUSL_ABI_SPEC \ ++ "%{mabi=lp64d:-lp64d}" \ ++ "%{mabi=lp64f:-lp64f}" \ ++ "%{mabi=lp64s:-lp64s}" ++ + #undef MUSL_DYNAMIC_LINKER + #define MUSL_DYNAMIC_LINKER \ +- "/lib" ABI_GRLEN_SPEC "/ld-musl-loongarch-" ABI_SPEC ".so.1" ++ "/lib/ld-musl-loongarch" ABI_GRLEN_SPEC MUSL_ABI_SPEC ".so.1" + + #undef GNU_USER_TARGET_LINK_SPEC + #define GNU_USER_TARGET_LINK_SPEC \ + "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \ +- "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \ +- "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}" ++ "%{!shared: %{static} " \ ++ "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \ ++ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \ ++ "%{static-pie: -static -pie --no-dynamic-linker -z text}}" + + + /* Similar to standard Linux, but adding -ffast-math support. */ + #undef GNU_USER_TARGET_MATHFILE_SPEC + #define GNU_USER_TARGET_MATHFILE_SPEC \ +- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" ++ "%{Ofast|ffast-math|funsafe-math-optimizations:%{!shared:crtfastmath.o%s}}" + + #undef LIB_SPEC + #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC +diff --git a/gcc/config/loongarch/la464.md b/gcc/config/loongarch/la464.md +index 0ae177610..89d61bf58 100644 +--- a/gcc/config/loongarch/la464.md ++++ b/gcc/config/loongarch/la464.md +@@ -43,88 +43,88 @@ + ;; Describe instruction reservations. 
+ + (define_insn_reservation "la464_arith" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "arith,clz,const,logical, + move,nop,shift,signext,slt")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_branch" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "branch,jump,call,condmove,trap")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_imul" 7 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "imul")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_idiv_si" 12 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "idiv") + (eq_attr "mode" "SI"))) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_idiv_di" 25 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "idiv") + (eq_attr "mode" "DI"))) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_load" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "load")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_gpr_fp" 16 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "mftg,mgtf")) + "la464_mem1") + + (define_insn_reservation "la464_fpload" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fpload")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_prefetch" 0 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "prefetch,prefetchx")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_store" 0 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "store,fpstore,fpidxstore")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_fadd" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fadd,fmul,fmadd")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fcmp" 2 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fabs,fcmp,fmove,fneg")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fcvt" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fcvt")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fdiv_sf" 12 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") + (eq_attr "mode" "SF"))) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fdiv_df" 19 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") + (eq_attr "mode" "DF"))) + "la464_falu1 | la464_falu2") + + ;; Force single-dispatch for unknown or multi. 
+ (define_insn_reservation "la464_unknown" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "unknown,multi,atomic,syncloop")) + "la464_alu1 + la464_alu2 + la464_falu1 + + la464_falu2 + la464_mem1 + la464_mem2") +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +new file mode 100644 +index 000000000..8111c8bb7 +--- /dev/null ++++ b/gcc/config/loongarch/lasx.md +@@ -0,0 +1,5104 @@ ++;; Machine Description for LARCH Loongson ASX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++;; ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LASX_XVABSD_S ++ UNSPEC_LASX_XVABSD_U ++ UNSPEC_LASX_XVAVG_S ++ UNSPEC_LASX_XVAVG_U ++ UNSPEC_LASX_XVAVGR_S ++ UNSPEC_LASX_XVAVGR_U ++ UNSPEC_LASX_XVBITCLR ++ UNSPEC_LASX_XVBITCLRI ++ UNSPEC_LASX_XVBITREV ++ UNSPEC_LASX_XVBITREVI ++ UNSPEC_LASX_XVBITSET ++ UNSPEC_LASX_XVBITSETI ++ UNSPEC_LASX_XVFCMP_CAF ++ UNSPEC_LASX_XVFCLASS ++ UNSPEC_LASX_XVFCMP_CUNE ++ UNSPEC_LASX_XVFCVT ++ UNSPEC_LASX_XVFCVTH ++ UNSPEC_LASX_XVFCVTL ++ UNSPEC_LASX_XVFLOGB ++ UNSPEC_LASX_XVFRECIP ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRSQRT ++ UNSPEC_LASX_XVFCMP_SAF ++ UNSPEC_LASX_XVFCMP_SEQ ++ UNSPEC_LASX_XVFCMP_SLE ++ UNSPEC_LASX_XVFCMP_SLT ++ UNSPEC_LASX_XVFCMP_SNE ++ UNSPEC_LASX_XVFCMP_SOR ++ UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SULE ++ UNSPEC_LASX_XVFCMP_SULT ++ UNSPEC_LASX_XVFCMP_SUN ++ UNSPEC_LASX_XVFCMP_SUNE ++ UNSPEC_LASX_XVFTINT_S ++ UNSPEC_LASX_XVFTINT_U ++ UNSPEC_LASX_XVCLO ++ UNSPEC_LASX_XVSAT_S ++ UNSPEC_LASX_XVSAT_U ++ UNSPEC_LASX_XVREPLVE0 ++ UNSPEC_LASX_XVREPL128VEI ++ UNSPEC_LASX_XVSRAR ++ UNSPEC_LASX_XVSRARI ++ UNSPEC_LASX_XVSRLR ++ UNSPEC_LASX_XVSRLRI ++ UNSPEC_LASX_XVSHUF ++ UNSPEC_LASX_XVSHUF_B ++ UNSPEC_LASX_BRANCH ++ UNSPEC_LASX_BRANCH_V ++ ++ UNSPEC_LASX_XVMUH_S ++ UNSPEC_LASX_XVMUH_U ++ UNSPEC_LASX_MXVEXTW_U ++ UNSPEC_LASX_XVSLLWIL_S ++ UNSPEC_LASX_XVSLLWIL_U ++ UNSPEC_LASX_XVSRAN ++ UNSPEC_LASX_XVSSRAN_S ++ UNSPEC_LASX_XVSSRAN_U ++ UNSPEC_LASX_XVSRARN ++ UNSPEC_LASX_XVSSRARN_S ++ UNSPEC_LASX_XVSSRARN_U ++ UNSPEC_LASX_XVSRLN ++ UNSPEC_LASX_XVSSRLN_U ++ UNSPEC_LASX_XVSRLRN ++ UNSPEC_LASX_XVSSRLRN_U ++ UNSPEC_LASX_XVFRSTPI ++ UNSPEC_LASX_XVFRSTP ++ UNSPEC_LASX_XVSHUF4I ++ UNSPEC_LASX_XVBSRL_V ++ UNSPEC_LASX_XVBSLL_V ++ UNSPEC_LASX_XVEXTRINS ++ UNSPEC_LASX_XVMSKLTZ ++ UNSPEC_LASX_XVSIGNCOV ++ UNSPEC_LASX_XVFTINTRNE_W_S ++ UNSPEC_LASX_XVFTINTRNE_L_D ++ UNSPEC_LASX_XVFTINTRP_W_S ++ UNSPEC_LASX_XVFTINTRP_L_D ++ UNSPEC_LASX_XVFTINTRM_W_S ++ UNSPEC_LASX_XVFTINTRM_L_D ++ UNSPEC_LASX_XVFTINT_W_D ++ UNSPEC_LASX_XVFFINT_S_L ++ UNSPEC_LASX_XVFTINTRZ_W_D ++ UNSPEC_LASX_XVFTINTRP_W_D ++ UNSPEC_LASX_XVFTINTRM_W_D ++ UNSPEC_LASX_XVFTINTRNE_W_D ++ UNSPEC_LASX_XVFTINTH_L_S ++ UNSPEC_LASX_XVFTINTL_L_S ++ UNSPEC_LASX_XVFFINTH_D_W ++ UNSPEC_LASX_XVFFINTL_D_W ++ UNSPEC_LASX_XVFTINTRZH_L_S ++ UNSPEC_LASX_XVFTINTRZL_L_S ++ UNSPEC_LASX_XVFTINTRPH_L_S 
++ UNSPEC_LASX_XVFTINTRPL_L_S ++ UNSPEC_LASX_XVFTINTRMH_L_S ++ UNSPEC_LASX_XVFTINTRML_L_S ++ UNSPEC_LASX_XVFTINTRNEL_L_S ++ UNSPEC_LASX_XVFTINTRNEH_L_S ++ UNSPEC_LASX_XVFRINTRNE_S ++ UNSPEC_LASX_XVFRINTRNE_D ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRM_S ++ UNSPEC_LASX_XVFRINTRM_D ++ UNSPEC_LASX_XVREPLVE0_Q ++ UNSPEC_LASX_XVPERM_W ++ UNSPEC_LASX_XVPERMI_Q ++ UNSPEC_LASX_XVPERMI_D ++ ++ UNSPEC_LASX_XVADDWEV ++ UNSPEC_LASX_XVADDWEV2 ++ UNSPEC_LASX_XVADDWEV3 ++ UNSPEC_LASX_XVSUBWEV ++ UNSPEC_LASX_XVSUBWEV2 ++ UNSPEC_LASX_XVMULWEV ++ UNSPEC_LASX_XVMULWEV2 ++ UNSPEC_LASX_XVMULWEV3 ++ UNSPEC_LASX_XVADDWOD ++ UNSPEC_LASX_XVADDWOD2 ++ UNSPEC_LASX_XVADDWOD3 ++ UNSPEC_LASX_XVSUBWOD ++ UNSPEC_LASX_XVSUBWOD2 ++ UNSPEC_LASX_XVMULWOD ++ UNSPEC_LASX_XVMULWOD2 ++ UNSPEC_LASX_XVMULWOD3 ++ UNSPEC_LASX_XVMADDWEV ++ UNSPEC_LASX_XVMADDWEV2 ++ UNSPEC_LASX_XVMADDWEV3 ++ UNSPEC_LASX_XVMADDWOD ++ UNSPEC_LASX_XVMADDWOD2 ++ UNSPEC_LASX_XVMADDWOD3 ++ UNSPEC_LASX_XVHADDW_Q_D ++ UNSPEC_LASX_XVHSUBW_Q_D ++ UNSPEC_LASX_XVHADDW_QU_DU ++ UNSPEC_LASX_XVHSUBW_QU_DU ++ UNSPEC_LASX_XVROTR ++ UNSPEC_LASX_XVADD_Q ++ UNSPEC_LASX_XVSUB_Q ++ UNSPEC_LASX_XVREPLVE ++ UNSPEC_LASX_XVSHUF4 ++ UNSPEC_LASX_XVMSKGEZ ++ UNSPEC_LASX_XVMSKNZ ++ UNSPEC_LASX_XVEXTH_Q_D ++ UNSPEC_LASX_XVEXTH_QU_DU ++ UNSPEC_LASX_XVEXTL_Q_D ++ UNSPEC_LASX_XVSRLNI ++ UNSPEC_LASX_XVSRLRNI ++ UNSPEC_LASX_XVSSRLNI ++ UNSPEC_LASX_XVSSRLNI2 ++ UNSPEC_LASX_XVSSRLRNI ++ UNSPEC_LASX_XVSSRLRNI2 ++ UNSPEC_LASX_XVSRANI ++ UNSPEC_LASX_XVSRARNI ++ UNSPEC_LASX_XVSSRANI ++ UNSPEC_LASX_XVSSRANI2 ++ UNSPEC_LASX_XVSSRARNI ++ UNSPEC_LASX_XVSSRARNI2 ++ UNSPEC_LASX_XVPERMI ++ UNSPEC_LASX_XVINSVE0 ++ UNSPEC_LASX_XVPICKVE ++ UNSPEC_LASX_XVSSRLN ++ UNSPEC_LASX_XVSSRLRN ++ UNSPEC_LASX_XVEXTL_QU_DU ++ UNSPEC_LASX_XVLDI ++ UNSPEC_LASX_XVLDX ++ UNSPEC_LASX_XVSTX ++]) ++ ++;; All vector modes with 256 bits. ++(define_mode_iterator LASX [V4DF V8SF V4DI V8SI V16HI V32QI]) ++ ++;; Same as LASX. Used by vcond to iterate two modes. ++(define_mode_iterator LASX_2 [V4DF V8SF V4DI V8SI V16HI V32QI]) ++ ++;; Only used for splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LASX_D [V4DI V4DF]) ++ ++;; Only used for splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LASX_WD [V4DI V4DF V8SI V8SF]) ++ ++;; Only used for copy256_{u,s}.w. ++(define_mode_iterator LASX_W [V8SI V8SF]) ++ ++;; Only integer modes in LASX. ++(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI]) ++ ++;; As ILASX but excludes V32QI. ++(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI]) ++ ++;; As LASX but excludes V32QI. ++(define_mode_iterator LASX_DWH [V4DF V8SF V4DI V8SI V16HI]) ++ ++;; As ILASX but excludes V4DI. ++(define_mode_iterator ILASX_WHB [V8SI V16HI V32QI]) ++ ++;; Only integer modes equal or larger than a word. ++(define_mode_iterator ILASX_DW [V4DI V8SI]) ++ ++;; Only integer modes smaller than a word. ++(define_mode_iterator ILASX_HB [V16HI V32QI]) ++ ++;; Only floating-point modes in LASX. ++(define_mode_iterator FLASX [V4DF V8SF]) ++ ++;; Only used for immediate set shuffle elements instruction. ++(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF]) ++ ++;; The attribute gives the integer vector mode with same size in Loongson ASX. ++(define_mode_attr VIMODE256 ++ [(V4DF "V4DI") ++ (V8SF "V8SI") ++ (V4DI "V4DI") ++ (V8SI "V8SI") ++ (V16HI "V16HI") ++ (V32QI "V32QI")]) ++ ++;;attribute gives half modes for vector modes. ++;;attribute gives half modes (Same Size) for vector modes. 
++(define_mode_attr VHSMODE256 ++ [(V16HI "V32QI") ++ (V8SI "V16HI") ++ (V4DI "V8SI")]) ++ ++;;attribute gives half modes for vector modes. ++(define_mode_attr VHMODE256 ++ [(V32QI "V16QI") ++ (V16HI "V8HI") ++ (V8SI "V4SI") ++ (V4DI "V2DI")]) ++ ++;;attribute gives half float modes for vector modes. ++(define_mode_attr VFHMODE256 ++ [(V8SF "V4SF") ++ (V4DF "V2DF")]) ++ ++;; The attribute gives double modes for vector modes in LASX. ++(define_mode_attr VDMODE256 ++ [(V8SI "V4DI") ++ (V16HI "V8SI") ++ (V32QI "V16HI")]) ++ ++;; extended from VDMODE256 ++(define_mode_attr VDMODEEXD256 ++ [(V4DI "V4DI") ++ (V8SI "V4DI") ++ (V16HI "V8SI") ++ (V32QI "V16HI")]) ++ ++;; The attribute gives half modes with same number of elements for vector modes. ++(define_mode_attr VTRUNCMODE256 ++ [(V16HI "V16QI") ++ (V8SI "V8HI") ++ (V4DI "V4SI")]) ++ ++;; Double-sized Vector MODE with same elemet type. "Vector, Enlarged-MODE" ++(define_mode_attr VEMODE256 ++ [(V8SF "V16SF") ++ (V8SI "V16SI") ++ (V4DF "V8DF") ++ (V4DI "V8DI")]) ++ ++;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc. ++(define_mode_attr VRES256 ++ [(V4DF "DF") ++ (V8SF "SF") ++ (V4DI "DI") ++ (V8SI "SI") ++ (V16HI "SI") ++ (V32QI "SI")]) ++ ++;; Only used with LASX_D iterator. ++(define_mode_attr lasx_d ++ [(V4DI "reg_or_0") ++ (V4DF "register")]) ++ ++;; This attribute gives the 256 bit integer vector mode with same size. ++(define_mode_attr mode256_i ++ [(V4DF "v4di") ++ (V8SF "v8si") ++ (V4DI "v4di") ++ (V8SI "v8si") ++ (V16HI "v16hi") ++ (V32QI "v32qi")]) ++ ++ ++;; This attribute gives the 256 bit float vector mode with same size. ++(define_mode_attr mode256_f ++ [(V4DF "v4df") ++ (V8SF "v8sf") ++ (V4DI "v4df") ++ (V8SI "v8sf")]) ++ ++ ;; This attribute gives suffix for LASX instructions. HOW? ++(define_mode_attr lasxfmt ++ [(V4DF "d") ++ (V8SF "w") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++(define_mode_attr flasxfmt ++ [(V4DF "d") ++ (V8SF "s")]) ++ ++(define_mode_attr lasxfmt_u ++ [(V4DF "du") ++ (V8SF "wu") ++ (V4DI "du") ++ (V8SI "wu") ++ (V16HI "hu") ++ (V32QI "bu")]) ++ ++(define_mode_attr ilasxfmt ++ [(V4DF "l") ++ (V8SF "w")]) ++ ++(define_mode_attr ilasxfmt_u ++ [(V4DF "lu") ++ (V8SF "wu")]) ++ ++;; This attribute gives suffix for integers in VHMODE256. ++(define_mode_attr hlasxfmt ++ [(V4DI "w") ++ (V8SI "h") ++ (V16HI "b")]) ++ ++(define_mode_attr hlasxfmt_u ++ [(V4DI "wu") ++ (V8SI "hu") ++ (V16HI "bu")]) ++ ++;; This attribute gives suffix for integers in VHSMODE256. ++(define_mode_attr hslasxfmt ++ [(V4DI "w") ++ (V8SI "h") ++ (V16HI "b")]) ++ ++;; This attribute gives define_insn suffix for LASX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lasxfmt_f ++ [(V4DF "d_f") ++ (V8SF "w_f") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++(define_mode_attr flasxfmt_f ++ [(V4DF "d_f") ++ (V8SF "s_f") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++;; This attribute gives define_insn suffix for LASX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lasxfmt_f_wd ++ [(V4DF "d_f") ++ (V8SF "w_f") ++ (V4DI "d") ++ (V8SI "w")]) ++ ++;; This attribute gives suffix for integers in VHMODE256. 
++(define_mode_attr dlasxfmt ++ [(V8SI "d") ++ (V16HI "w") ++ (V32QI "h")]) ++ ++(define_mode_attr dlasxfmt_u ++ [(V8SI "du") ++ (V16HI "wu") ++ (V32QI "hu")]) ++ ++;; for VDMODEEXD256 ++(define_mode_attr dlasxqfmt ++ [(V4DI "q") ++ (V8SI "d") ++ (V16HI "w") ++ (V32QI "h")]) ++ ++;; This is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr indeximm256 ++ [(V4DF "0_to_3") ++ (V8SF "0_to_7") ++ (V4DI "0_to_3") ++ (V8SI "0_to_7") ++ (V16HI "uimm4") ++ (V32QI "uimm5")]) ++ ++;; This is used to form an immediate operand constraint using to ref high half ++;; "const__operand". ++(define_mode_attr indeximm_hi ++ [(V4DF "2_or_3") ++ (V8SF "4_to_7") ++ (V4DI "2_or_3") ++ (V8SI "4_to_7") ++ (V16HI "8_to_15") ++ (V32QI "16_to_31")]) ++ ++;; This is used to form an immediate operand constraint using to ref low half ++;; "const__operand". ++(define_mode_attr indeximm_lo ++ [(V4DF "0_or_1") ++ (V8SF "0_to_3") ++ (V4DI "0_or_1") ++ (V8SI "0_to_3") ++ (V16HI "uimm3") ++ (V32QI "uimm4")]) ++ ++;; This attribute represents bitmask needed for vec_merge using in lasx ++;; "const__operand". ++(define_mode_attr bitmask256 ++ [(V4DF "exp_4") ++ (V8SF "exp_8") ++ (V4DI "exp_4") ++ (V8SI "exp_8") ++ (V16HI "exp_16") ++ (V32QI "exp_32")]) ++ ++;; This attribute represents bitmask needed for vec_merge using to ref low half ++;; "const__operand". ++(define_mode_attr bitmask_lo ++ [(V4DF "exp_2") ++ (V8SF "exp_4") ++ (V4DI "exp_2") ++ (V8SI "exp_4") ++ (V16HI "exp_8") ++ (V32QI "exp_16")]) ++ ++ ++;; This attribute is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr bitimm256 ++ [(V32QI "uimm3") ++ (V16HI "uimm4") ++ (V8SI "uimm5") ++ (V4DI "uimm6")]) ++ ++ ++(define_mode_attr d2lasxfmt ++ [(V8SI "q") ++ (V16HI "d") ++ (V32QI "w")]) ++ ++(define_mode_attr d2lasxfmt_u ++ [(V8SI "qu") ++ (V16HI "du") ++ (V32QI "wu")]) ++ ++(define_mode_attr VD2MODE256 ++ [(V8SI "V4DI") ++ (V16HI "V4DI") ++ (V32QI "V8SI")]) ++ ++(define_mode_attr lasxfmt_wd ++ [(V4DI "d") ++ (V8SI "w") ++ (V16HI "w") ++ (V32QI "w")]) ++ ++(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_S]) ++ ++(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_D]) ++ ++(define_int_attr frint256_pattern_s ++ [(UNSPEC_LASX_XVFRINTRP_S "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_S "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_S "floor")]) ++ ++(define_int_attr frint256_pattern_d ++ [(UNSPEC_LASX_XVFRINTRP_D "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_D "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_D "floor")]) ++ ++(define_int_attr frint256_suffix ++ [(UNSPEC_LASX_XVFRINTRP_S "rp") ++ (UNSPEC_LASX_XVFRINTRP_D "rp") ++ (UNSPEC_LASX_XVFRINTRZ_S "rz") ++ (UNSPEC_LASX_XVFRINTRZ_D "rz") ++ (UNSPEC_LASX_XVFRINT "") ++ (UNSPEC_LASX_XVFRINTRM_S "rm") ++ (UNSPEC_LASX_XVFRINTRM_D "rm")]) ++ ++(define_expand "vec_init" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++(define_expand "vec_initv32qiv16qi" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand:V16QI 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_group_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++;; FIXME: Delete. 
++(define_insn "vec_pack_trunc_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_concat: ++ (truncate: ++ (match_operand:ILASX_DWH 1 "register_operand" "f")) ++ (truncate: ++ (match_operand:ILASX_DWH 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "") ++ (set_attr "length" "8")]) ++ ++(define_expand "vec_unpacks_hi_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, ++ true/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_lo_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, ++ false/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, ++ true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacks_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_insn "lasx_xvinsgr2vr_" ++ [(set (match_operand:ILASX_DW 0 "register_operand" "=f") ++ (vec_merge:ILASX_DW ++ (vec_duplicate:ILASX_DW ++ (match_operand: 1 "reg_or_0_operand" "rJ")) ++ (match_operand:ILASX_DW 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++{ ++#if 0 ++ if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode)) ++ return "#"; ++ else ++#endif ++ return "xvinsgr2vr.\t%u0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "vec_concatv4di" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv8si" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (match_operand:V4SI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv16hi" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (match_operand:V8HI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" 
"simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv32qi" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "0") ++ (match_operand:V16QI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv4df" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "0") ++ (match_operand:V2DF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "vec_concatv8sf" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "0") ++ (match_operand:V4SF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++;; xshuf.w ++(define_insn "lasx_xvperm_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W ++ [(match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVPERM_W))] ++ "ISA_HAS_LASX" ++ "xvperm.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;; xvpermi.d ++(define_insn "lasx_xvpermi_d_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_D))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi_d__1" ++ [(set (match_operand:LASX_D 0 "register_operand" "=f") ++ (vec_select:LASX_D ++ (match_operand:LASX_D 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand")])))] ++ "ISA_HAS_LASX" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ return "xvpermi.d\t%u0,%u1,%2"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;; xvpermi.q ++(define_insn "lasx_xvpermi_q_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "0") ++ (match_operand:LASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_Q))] ++ "ISA_HAS_LASX" ++ "xvpermi.q\t%u0,%u2,%3" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve2gr_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (any_extend:DI ++ (vec_select:DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.d\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V4DI")]) ++ ++(define_expand "vec_set" ++ [(match_operand:ILASX_DW 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsgr2vr_ (operands[0], operands[1], ++ operands[0], index)); ++ DONE; 
++}) ++ ++(define_expand "vec_set" ++ [(match_operand:FLASX 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsve0__scalar (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_extract (operands[0], operands[1], ++ INTVAL (operands[2])); ++ DONE; ++}) ++ ++(define_expand "vec_perm" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "register_operand") ++ (match_operand:LASX 2 "register_operand") ++ (match_operand: 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_perm_1 (operands); ++ DONE; ++}) ++ ++;; FIXME: 256?? ++(define_expand "vcondu" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:ILASX 4 "register_operand") ++ (match_operand:ILASX 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) ++ == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, ++ operands); ++ DONE; ++}) ++ ++;; FIXME: 256?? ++(define_expand "vcond" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LASX_2 4 "register_operand") ++ (match_operand:LASX_2 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) ++ == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, ++ operands); ++ DONE; ++}) ++ ++;; Same as vcond_ ++(define_expand "vcond_mask_" ++ [(match_operand:ILASX 0 "register_operand") ++ (match_operand:ILASX 1 "reg_or_m1_operand") ++ (match_operand:ILASX 2 "reg_or_0_operand") ++ (match_operand:ILASX 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_cond_mask_expr (mode, ++ mode, operands); ++ DONE; ++}) ++ ++(define_expand "lasx_xvrepli" ++ [(match_operand:ILASX 0 "register_operand") ++ (match_operand 1 "const_imm10_operand")] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V32QImode) ++ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), ++ mode)); ++ emit_move_insn (operands[0], ++ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); ++ DONE; ++}) ++ ++(define_expand "mov" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++ ++(define_expand "movmisalign" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; 256-bit LASX modes can only exist in LASX registers or memory. 
++(define_insn "mov_lasx" ++ [(set (match_operand:LASX 0 "nonimmediate_operand" "=f,f,R,*r,*f") ++ (match_operand:LASX 1 "move_operand" "fYGYI,R,f,*f,*r"))] ++ "ISA_HAS_LASX" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") ++ (set_attr "mode" "") ++ (set_attr "length" "8,4,4,4,4")]) ++ ++ ++(define_split ++ [(set (match_operand:LASX 0 "nonimmediate_operand") ++ (match_operand:LASX 1 "move_operand"))] ++ "reload_completed && ISA_HAS_LASX ++ && loongarch_split_move_insn_p (operands[0], operands[1])" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Offset load ++(define_expand "lasx_mxld_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); ++ DONE; ++}) ++ ++;; Offset store ++(define_expand "lasx_mxst_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); ++ DONE; ++}) ++ ++;; LASX ++(define_insn "add3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (plus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "xvadd.\t%u0,%u1,%u2"; ++ case 1: ++ { ++ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); ++ ++ operands[2] = GEN_INT (-val); ++ return "xvsubi.\t%u0,%u1,%d2"; ++ } ++ case 2: ++ return "xvaddi.\t%u0,%u1,%E2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (minus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsub.\t%u0,%u1,%u2 ++ xvsubi.\t%u0,%u1,%E2" ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mult:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmadd_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvmadd.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_insn "lasx_xvmsub_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (minus:ILASX (match_operand:ILASX 1 "register_operand" "0") ++ (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvmsub.\t%u0,%u2,%u3" ++ [(set_attr "type" 
"simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (div:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "udiv3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (udiv:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "mod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "umod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (umod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (xor:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvxor.v\t%u0,%u1,%u2 ++ xvbitrevi.%v0\t%u0,%u1,%V2 ++ xvxori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "ior3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (ior:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvor.v\t%u0,%u1,%u2 ++ xvbitseti.%v0\t%u0,%u1,%V2 ++ xvori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "and3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (and:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "xvand.v\t%u0,%u1,%u2"; ++ case 1: ++ { ++ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); ++ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); ++ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); ++ return "xvbitclri.%v0\t%u0,%u1,%V2"; ++ } ++ case 2: ++ return "xvandi.b\t%u0,%u1,%B2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (not:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvnor.v\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V32QI")]) ++ ++;; LASX ++(define_insn "vlshr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (lshiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ 
++ xvsrl.\t%u0,%u1,%u2 ++ xvsrli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX ">>" ++(define_insn "vashr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsra.\t%u0,%u1,%u2 ++ xvsrai.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX "<<" ++(define_insn "vashl3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashift:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsll.\t%u0,%u1,%u2 ++ xvslli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "add3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (plus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (minus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (mult:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fmul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (div:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfdiv.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "fma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fnma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (neg:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvfnmsub.\t%u0,%u1,%u2,%u0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sqrt2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvadda_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")) ++ (abs:ILASX (match_operand:ILASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvadda.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "ssadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ss_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ 
(match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "usadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (us_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_S))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_U))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavg_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_S))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavg_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_U))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_S))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_U))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITCLR))] ++ "ISA_HAS_LASX" ++ "xvbitclr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITCLRI))] ++ "ISA_HAS_LASX" ++ "xvbitclri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitrev_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITREV))] ++ "ISA_HAS_LASX" ++ "xvbitrev.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ 
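A minimal sketch, not part of the patch: because the integer and floating-point arithmetic patterns above use the standard GCC pattern names (add/sub/mul/div, fma, the vector shift patterns, and so on), ordinary GNU C generic vectors of the right width can map directly onto single LASX instructions such as xvadd.w, xvmul.w, xvslli.w and xvfmadd.s. The typedefs below are illustrative assumptions, not types provided by the patch.

    /* Sketch: 256-bit generic vectors exercising the standard-named
       LASX patterns defined above (compile with -mlasx).  */
    typedef int   v8si __attribute__ ((vector_size (32)));
    typedef float v8sf __attribute__ ((vector_size (32)));

    v8si add_w (v8si a, v8si b)          { return a + b; }      /* -> xvadd.w   */
    v8si mul_w (v8si a, v8si b)          { return a * b; }      /* -> xvmul.w   */
    v8si sll_w (v8si a)                  { return a << 3; }     /* -> xvslli.w  */
    v8sf madd_s (v8sf a, v8sf b, v8sf c) { return a * b + c; }  /* may contract
                                                                   to xvfmadd.s */

Whether the a * b + c form actually contracts into the fused pattern depends on the -ffp-contract setting; the plain add, multiply and shift cases map one-to-one.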
++(define_insn "lasx_xvbitrevi_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITREVI))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitsel_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (ior:LASX (and:LASX (not:LASX ++ (match_operand:LASX 3 "register_operand" "f")) ++ (match_operand:LASX 1 "register_operand" "f")) ++ (and:LASX (match_dup 3) ++ (match_operand:LASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvbitsel.v\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseli_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (ior:V32QI (and:V32QI (not:V32QI ++ (match_operand:V32QI 1 "register_operand" "0")) ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (and:V32QI (match_dup 1) ++ (match_operand:V32QI 3 "const_vector_same_val_operand" "Urv8"))))] ++ "ISA_HAS_LASX" ++ "xvbitseli.b\t%u0,%u2,%B3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvbitset_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITSET))] ++ "ISA_HAS_LASX" ++ "xvbitset.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseti_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITSETI))] ++ "ISA_HAS_LASX" ++ "xvbitseti.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvs_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ICC:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvs.\t%u0,%u1,%u2 ++ xvs.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_cmp" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:LASX 2 "register_operand") ++ (match_operand:LASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_expand "vec_cmpu" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:ILASX 2 "register_operand") ++ (match_operand:ILASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_insn "lasx_xvfclass_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCLASS))] ++ "ISA_HAS_LASX" ++ "xvfclass.\t%u0,%u1" ++ [(set_attr "type" "simd_fclass") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_caf_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CAF))] ++ "ISA_HAS_LASX" ++ "xvfcmp.caf.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_cune_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: 
[(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CUNE))] ++ "ISA_HAS_LASX" ++ "xvfcmp.cune.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN ++ UNSPEC_LASX_XVFCMP_SOR UNSPEC_LASX_XVFCMP_SEQ ++ UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE ++ UNSPEC_LASX_XVFCMP_SULT UNSPEC_LASX_XVFCMP_SLE ++ UNSPEC_LASX_XVFCMP_SLT]) ++ ++(define_int_attr fsc256 ++ [(UNSPEC_LASX_XVFCMP_SAF "saf") ++ (UNSPEC_LASX_XVFCMP_SUN "sun") ++ (UNSPEC_LASX_XVFCMP_SOR "sor") ++ (UNSPEC_LASX_XVFCMP_SEQ "seq") ++ (UNSPEC_LASX_XVFCMP_SNE "sne") ++ (UNSPEC_LASX_XVFCMP_SUEQ "sueq") ++ (UNSPEC_LASX_XVFCMP_SUNE "sune") ++ (UNSPEC_LASX_XVFCMP_SULE "sule") ++ (UNSPEC_LASX_XVFCMP_SULT "sult") ++ (UNSPEC_LASX_XVFCMP_SLE "sle") ++ (UNSPEC_LASX_XVFCMP_SLT "slt")]) ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vfcond: (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ FSC256_UNS))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_mode_attr fint256 ++ [(V8SF "v8si") ++ (V4DF "v4di")]) ++ ++(define_mode_attr FINTCNV256 ++ [(V8SF "I2S") ++ (V4DF "I2D")]) ++ ++(define_mode_attr FINTCNV256_2 ++ [(V8SF "S2I") ++ (V4DF "D2I")]) ++ ++(define_insn "float2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (float:FLASX (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "floatuns2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unsigned_float:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr FFQ256 ++ [(V4SF "V16HI") ++ (V2DF "V8SI")]) ++ ++(define_insn "lasx_xvreplgr2vr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (vec_duplicate:ILASX ++ (match_operand: 1 "reg_or_0_operand" "r,J")))] ++ "ISA_HAS_LASX" ++{ ++ if (which_alternative == 1) ++ return "xvldi.b\t%u0,0" ; ++ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "xvreplgr2vr.\t%u0,%z1"; ++} ++ [(set_attr "type" "simd_fill") ++ (set_attr "mode" "") ++ (set_attr "length" "8")]) ++ ++(define_insn "logb2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFLOGB))] ++ "ISA_HAS_LASX" ++ "xvflogb.\t%u0,%u1" ++ [(set_attr "type" "simd_flog2") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "smax3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smax:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmax.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmaxa_" ++ [(set 
(match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (gt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmaxa.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smin:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmin.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmina_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (lt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmina.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrecip_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRECIP))] ++ "ISA_HAS_LASX" ++ "xvfrecip.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrint_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrsqrt_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSQRT))] ++ "ISA_HAS_LASX" ++ "xvfrsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_S))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_U))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_insn "fix_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "fixuns_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvhw_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsub:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) 
++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvhw_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsub:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvhw_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsub:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvpackev_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 2) (const_int 34) ++ (const_int 4) (const_int 36) ++ (const_int 6) (const_int 38) ++ (const_int 8) (const_int 40) ++ (const_int 10) (const_int 42) ++ (const_int 12) (const_int 44) ++ (const_int 14) (const_int 46) ++ (const_int 16) (const_int 48) ++ (const_int 18) (const_int 50) ++ (const_int 20) (const_int 52) ++ (const_int 22) (const_int 54) ++ (const_int 24) (const_int 56) ++ (const_int 26) (const_int 58) ++ (const_int 28) (const_int 60) ++ (const_int 30) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackev_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 2) (const_int 18) ++ (const_int 4) (const_int 20) ++ (const_int 6) (const_int 22) ++ (const_int 8) (const_int 24) ++ (const_int 10) (const_int 26) ++ (const_int 12) (const_int 28) ++ (const_int 14) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpackev_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ 
(const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpackev_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvh_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 8) (const_int 40) ++ (const_int 9) (const_int 41) ++ (const_int 10) (const_int 42) ++ (const_int 11) (const_int 43) ++ (const_int 12) (const_int 44) ++ (const_int 13) (const_int 45) ++ (const_int 14) (const_int 46) ++ (const_int 15) (const_int 47) ++ (const_int 24) (const_int 56) ++ (const_int 25) (const_int 57) ++ (const_int 26) (const_int 58) ++ (const_int 27) (const_int 59) ++ (const_int 28) (const_int 60) ++ (const_int 29) (const_int 61) ++ (const_int 30) (const_int 62) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvh_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 4) (const_int 20) ++ (const_int 5) (const_int 21) ++ (const_int 6) (const_int 22) ++ (const_int 7) (const_int 23) ++ (const_int 12) (const_int 28) ++ (const_int 13) (const_int 29) ++ (const_int 14) (const_int 30) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_mode_attr xvilvh_suffix ++ [(V8SI "") (V8SF "_f") ++ (V4DI "") (V4DF "_f")]) ++ ++(define_insn "lasx_xvilvh_w" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (vec_concat: ++ (match_operand:LASX_W 1 "register_operand" "f") ++ (match_operand:LASX_W 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 10) ++ (const_int 3) (const_int 11) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvilvh_d" ++ [(set (match_operand:LASX_D 0 "register_operand" "=f") ++ (vec_select:LASX_D ++ (vec_concat: ++ (match_operand:LASX_D 1 "register_operand" "f") ++ (match_operand:LASX_D 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpackod_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 33) ++ (const_int 3) (const_int 35) ++ (const_int 5) (const_int 37) ++ (const_int 7) (const_int 39) ++ 
(const_int 9) (const_int 41) ++ (const_int 11) (const_int 43) ++ (const_int 13) (const_int 45) ++ (const_int 15) (const_int 47) ++ (const_int 17) (const_int 49) ++ (const_int 19) (const_int 51) ++ (const_int 21) (const_int 53) ++ (const_int 23) (const_int 55) ++ (const_int 25) (const_int 57) ++ (const_int 27) (const_int 59) ++ (const_int 29) (const_int 61) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackod_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 17) ++ (const_int 3) (const_int 19) ++ (const_int 5) (const_int 21) ++ (const_int 7) (const_int 23) ++ (const_int 9) (const_int 25) ++ (const_int 11) (const_int 27) ++ (const_int 13) (const_int 29) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++ ++(define_insn "lasx_xvpackod_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvpackod_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 1) (const_int 33) ++ (const_int 2) (const_int 34) ++ (const_int 3) (const_int 35) ++ (const_int 4) (const_int 36) ++ (const_int 5) (const_int 37) ++ (const_int 6) (const_int 38) ++ (const_int 7) (const_int 39) ++ (const_int 16) (const_int 48) ++ (const_int 17) (const_int 49) ++ (const_int 18) (const_int 50) ++ (const_int 19) (const_int 51) ++ (const_int 20) (const_int 52) ++ (const_int 21) (const_int 53) ++ (const_int 22) (const_int 54) ++ (const_int 23) (const_int 55)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvl_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 1) (const_int 17) ++ (const_int 2) (const_int 18) ++ (const_int 3) (const_int 19) ++ (const_int 8) (const_int 24) ++ (const_int 9) (const_int 25) ++ (const_int 10) (const_int 26) ++ (const_int 11) (const_int 27)])))] ++ "ISA_HAS_LASX" ++ 
"xvilvl.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvilvl_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvilvl_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_select:V4DI ++ (vec_concat:V8DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvilvl_d_f" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_select:V4DF ++ (vec_concat:V8DF ++ (match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvclo_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (clz:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvclo.\t%u0,%u1" ++ 
[(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "clz2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (clz:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvclz.\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvnor_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (and:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f,f")) ++ (not:ILASX (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] ++ "ISA_HAS_LASX" ++ "@ ++ xvnor.v\t%u0,%u1,%u2 ++ xvnori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickev_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 32) (const_int 34) ++ (const_int 36) (const_int 38) ++ (const_int 40) (const_int 42) ++ (const_int 44) (const_int 46) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30) ++ (const_int 48) (const_int 50) ++ (const_int 52) (const_int 54) ++ (const_int 56) (const_int 58) ++ (const_int 60) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickev_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpickev_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpickev_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvpickod_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) 
++ (const_int 33) (const_int 35) ++ (const_int 37) (const_int 39) ++ (const_int 41) (const_int 43) ++ (const_int 45) (const_int 47) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31) ++ (const_int 49) (const_int 51) ++ (const_int 53) (const_int 55) ++ (const_int 57) (const_int 59) ++ (const_int 61) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickod_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpickod_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpickod_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "popcount2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (popcount:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvpcnt.\t%u0,%u1" ++ [(set_attr "type" "simd_pcnt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvsat_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_S))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsat_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_U))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_" ++ [(set (match_operand:LASX_WHB_W 0 "register_operand" "=f") ++ (unspec:LASX_WHB_W [(match_operand:LASX_WHB_W 1 "register_operand" "f") ++ (match_operand 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i__1" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ 
(parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand") ++ (match_operand 6 "const_4_to_7_operand") ++ (match_operand 7 "const_4_to_7_operand") ++ (match_operand 8 "const_4_to_7_operand") ++ (match_operand 9 "const_4_to_7_operand")])))] ++ "ISA_HAS_LASX ++ && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ ++ return "xvshuf4i.w\t%u0,%u1,%2"; ++} ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrar_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAR))] ++ "ISA_HAS_LASX" ++ "xvsrar.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrari_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRARI))] ++ "ISA_HAS_LASX" ++ "xvsrari.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLR))] ++ "ISA_HAS_LASX" ++ "xvsrlr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRLRI))] ++ "ISA_HAS_LASX" ++ "xvsrlri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ss_minus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (us_minus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_" ++ [(set (match_operand:LASX_DWH 0 "register_operand" "=f") ++ (unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0") ++ (match_operand:LASX_DWH 2 "register_operand" "f") ++ (match_operand:LASX_DWH 3 "register_operand" "f")] ++ UNSPEC_LASX_XVSHUF))] ++ "ISA_HAS_LASX" ++ "xvshuf.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f") ++ (match_operand:V32QI 3 "register_operand" "f")] ++ 
UNSPEC_LASX_XVSHUF_B))] ++ "ISA_HAS_LASX" ++ "xvshuf.b\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvreplve0_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (vec_select: ++ (match_operand:LASX 1 "register_operand" "f") ++ (parallel [(const_int 0)]))))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvrepl128vei_b_internal" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_duplicate:V32QI ++ (vec_select:V32QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm4_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_16_to_31_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 16)" ++ "xvrepl128vei.b\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvrepl128vei_h_internal" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_duplicate:V16HI ++ (vec_select:V16HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) ++ (match_operand 3 "const_8_to_15_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 8)" ++ "xvrepl128vei.h\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvrepl128vei_w_internal" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_duplicate:V8SI ++ (vec_select:V8SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_4_to_7_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 4)" ++ "xvrepl128vei.w\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvrepl128vei_d_internal" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_duplicate:V4DI ++ (vec_select:V4DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_or_1_operand" "") ++ (match_dup 2) ++ (match_operand 3 "const_2_or_3_operand" "") ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 2)" ++ "xvrepl128vei.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrepl128vei_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVREPL128VEI))] ++ "ISA_HAS_LASX" ++ "xvrepl128vei.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvreplve0__scalar" 
++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvreplve0_q" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVREPLVE0_Q))] ++ "ISA_HAS_LASX" ++ "xvreplve0.q\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvfcvt_h_s" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (unspec:V16HI [(match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.h.s\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvfcvt_s_d" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "vec_pack_trunc_v4df" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (float_truncate:V4SF (match_operand:V4DF 1 "register_operand" "f")) ++ (float_truncate:V4SF (match_operand:V4DF 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF") ++ (set_attr "length" "8")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvth_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTH))] ++ "ISA_HAS_LASX" ++ "xvfcvth.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvth_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvfcvth.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "12")]) ++ ++;; Define for gen insn. ++(define_insn "lasx_xvfcvth_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "12")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvtl_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTL))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. 
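++;; The pattern below keeps, per 128-bit lane, only the two low single-precision
++;; elements (indices 0, 1, 4, 5 of the V8SF source) and widens them to double.
++;; As an illustrative pseudo-C sketch (field names are only notation):
++;;   dst.d[0] = (double) src.s[0];  dst.d[1] = (double) src.s[1];
++;;   dst.d[2] = (double) src.s[4];  dst.d[3] = (double) src.s[5];
++;; The "gen insn" variant further down instead widens the overall low four
++;; elements, using xvpermi.d 0x50 to place them into each lane first.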
++(define_insn "lasx_xvfcvtl_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 4) (const_int 5)]))))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "8")]) ++ ++;; Define for gen insn. ++(define_insn "lasx_xvfcvtl_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "8")]) ++ ++(define_code_attr lasxbr ++ [(eq "xbz") ++ (ne "xbnz")]) ++ ++(define_code_attr lasxeq_v ++ [(eq "eqz") ++ (ne "nez")]) ++ ++(define_code_attr lasxne_v ++ [(eq "nez") ++ (ne "eqz")]) ++ ++(define_code_attr lasxeq ++ [(eq "anyeqz") ++ (ne "allnez")]) ++ ++(define_code_attr lasxne ++ [(eq "allnez") ++ (ne "anyeqz")]) ++ ++(define_insn "lasx__" ++ [(set (pc) ++ (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx__v_" ++ [(set (pc) ++ (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH_V) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "")]) ++ ++;; loongson-asx. 
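++;; The vext2xv patterns below take the lowest elements of the source vector
++;; and widen each one to the destination element size; the extension is
++;; sign- or zero-extending depending on the iterator.  Illustrative
++;; pseudo-C for vext2xv.h.b (names are only notation):
++;;   for (i = 0; i < 16; i++)
++;;     dst.h[i] = (int16_t) src.b[i];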
++(define_insn "lasx_vext2xv_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7) ++ (const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.h.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_vext2xv_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.h\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_w_b" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_h" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.h\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_d_b" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++;; Extend loongson-sx to loongson-asx. 
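++;; The group below provides 256-bit counterparts of generic operations
++;; (andn, abs, neg, ...).  The vector abs pattern maps to xvsigncov with the
++;; same register used for both sources; assuming the usual signcov reading
++;; (negate when the control element is negative, zero when it is zero, copy
++;; otherwise), that yields |x| in every element, roughly:
++;;   dst[i] = src[i] < 0 ? -src[i] : src[i];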
++(define_insn "xvandn3" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f")) ++ (match_operand:LASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvandn.v\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "abs2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "neg2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (neg:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvneg.\t%u0,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_S))] ++ "ISA_HAS_LASX" ++ "xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_U))] ++ "ISA_HAS_LASX" ++ "xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsllwil_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_S))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsllwil_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_U))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsran__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAN))] ++ "ISA_HAS_LASX" ++ "xvsran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_S))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_U))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRARN))] ++ "ISA_HAS_LASX" ++ "xvsrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn 
"lasx_xvssrarn_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_S))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_U))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLN))] ++ "ISA_HAS_LASX" ++ "xvsrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrln_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN_U))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLRN))] ++ "ISA_HAS_LASX" ++ "xvsrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLRN_U))] ++ "ISA_HAS_LASX" ++ "xvssrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstpi_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand 3 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVFRSTPI))] ++ "ISA_HAS_LASX" ++ "xvfrstpi.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstp_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand:ILASX_HB 3 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSTP))] ++ "ISA_HAS_LASX" ++ "xvfrstp.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.d\t%u0,%u2,%3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvbsrl_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSRL_V))] ++ "ISA_HAS_LASX" ++ "xvbsrl.v\t%u0,%u1,%2" ++ [(set_attr "type" 
"simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbsll_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSLL_V))] ++ "ISA_HAS_LASX" ++ "xvbsll.v\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextrins_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVEXTRINS))] ++ "ISA_HAS_LASX" ++ "xvextrins.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmskltz_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKLTZ))] ++ "ISA_HAS_LASX" ++ "xvmskltz.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsigncov_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSIGNCOV))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "copysign3" ++ [(set (match_dup 4) ++ (and:FLASX ++ (not:FLASX (match_dup 3)) ++ (match_operand:FLASX 1 "register_operand"))) ++ (set (match_dup 5) ++ (and:FLASX (match_dup 3) ++ (match_operand:FLASX 2 "register_operand"))) ++ (set (match_operand:FLASX 0 "register_operand") ++ (ior:FLASX (match_dup 4) (match_dup 5)))] ++ "ISA_HAS_LASX" ++{ ++ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); ++ ++ operands[4] = gen_reg_rtx (mode); ++ operands[5] = gen_reg_rtx (mode); ++}) ++ ++ ++(define_insn "absv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (abs:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "absv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (abs:V8SF (match_operand:V8SF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "negv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "negv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "xvfmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,$u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fms4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" 
"f"))))] ++ "ISA_HAS_LASX" ++ "xvfmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvfnmsub4_nmsub4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f")))))] ++ "ISA_HAS_LASX" ++ "xvfnmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "xvfnmadd4_nmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfnmadd.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftintrne_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrne_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrp_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrm_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftint_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_W_D))] ++ "ISA_HAS_LASX" ++ "xvftint.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvffint_s_l" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINT_S_L))] ++ "ISA_HAS_LASX" ++ "xvffint.s.l\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvftintrz_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI 
[(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZ_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrz.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrne_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftinth_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftinth.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvffinth_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTH_D_W))] ++ "ISA_HAS_LASX" ++ "xvffinth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvffintl_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTL_D_W))] ++ "ISA_HAS_LASX" ++ "xvffintl.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvftintrzh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrzl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lasx_xvftintrph_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrph.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lasx_xvftintrpl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI 
[(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrpl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrmh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRMH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrmh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrml_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRML_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrml.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrneh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrneh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrnel_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrnel.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRNE_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRNE_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrz_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrz_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrp_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrp_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrm_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrm_d" ++ [(set 
(match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Vector versions of the floating-point frint patterns. ++;; Expands to btrunc, ceil, floor, rint. ++(define_insn "v8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ FRINT256_S))] ++ "ISA_HAS_LASX" ++ "xvfrint.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "v4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ FRINT256_D))] ++ "ISA_HAS_LASX" ++ "xvfrint.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Expands to round. ++(define_insn "round2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; Offset load and broadcast ++(define_expand "lasx_xvldrepl_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 2 "aq12_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvldrepl__insn ++ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_insn "lasx_xvldrepl__insn" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "aq12_operand")))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,%2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset is "0" ++(define_insn "lasx_xvldrepl__insn_0" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (mem: (match_operand:DI 1 "register_operand" "r"))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,0"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B ++;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU ++(define_insn "lasx_xvwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.H XVSUBWEV.W.H XVMULWEV.W.H ++;;XVADDWEV.W.HU XVSUBWEV.W.HU XVMULWEV.W.HU ++(define_insn "lasx_xvwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 
"register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.W XVSUBWEV.D.W XVMULWEV.D.W ++;;XVADDWEV.D.WU XVSUBWEV.D.WU XVMULWEV.D.WU ++(define_insn "lasx_xvwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++ ++;;XVADDWOD.H.B XVSUBWOD.H.B XVMULWOD.H.B ++;;XVADDWOD.H.BU XVSUBWOD.H.BU XVMULWOD.H.BU ++(define_insn "lasx_xvwod_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.H XVSUBWOD.W.H XVMULWOD.W.H ++;;XVADDWOD.W.HU XVSUBWOD.W.HU XVMULWOD.W.HU ++(define_insn "lasx_xvwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ 
(addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++ ++;;XVADDWOD.D.W XVSUBWOD.D.W XVMULWOD.D.W ++;;XVADDWOD.D.WU XVSUBWOD.D.WU XVMULWOD.D.WU ++(define_insn "lasx_xvwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV2))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV2))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ 
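++;; The widening even/odd (wev/wod) add, subtract and multiply patterns in
++;; this block all follow one scheme: vec_select the even-numbered (wev) or
++;; odd-numbered (wod) elements of each source, widen them to twice the
++;; element width by sign or zero extension, then apply the add/sub/mul.
++;; As illustrative pseudo-code, xvaddwev.h.b computes, for i = 0..15,
++;;   dst[i] = (int16_t) src1[2 * i] + (int16_t) src2[2 * i]
++;; and the .bu/.hu/.wu forms zero-extend instead.  The .q.d and .q.du
++;; variants work on 128-bit elements, which are not modeled as a vector
++;; mode in this port, so they are kept as unspecs (hence the TODO2 marks).
++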
++;;XVADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD2))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD2))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.H.BU.B XVMULWEV.H.BU.B ++(define_insn "lasx_xvwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.HU.H XVMULWEV.W.HU.H ++(define_insn "lasx_xvwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.WU.W XVMULWEV.D.WU.W ++(define_insn "lasx_xvwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.H.BU.B XVMULWOD.H.BU.B ++(define_insn 
"lasx_xvwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.HU.H XVMULWOD.W.HU.H ++(define_insn "lasx_xvwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWOD.D.WU.W XVMULWOD.D.WU.W ++(define_insn "lasx_xvwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.B XVMADDWEV.H.BU ++(define_insn "lasx_xvmaddwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.H XVMADDWEV.W.HU ++(define_insn "lasx_xvmaddwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI 
++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.W XVMADDWEV.D.WU ++(define_insn "lasx_xvmaddwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.B XVMADDWOD.H.BU ++(define_insn "lasx_xvmaddwod_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.H XVMADDWOD.W.HU ++(define_insn "lasx_xvmaddwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.W 
XVMADDWOD.D.WU ++(define_insn "lasx_xvmaddwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.BU.B ++(define_insn "lasx_xvmaddwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.HU.H ++(define_insn "lasx_xvmaddwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) 
(const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.WU.W ++(define_insn "lasx_xvmaddwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.BU.B ++(define_insn "lasx_xvmaddwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.HU.H ++(define_insn "lasx_xvmaddwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.WU.W ++(define_insn "lasx_xvmaddwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ 
(sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhaddw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_Q_D))] ++ "ISA_HAS_LASX" ++ "xvhaddw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhsubw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_Q_D))] ++ "ISA_HAS_LASX" ++ "xvhsubw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhaddw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhaddw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhsubw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhsubw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVROTR.B XVROTR.H XVROTR.W XVROTR.D ++;;TODO-478 ++(define_insn "lasx_xvrotr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVROTR))] ++ "ISA_HAS_LASX" ++ "xvrotr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVADD.Q ++;;TODO2 ++(define_insn "lasx_xvadd_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADD_Q))] ++ "ISA_HAS_LASX" ++ "xvadd.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUB.Q ++;;TODO2 ++(define_insn "lasx_xvsub_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUB_Q))] ++ "ISA_HAS_LASX" ++ "xvsub.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSSRLN.B.H XVSSRLN.H.W XVSSRLN.W.D ++(define_insn "lasx_xvssrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 
"register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVREPLVE.B XVREPLVE.H XVREPLVE.W XVREPLVE.D ++(define_insn "lasx_xvreplve_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_LASX_XVREPLVE))] ++ "ISA_HAS_LASX" ++ "xvreplve.\t%u0,%u1,%z2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;;XVADDWEV.Q.DU.D ++(define_insn "lasx_xvaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.DU.D ++(define_insn "lasx_xvaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU.D ++(define_insn "lasx_xvmulwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV3))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU.D ++(define_insn "lasx_xvmulwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD3))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvpickve2gr_w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (any_extend:SI ++ (vec_select:SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_7_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.w\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvmskgez_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKGEZ))] ++ "ISA_HAS_LASX" ++ "xvmskgez.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvmsknz_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKNZ))] ++ "ISA_HAS_LASX" ++ "xvmsknz.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvexth_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 16) (const_int 17) ++ (const_int 18) (const_int 19) ++ (const_int 20) (const_int 21) ++ (const_int 22) (const_int 23) ++ (const_int 24) (const_int 25) ++ (const_int 26) (const_int 27) ++ (const_int 28) (const_int 29) ++ (const_int 30) (const_int 31)]))))] ++ "ISA_HAS_LASX" ++ 
"xvexth.h.b\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvexth_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.w.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvexth_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_Q_D))] ++ "ISA_HAS_LASX" ++ "xvexth.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvexth.qu.du\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrotri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (rotatert:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")))] ++ "ISA_HAS_LASX" ++ "xvrotri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextl_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTL_Q_D))] ++ "ISA_HAS_LASX" ++ "xvextl.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvsrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLNI))] ++ "ISA_HAS_LASX" ++ "xvsrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvsrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI))] ++ "ISA_HAS_LASX" ++ "xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI2))] ++ "ISA_HAS_LASX" ++ 
"xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI2))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRANI))] ++ "ISA_HAS_LASX" ++ "xvsrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRARNI))] ++ "ISA_HAS_LASX" ++ "xvsrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI2))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI2))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr VDOUBLEMODEW256 ++ [(V8SI "V16SI") ++ (V8SF "V16SF")]) ++ ++(define_insn "lasx_xvpermi_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W [(match_operand:LASX_W 1 "register_operand" "0") ++ (match_operand:LASX_W 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ 
UNSPEC_LASX_XVPERMI))] ++ "ISA_HAS_LASX" ++ "xvpermi.w\t%u0,%u2,%3" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi__1" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (vec_concat: ++ (match_operand:LASX_W 1 "register_operand" "f") ++ (match_operand:LASX_W 2 "register_operand" "0")) ++ (parallel [(match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand" ) ++ (match_operand 5 "const_8_to_11_operand" ) ++ (match_operand 6 "const_8_to_11_operand" ) ++ (match_operand 7 "const_4_to_7_operand" ) ++ (match_operand 8 "const_4_to_7_operand" ) ++ (match_operand 9 "const_12_to_15_operand") ++ (match_operand 10 "const_12_to_15_operand")])))] ++ "ISA_HAS_LASX ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9]) ++ && INTVAL (operands[6]) + 4 == INTVAL (operands[10])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[3]) << 0; ++ mask |= INTVAL (operands[4]) << 2; ++ mask |= (INTVAL (operands[5]) - 8) << 4; ++ mask |= (INTVAL (operands[6]) - 8) << 6; ++ operands[3] = GEN_INT (mask); ++ ++ return "xvpermi.w\t%u0,%u1,%3"; ++} ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_expand "lasx_xvld" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (V32QImode, addr)); ++ DONE; ++}) ++ ++(define_expand "lasx_xvst" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (V32QImode, addr), operands[0]); ++ DONE; ++}) ++ ++(define_expand "lasx_xvstelm_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 3 "const__operand") ++ (match_operand 2 "aq8_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvstelm__insn ++ (operands[1], operands[2], operands[0], operands[3])); ++ DONE; ++}) ++ ++(define_insn "lasx_xvstelm__insn" ++ [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r") ++ (match_operand 1 "aq8_operand"))) ++ (vec_select: ++ (match_operand:LASX 2 "register_operand" "f") ++ (parallel [(match_operand 3 "const__operand" "")])))] ++ "ISA_HAS_LASX" ++{ ++ return "xvstelm.\t%u2,%0,%1,%3"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset is "0" ++(define_insn "lasx_xvstelm__insn_0" ++ [(set (mem: (match_operand:DI 0 "register_operand" "r")) ++ (vec_select: ++ (match_operand:LASX_WD 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "const__operand")])))] ++ "ISA_HAS_LASX" ++{ ++ return "xvstelm.\t%u1,%0,0,%2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_insn "lasx_xvinsve0_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "0") ++ (match_operand:LASX_WD 2 "register_operand" "f") ++ (match_operand 3 "const__operand" "")] ++ UNSPEC_LASX_XVINSVE0))] ++ "ISA_HAS_LASX" ++ "xvinsve0.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ 
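++;; xvinsve0.{w,d} copies element 0 of the source vector into the destination
++;; element selected by the immediate, leaving the other destination elements
++;; unchanged.  The "_scalar" variant below expresses the same insertion from
++;; a scalar FP register with vec_duplicate/vec_merge, and the corresponding
++;; xvpickve "_scalar" pattern extracts the selected element via vec_select.
++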
++(define_insn "lasx_xvinsve0__scalar" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_merge:FLASX ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++ "xvinsve0.\t%u0,%u1,%y3" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVPICKVE))] ++ "ISA_HAS_LASX" ++ "xvpickve.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve__scalar" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLASX 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")])))] ++ "ISA_HAS_LASX" ++ "xvpickve.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLRN))] ++ "ISA_HAS_LASX" ++ "xvssrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvorn3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvorn.v\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextl_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTL_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvextl.qu.du\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvldi" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI[(match_operand 1 "const_imm13_operand")] ++ UNSPEC_LASX_XVLDI))] ++ "ISA_HAS_LASX" ++{ ++ HOST_WIDE_INT val = INTVAL (operands[1]); ++ if (val < 0) ++ { ++ HOST_WIDE_INT modeVal = (val & 0xf00) >> 8; ++ if (modeVal < 13) ++ return "xvldi\t%u0,%1"; ++ else ++ { ++ sorry ("imm13 only support 0000 ~ 1100 in bits '12 ~ 9' when bit '13' is 1"); ++ return "#"; ++ } ++ } ++ else ++ return "xvldi\t%u0,%1"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvldx" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ")] ++ UNSPEC_LASX_XVLDX))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldx\t%u0,%1,%z2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvstx" ++ [(set (mem:V32QI (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ"))) ++ (unspec: V32QI[(match_operand:V32QI 0 "register_operand" "f")] ++ UNSPEC_LASX_XVSTX))] ++ ++ "ISA_HAS_LASX" ++{ ++ return "xvstx\t%u0,%1,%z2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "vec_widen_mult_even_v8si" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) 
(const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvmulwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;; Vector reduction operation ++(define_expand "reduc_plus_scal_v4di" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:V4DI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp1, tmp, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv4didi (operands[0], vec_res, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_v8si" ++ [(match_operand:SI 0 "register_operand") ++ (match_operand:V8SI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_d_w (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp1, tmp, tmp)); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp, tmp1, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv8sisi (operands[0], gen_lowpart (V8SImode,vec_res), ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc__scal_" ++ [(any_bitwise: ++ (match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h 
+new file mode 100644 +index 000000000..d39379927 +--- /dev/null ++++ b/gcc/config/loongarch/lasxintrin.h +@@ -0,0 +1,5338 @@ ++/* LARCH Loongson ASX intrinsics include file. ++ ++ Copyright (C) 2018 Free Software Foundation, Inc. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _GCC_LOONGSON_ASXINTRIN_H ++#define _GCC_LOONGSON_ASXINTRIN_H 1 ++ ++#if defined(__loongarch_asx) ++ ++typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); ++typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); ++typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); ++typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); ++typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); ++typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); ++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef float __m256 __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++typedef long long __m256i __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++typedef double __m256d __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. 
*/ ++#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. 
*/ ++#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. 
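The bit-set/clear/reverse intrinsics above operate per lane: the register forms take the bit index from the corresponding lane of the second operand (modulo the element width), while the _i forms take a constant index. A small sketch, not part of the patch, using only the immediate forms:

#include <lasxintrin.h>

/* Force the sign bit of each 32-bit lane and clear bit 0 of each byte.
   The bit indices must be integer constant expressions.  */
__m256i
force_sign_bits (__m256i v)
{
  return __lasx_xvbitseti_w (v, 31);   /* OR each lane with 0x80000000 */
}

__m256i
clear_byte_lsb (__m256i v)
{
  return __lasx_xvbitclri_b (v, 0);    /* AND each byte with 0xfe */
}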
*/ ++#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_bu ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_hu ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_bu ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_hu ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. 
*/ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
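The signed max/min pairs above combine naturally into lane-wise clamps. A sketch that is not part of the patch and additionally assumes the __lasx_xvreplgr2vr_w broadcast intrinsic from the same header:

#include <lasxintrin.h>

/* Clamp each of the eight signed 32-bit lanes of v to [lo, hi].  */
__m256i
clamp_s32 (__m256i v, int lo, int hi)
{
  __m256i vlo = __lasx_xvreplgr2vr_w (lo);   /* broadcast lo to every lane */
  __m256i vhi = __lasx_xvreplgr2vr_w (hi);   /* broadcast hi to every lane */
  return __lasx_xvmin_w (__lasx_xvmax_w (v, vlo), vhi);
}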
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. 
*/ ++#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. */ ++#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. 
*/ ++#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
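The comparison intrinsics above (xvseq/xvslt/xvsle and their immediate forms) return a full mask per lane, all ones where the relation holds and all zeros otherwise, so their results can drive a bitwise select. An illustrative sketch, not part of the patch, assuming the __lasx_xvbitsel_v bit-select intrinsic from the same header:

#include <lasxintrin.h>

/* Per-lane select: where a[i] < b[i] (signed 32-bit), take x[i], else y[i].  */
__m256i
select_lt (__m256i a, __m256i b, __m256i x, __m256i y)
{
  __m256i mask = __lasx_xvslt_w (a, b);    /* all-ones lanes where a < b   */
  return __lasx_xvbitsel_v (y, x, mask);   /* bits from x where mask set,
                                              otherwise from y             */
}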
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_hu_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_hu_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_wu_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_wu_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_du_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_du_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_hu_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_hu_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. 
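The widening horizontal adds above chain into reductions: each step adds odd-indexed elements of the first operand to even-indexed elements of the second while doubling the element width, so passing the same vector twice sums adjacent pairs. A sketch (not part of the patch) reducing 32 unsigned bytes to four 64-bit partial sums:

#include <lasxintrin.h>

/* Reduce 32 unsigned bytes to four unsigned 64-bit partial sums.  */
__m256i
byte_partial_sums (__m256i v)
{
  __m256i h = __lasx_xvhaddw_hu_bu (v, v);   /* 16 x u16 pairwise sums   */
  __m256i w = __lasx_xvhaddw_wu_hu (h, h);   /*  8 x u32 sums of 4 bytes */
  return __lasx_xvhaddw_du_wu (w, w);        /*  4 x u64 sums of 8 bytes */
}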
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_wu_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_wu_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_du_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_du_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. 
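++
++   A minimal usage sketch, assuming -mlasx; the second argument must be
++   a compile-time constant in the range 0..7:
++
++     __m256i splat = __lasx_xvrepl128vei_h (v, 0);
++
++   Within each 128-bit half of v, the selected 16-bit element is
++   replicated across all eight positions of that half.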
*/ ++#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui1. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
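++
++   A minimal usage sketch, assuming -mlasx (names are illustrative):
++
++     __m256i evens = __lasx_xvpackev_w (a, b);
++
++   The result collects the even-indexed 32-bit elements of the two
++   source vectors; their odd-indexed elements are discarded.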
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
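++
++   A minimal usage sketch, assuming -mlasx; clearing the low bit of
++   every byte lane (names are illustrative):
++
++     __m256i mask   = __lasx_xvreplgr2vr_b (0xFE);
++     __m256i masked = __lasx_xvand_v (data, mask);
++
++   The operation is a plain bitwise AND of the two 256-bit registers,
++   so the byte element width in the type list is only nominal.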
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvand_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvand_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvandi_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvnor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvnor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvnori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvxor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvxor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvxori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitsel_v (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvbitsel_v ((v32u8)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, USI. */ ++#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvbitseli_b ((v32u8)(_1), (v32u8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, USI. */ ++#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V16HI, V16HI, USI. */ ++#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, USI. */ ++#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj. 
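++
++   A minimal usage sketch, assuming -mlasx:
++
++     __m256i all_threes = __lasx_xvreplgr2vr_b (3);
++
++   The low byte of the general-purpose register argument is broadcast
++   to all thirty-two byte lanes of the destination.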
*/ ++/* Data types in instruction templates: V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_b (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_b ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_h (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_h ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_w (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_w ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V4DI, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_d (long int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_d ((long int)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
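++
++   A minimal usage sketch, assuming -mlasx (the variable is illustrative):
++
++     __m256i lz = __lasx_xvclz_b (v);
++
++   Each byte lane of the result holds the count of leading zero bits in
++   the corresponding byte lane of v, so an all-zero lane yields 8.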
*/ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfadd_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfadd_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfadd_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfadd_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsub_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfsub_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsub_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfsub_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmul_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmul_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmul_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmul_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfdiv_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfdiv_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. 
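++
++   A minimal usage sketch, assuming -mlasx (names are illustrative):
++
++     __m256d ratio = __lasx_xvfdiv_d (num, den);
++
++   The four double-precision lanes are divided independently, following
++   the usual IEEE-754 rules for infinities, NaNs and division by zero.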
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfdiv_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfdiv_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcvt_h_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcvt_h_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvt_s_d (__m256d _1, __m256d _2) ++{ ++ return (__m256)__builtin_lasx_xvfcvt_s_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmin_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmin_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmin_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmin_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmina_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmina_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmina_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmina_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmax_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmax_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmax_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmax_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmaxa_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmaxa_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmaxa_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmaxa_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsqrt_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfsqrt_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsqrt_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfsqrt_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrecip_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrecip_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrecip_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrecip_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrint_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrint_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrint_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrint_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrsqrt_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrsqrt_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrsqrt_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrsqrt_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvflogb_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvflogb_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvflogb_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvflogb_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V8SF, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvth_s_h (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvth_s_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvth_d_s (__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvth_d_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvtl_s_h (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvtl_s_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvtl_d_s (__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvtl_d_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_wu_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_wu_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_lu_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_lu_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_wu_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_wu_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_lu_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_lu_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_w (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_l (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_l ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_wu (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_wu ((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_lu (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_lu ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V32QI, V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_b (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_b ((v32i8)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V16HI, V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_h (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_h ((v16i16)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V8SI, V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_w (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_w ((v8i32)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V4DI, V4DI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_d (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_d ((v4i64)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvpermi_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvandn_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvandn_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. 
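++
++   A minimal usage sketch, assuming -mlasx:
++
++     __m256i negated = __lasx_xvneg_b (v);
++
++   Each signed byte lane is arithmetically negated using ordinary
++   two's-complement arithmetic, so -128 maps back to -128.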
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. 
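++
++   A minimal usage sketch, assuming -mlasx (names are illustrative):
++
++     __m256i hi = __lasx_xvmuh_du (a, b);
++
++   Each lane receives the upper 64 bits of the full 128-bit unsigned
++   product of the corresponding lanes of a and b.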
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V32QI, UQI. */ ++#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_h_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V8SI, V16HI, UQI. */ ++#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_w_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V8SI, UQI. */ ++#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_d_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV16HI, UV32QI, UQI. */ ++#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_hu_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UQI. */ ++#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_wu_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UQI. */ ++#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_du_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, UQI. */ ++#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvfrstpi_b ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, UQI. */ ++#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvfrstpi_h ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbsrl_v ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. 
*/ ++#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbsll_v ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_b ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_h ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmadd_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmadd_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmsub_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmsub_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmadd_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmadd_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmsub_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmsub_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_l (__m256i _1, __m256i _2) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_l ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftinth_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftinth_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffinth_d_w (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffinth_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffintl_d_w (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffintl_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrph_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrph_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrpl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrpl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrmh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrmh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrml_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrml_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrneh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrneh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrnel_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrnel_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrne_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrne_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrne_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrne_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrz_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrz_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrz_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrz_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrp_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrp_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrp_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrp_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrm_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrm_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrm_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrm_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ ++#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) \ ++ ((__m256i)__builtin_lasx_xvld ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ ++#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) \ ++ ((void)__builtin_lasx_xvst ((v32i8)(_1), (void *)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_b ((v32i8)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V16HI, CVPOINTER, SI, UQI. 
*/ ++#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_h ((v16i16)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V8SI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_w ((v8i32)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V4DI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_d ((v4i64)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, UQI. */ ++#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsve0_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, UQI. */ ++#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsve0_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpickve_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpickve_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvorn_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvorn_v ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, i13. */ ++/* Data types in instruction templates: V4DI, HI. */ ++#define __lasx_xvldi(/*i13*/ _1) \ ++ ((__m256i)__builtin_lasx_xvldi ((_1))) ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvldx (void * _1, long int _2) ++{ ++ return (__m256i)__builtin_lasx_xvldx ((void *)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __lasx_xvstx (__m256i _1, void * _2, long int _3) ++{ ++ return (void)__builtin_lasx_xvstx ((v32i8)_1, (void *)_2, (long int)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_qu_du (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_qu_du ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, SI, UQI. */ ++#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsgr2vr_w ((v8i32)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, DI, UQI. */ ++#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsgr2vr_d ((v4i64)(_1), (long int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_q (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_q ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_h_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_h_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_hu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_hu_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_hu ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_wu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_wu ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_hu ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvpermi_q ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, USI. */ ++#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpermi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvperm_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvperm_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_b ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si11. */ ++/* Data types in instruction templates: V16HI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_h ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si10. */ ++/* Data types in instruction templates: V8SI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_w ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si9. */ ++/* Data types in instruction templates: V4DI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_d ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: SI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((int)__builtin_lasx_xvpickve2gr_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: USI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((unsigned int)__builtin_lasx_xvpickve2gr_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: DI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((long int)__builtin_lasx_xvpickve2gr_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: UDI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((unsigned long int)__builtin_lasx_xvpickve2gr_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_qu_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_qu_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_qu_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_qu_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_q (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_q ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_q (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_q ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskgez_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskgez_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsknz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmsknz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_h_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_h_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_w_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_w_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_d_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_q_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_q_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV16HI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_hu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_hu_bu ((v32u8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_wu_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_wu_hu ((v16u16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_du_wu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_du_wu ((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_qu_du (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_qu_du ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ ++#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_q_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_q_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ ++#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. 
*/ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. 
*/ ++#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_b(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_b ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. 
*/ ++#define __lasx_xbnz_d(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_d ((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. */ ++#define __lasx_xbnz_h(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_h ((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_v(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_v ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbnz_w(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_w ((v8u32)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_b(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_b ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. */ ++#define __lasx_xbz_d(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_d ((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. */ ++#define __lasx_xbz_h(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_h ((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_v(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_v ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbz_w(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_w ((v8u32)(_1))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DF, V4DF, UQI. */ ++#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) \ ++ ((__m256d)__builtin_lasx_xvpickve_d_f ((v4f64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SF, V8SF, UQI. */ ++#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) \ ++ ((__m256)__builtin_lasx_xvpickve_w_f ((v8f32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V32QI, HI. 
*/ ++#define __lasx_xvrepli_b(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_b ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V4DI, HI. */ ++#define __lasx_xvrepli_d(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_d ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V16HI, HI. */ ++#define __lasx_xvrepli_h(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_h ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V8SI, HI. */ ++#define __lasx_xvrepli_w(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_w ((_1))) ++ ++#endif /* defined(__loongarch_asx). */ ++#endif /* _GCC_LOONGSON_ASXINTRIN_H. */ +diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h +index 110d0fab9..00039ac18 100644 +--- a/gcc/config/loongarch/linux.h ++++ b/gcc/config/loongarch/linux.h +@@ -48,3 +48,6 @@ along with GCC; see the file COPYING3. If not see + #define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) + + #define TARGET_ASM_FILE_END file_end_indicate_exec_stack ++ ++/* The stack pointer needs to be moved while checking the stack. */ ++#define STACK_CHECK_MOVING_SP 1 +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index 64fe11168..2d9743d86 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -34,13 +34,18 @@ along with GCC; see the file COPYING3. If not see + #include "recog.h" + #include "diagnostic.h" + #include "fold-const.h" ++#include "explow.h" + #include "expr.h" + #include "langhooks.h" ++#include "emit-rtl.h" ++#include "case-cfn-macros.h" + + /* Macros to create an enumeration identifier for a function prototype. */ + #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B + #define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C + #define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D ++#define LARCH_FTYPE_NAME4(A, B, C, D, E) \ ++ LARCH_##A##_FTYPE_##B##_##C##_##D##_##E + + /* Classifies the prototype of a built-in function. */ + enum loongarch_function_type +@@ -63,6 +68,19 @@ enum loongarch_builtin_type + value and the arguments are mapped to operands 0 and above. */ + LARCH_BUILTIN_DIRECT_NO_TARGET, + ++ /* For generating LoongArch LSX. */ ++ LARCH_BUILTIN_LSX, ++ ++ /* The function corresponds to an LSX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LSX_TEST_BRANCH, ++ ++ /* For generating LoongArch LASX. */ ++ LARCH_BUILTIN_LASX, ++ ++ /* The function corresponds to an LASX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LASX_TEST_BRANCH, + }; + + /* Declare an availability predicate for built-in functions that require +@@ -100,6 +118,8 @@ struct loongarch_builtin_description + }; + + AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) ++AVAIL_ALL (lsx, ISA_HAS_LSX) ++AVAIL_ALL (lasx, ISA_HAS_LASX) + + /* Construct a loongarch_builtin_description from the given arguments. 
+ +@@ -119,8 +139,8 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) + #define LARCH_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \ + { \ + CODE_FOR_loongarch_##INSN, "__builtin_loongarch_" NAME, \ +- BUILTIN_TYPE, FUNCTION_TYPE, \ +- loongarch_builtin_avail_##AVAIL \ ++ BUILTIN_TYPE, FUNCTION_TYPE, \ ++ loongarch_builtin_avail_##AVAIL \ + } + + /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function +@@ -136,6 +156,594 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) + LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ + FUNCTION_TYPE, AVAIL) + ++/* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++ ++/* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* Define an LSX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_LASX_TEST_BRANCH function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. 
*/ ++#define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* LoongArch SX define CODE_FOR_lsx_xxx */ ++#define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 ++#define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 ++#define CODE_FOR_lsx_vsadd_w CODE_FOR_ssaddv4si3 ++#define CODE_FOR_lsx_vsadd_d CODE_FOR_ssaddv2di3 ++#define CODE_FOR_lsx_vsadd_bu CODE_FOR_usaddv16qi3 ++#define CODE_FOR_lsx_vsadd_hu CODE_FOR_usaddv8hi3 ++#define CODE_FOR_lsx_vsadd_wu CODE_FOR_usaddv4si3 ++#define CODE_FOR_lsx_vsadd_du CODE_FOR_usaddv2di3 ++#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vadd_h CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vadd_w CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vadd_d CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vaddi_bu CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vaddi_hu CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vaddi_wu CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vaddi_du CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vand_v CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_vandi_b CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_bnz_v CODE_FOR_lsx_bnz_v_b ++#define CODE_FOR_lsx_bz_v CODE_FOR_lsx_bz_v_b ++#define CODE_FOR_lsx_vbitsel_v CODE_FOR_lsx_vbitsel_b ++#define CODE_FOR_lsx_vseqi_b CODE_FOR_lsx_vseq_b ++#define CODE_FOR_lsx_vseqi_h CODE_FOR_lsx_vseq_h ++#define CODE_FOR_lsx_vseqi_w CODE_FOR_lsx_vseq_w ++#define CODE_FOR_lsx_vseqi_d CODE_FOR_lsx_vseq_d ++#define CODE_FOR_lsx_vslti_b CODE_FOR_lsx_vslt_b ++#define CODE_FOR_lsx_vslti_h CODE_FOR_lsx_vslt_h ++#define CODE_FOR_lsx_vslti_w CODE_FOR_lsx_vslt_w ++#define CODE_FOR_lsx_vslti_d CODE_FOR_lsx_vslt_d ++#define CODE_FOR_lsx_vslti_bu CODE_FOR_lsx_vslt_bu ++#define CODE_FOR_lsx_vslti_hu CODE_FOR_lsx_vslt_hu ++#define CODE_FOR_lsx_vslti_wu CODE_FOR_lsx_vslt_wu ++#define CODE_FOR_lsx_vslti_du CODE_FOR_lsx_vslt_du ++#define CODE_FOR_lsx_vslei_b CODE_FOR_lsx_vsle_b ++#define CODE_FOR_lsx_vslei_h CODE_FOR_lsx_vsle_h ++#define CODE_FOR_lsx_vslei_w CODE_FOR_lsx_vsle_w ++#define CODE_FOR_lsx_vslei_d CODE_FOR_lsx_vsle_d ++#define CODE_FOR_lsx_vslei_bu CODE_FOR_lsx_vsle_bu ++#define CODE_FOR_lsx_vslei_hu CODE_FOR_lsx_vsle_hu ++#define CODE_FOR_lsx_vslei_wu CODE_FOR_lsx_vsle_wu ++#define CODE_FOR_lsx_vslei_du CODE_FOR_lsx_vsle_du ++#define CODE_FOR_lsx_vdiv_b CODE_FOR_divv16qi3 ++#define CODE_FOR_lsx_vdiv_h CODE_FOR_divv8hi3 ++#define CODE_FOR_lsx_vdiv_w CODE_FOR_divv4si3 ++#define CODE_FOR_lsx_vdiv_d CODE_FOR_divv2di3 ++#define CODE_FOR_lsx_vdiv_bu CODE_FOR_udivv16qi3 ++#define CODE_FOR_lsx_vdiv_hu CODE_FOR_udivv8hi3 ++#define CODE_FOR_lsx_vdiv_wu CODE_FOR_udivv4si3 ++#define CODE_FOR_lsx_vdiv_du CODE_FOR_udivv2di3 ++#define CODE_FOR_lsx_vfadd_s CODE_FOR_addv4sf3 ++#define CODE_FOR_lsx_vfadd_d CODE_FOR_addv2df3 ++#define CODE_FOR_lsx_vftintrz_w_s CODE_FOR_fix_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_l_d CODE_FOR_fix_truncv2dfv2di2 ++#define CODE_FOR_lsx_vftintrz_wu_s CODE_FOR_fixuns_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_lu_d CODE_FOR_fixuns_truncv2dfv2di2 ++#define CODE_FOR_lsx_vffint_s_w CODE_FOR_floatv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_l CODE_FOR_floatv2div2df2 ++#define CODE_FOR_lsx_vffint_s_wu CODE_FOR_floatunsv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_lu CODE_FOR_floatunsv2div2df2 ++#define CODE_FOR_lsx_vfsub_s CODE_FOR_subv4sf3 ++#define CODE_FOR_lsx_vfsub_d CODE_FOR_subv2df3 ++#define CODE_FOR_lsx_vfmul_s CODE_FOR_mulv4sf3 ++#define CODE_FOR_lsx_vfmul_d CODE_FOR_mulv2df3 ++#define 
CODE_FOR_lsx_vfdiv_s CODE_FOR_divv4sf3 ++#define CODE_FOR_lsx_vfdiv_d CODE_FOR_divv2df3 ++#define CODE_FOR_lsx_vfmax_s CODE_FOR_smaxv4sf3 ++#define CODE_FOR_lsx_vfmax_d CODE_FOR_smaxv2df3 ++#define CODE_FOR_lsx_vfmin_s CODE_FOR_sminv4sf3 ++#define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 ++#define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 ++#define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 ++#define CODE_FOR_lsx_vflogb_s CODE_FOR_logbv4sf2 ++#define CODE_FOR_lsx_vflogb_d CODE_FOR_logbv2df2 ++#define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmax_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmaxi_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmaxi_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmax_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmax_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmax_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmax_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmaxi_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmaxi_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmin_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmin_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmin_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmin_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmini_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmini_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmini_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmini_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmin_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmin_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmin_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmin_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmini_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmini_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmini_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmini_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmod_b CODE_FOR_modv16qi3 ++#define CODE_FOR_lsx_vmod_h CODE_FOR_modv8hi3 ++#define CODE_FOR_lsx_vmod_w CODE_FOR_modv4si3 ++#define CODE_FOR_lsx_vmod_d CODE_FOR_modv2di3 ++#define CODE_FOR_lsx_vmod_bu CODE_FOR_umodv16qi3 ++#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3 ++#define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3 ++#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3 ++#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3 ++#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3 ++#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3 ++#define CODE_FOR_lsx_vmul_d CODE_FOR_mulv2di3 ++#define CODE_FOR_lsx_vclz_b CODE_FOR_clzv16qi2 ++#define CODE_FOR_lsx_vclz_h CODE_FOR_clzv8hi2 ++#define CODE_FOR_lsx_vclz_w CODE_FOR_clzv4si2 ++#define CODE_FOR_lsx_vclz_d CODE_FOR_clzv2di2 ++#define CODE_FOR_lsx_vnor_v CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vor_v CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vori_b CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vnori_b CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vpcnt_b CODE_FOR_popcountv16qi2 ++#define CODE_FOR_lsx_vpcnt_h CODE_FOR_popcountv8hi2 ++#define CODE_FOR_lsx_vpcnt_w CODE_FOR_popcountv4si2 ++#define CODE_FOR_lsx_vpcnt_d CODE_FOR_popcountv2di2 ++#define CODE_FOR_lsx_vxor_v CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vxori_b CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vsll_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vsll_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vsll_w CODE_FOR_vashlv4si3 ++#define 
CODE_FOR_lsx_vsll_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vslli_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vslli_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vslli_w CODE_FOR_vashlv4si3 ++#define CODE_FOR_lsx_vslli_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vsra_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsra_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsra_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsra_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrai_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsrai_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsrai_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsrai_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrl_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrl_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrl_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrl_d CODE_FOR_vlshrv2di3 ++#define CODE_FOR_lsx_vsrli_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3 ++#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsub_d CODE_FOR_subv2di3 ++#define CODE_FOR_lsx_vsubi_bu CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsubi_hu CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsubi_wu CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsubi_du CODE_FOR_subv2di3 ++ ++#define CODE_FOR_lsx_vpackod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpackev_d CODE_FOR_lsx_vilvl_d ++#define CODE_FOR_lsx_vpickod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpickev_d CODE_FOR_lsx_vilvl_d ++ ++#define CODE_FOR_lsx_vrepli_b CODE_FOR_lsx_vrepliv16qi ++#define CODE_FOR_lsx_vrepli_h CODE_FOR_lsx_vrepliv8hi ++#define CODE_FOR_lsx_vrepli_w CODE_FOR_lsx_vrepliv4si ++#define CODE_FOR_lsx_vrepli_d CODE_FOR_lsx_vrepliv2di ++#define CODE_FOR_lsx_vsat_b CODE_FOR_lsx_vsat_s_b ++#define CODE_FOR_lsx_vsat_h CODE_FOR_lsx_vsat_s_h ++#define CODE_FOR_lsx_vsat_w CODE_FOR_lsx_vsat_s_w ++#define CODE_FOR_lsx_vsat_d CODE_FOR_lsx_vsat_s_d ++#define CODE_FOR_lsx_vsat_bu CODE_FOR_lsx_vsat_u_bu ++#define CODE_FOR_lsx_vsat_hu CODE_FOR_lsx_vsat_u_hu ++#define CODE_FOR_lsx_vsat_wu CODE_FOR_lsx_vsat_u_wu ++#define CODE_FOR_lsx_vsat_du CODE_FOR_lsx_vsat_u_du ++#define CODE_FOR_lsx_vavg_b CODE_FOR_lsx_vavg_s_b ++#define CODE_FOR_lsx_vavg_h CODE_FOR_lsx_vavg_s_h ++#define CODE_FOR_lsx_vavg_w CODE_FOR_lsx_vavg_s_w ++#define CODE_FOR_lsx_vavg_d CODE_FOR_lsx_vavg_s_d ++#define CODE_FOR_lsx_vavg_bu CODE_FOR_lsx_vavg_u_bu ++#define CODE_FOR_lsx_vavg_hu CODE_FOR_lsx_vavg_u_hu ++#define CODE_FOR_lsx_vavg_wu CODE_FOR_lsx_vavg_u_wu ++#define CODE_FOR_lsx_vavg_du CODE_FOR_lsx_vavg_u_du ++#define CODE_FOR_lsx_vavgr_b CODE_FOR_lsx_vavgr_s_b ++#define CODE_FOR_lsx_vavgr_h CODE_FOR_lsx_vavgr_s_h ++#define CODE_FOR_lsx_vavgr_w CODE_FOR_lsx_vavgr_s_w ++#define CODE_FOR_lsx_vavgr_d CODE_FOR_lsx_vavgr_s_d ++#define CODE_FOR_lsx_vavgr_bu CODE_FOR_lsx_vavgr_u_bu ++#define CODE_FOR_lsx_vavgr_hu CODE_FOR_lsx_vavgr_u_hu ++#define CODE_FOR_lsx_vavgr_wu CODE_FOR_lsx_vavgr_u_wu ++#define CODE_FOR_lsx_vavgr_du CODE_FOR_lsx_vavgr_u_du ++#define CODE_FOR_lsx_vssub_b CODE_FOR_lsx_vssub_s_b ++#define CODE_FOR_lsx_vssub_h CODE_FOR_lsx_vssub_s_h ++#define CODE_FOR_lsx_vssub_w CODE_FOR_lsx_vssub_s_w ++#define CODE_FOR_lsx_vssub_d CODE_FOR_lsx_vssub_s_d ++#define CODE_FOR_lsx_vssub_bu CODE_FOR_lsx_vssub_u_bu ++#define CODE_FOR_lsx_vssub_hu CODE_FOR_lsx_vssub_u_hu ++#define CODE_FOR_lsx_vssub_wu 
CODE_FOR_lsx_vssub_u_wu ++#define CODE_FOR_lsx_vssub_du CODE_FOR_lsx_vssub_u_du ++#define CODE_FOR_lsx_vabsd_b CODE_FOR_lsx_vabsd_s_b ++#define CODE_FOR_lsx_vabsd_h CODE_FOR_lsx_vabsd_s_h ++#define CODE_FOR_lsx_vabsd_w CODE_FOR_lsx_vabsd_s_w ++#define CODE_FOR_lsx_vabsd_d CODE_FOR_lsx_vabsd_s_d ++#define CODE_FOR_lsx_vabsd_bu CODE_FOR_lsx_vabsd_u_bu ++#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu ++#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu ++#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du ++#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s ++#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d ++#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s ++#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d ++#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3 ++#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3 ++#define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2 ++#define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2 ++#define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2 ++#define CODE_FOR_lsx_vneg_d CODE_FOR_vnegv2di2 ++#define CODE_FOR_lsx_vshuf4i_d CODE_FOR_lsx_vshuf4i_d ++#define CODE_FOR_lsx_vbsrl_v CODE_FOR_lsx_vbsrl_b ++#define CODE_FOR_lsx_vbsll_v CODE_FOR_lsx_vbsll_b ++#define CODE_FOR_lsx_vfmadd_s CODE_FOR_fmav4sf4 ++#define CODE_FOR_lsx_vfmadd_d CODE_FOR_fmav2df4 ++#define CODE_FOR_lsx_vfmsub_s CODE_FOR_fmsv4sf4 ++#define CODE_FOR_lsx_vfmsub_d CODE_FOR_fmsv2df4 ++#define CODE_FOR_lsx_vfnmadd_s CODE_FOR_vfnmaddv4sf4_nmadd4 ++#define CODE_FOR_lsx_vfnmadd_d CODE_FOR_vfnmaddv2df4_nmadd4 ++#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4 ++#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4 ++ ++#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b ++#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h ++#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w ++#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d ++#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu ++#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu ++#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu ++#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du ++#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b ++#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h ++#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w ++#define CODE_FOR_lsx_vsllwil_hu_bu CODE_FOR_lsx_vsllwil_u_hu_bu ++#define CODE_FOR_lsx_vsllwil_wu_hu CODE_FOR_lsx_vsllwil_u_wu_hu ++#define CODE_FOR_lsx_vsllwil_du_wu CODE_FOR_lsx_vsllwil_u_du_wu ++#define CODE_FOR_lsx_vssran_b_h CODE_FOR_lsx_vssran_s_b_h ++#define CODE_FOR_lsx_vssran_h_w CODE_FOR_lsx_vssran_s_h_w ++#define CODE_FOR_lsx_vssran_w_d CODE_FOR_lsx_vssran_s_w_d ++#define CODE_FOR_lsx_vssran_bu_h CODE_FOR_lsx_vssran_u_bu_h ++#define CODE_FOR_lsx_vssran_hu_w CODE_FOR_lsx_vssran_u_hu_w ++#define CODE_FOR_lsx_vssran_wu_d CODE_FOR_lsx_vssran_u_wu_d ++#define CODE_FOR_lsx_vssrarn_b_h CODE_FOR_lsx_vssrarn_s_b_h ++#define CODE_FOR_lsx_vssrarn_h_w CODE_FOR_lsx_vssrarn_s_h_w ++#define CODE_FOR_lsx_vssrarn_w_d CODE_FOR_lsx_vssrarn_s_w_d ++#define CODE_FOR_lsx_vssrarn_bu_h CODE_FOR_lsx_vssrarn_u_bu_h ++#define CODE_FOR_lsx_vssrarn_hu_w CODE_FOR_lsx_vssrarn_u_hu_w ++#define CODE_FOR_lsx_vssrarn_wu_d CODE_FOR_lsx_vssrarn_u_wu_d ++#define CODE_FOR_lsx_vssrln_bu_h CODE_FOR_lsx_vssrln_u_bu_h ++#define CODE_FOR_lsx_vssrln_hu_w CODE_FOR_lsx_vssrln_u_hu_w ++#define CODE_FOR_lsx_vssrln_wu_d CODE_FOR_lsx_vssrln_u_wu_d ++#define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h ++#define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w ++#define 
CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d ++ ++/* LoongArch ASX define CODE_FOR_lasx_mxxx */ ++#define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_h CODE_FOR_ssaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_w CODE_FOR_ssaddv8si3 ++#define CODE_FOR_lasx_xvsadd_d CODE_FOR_ssaddv4di3 ++#define CODE_FOR_lasx_xvsadd_bu CODE_FOR_usaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_hu CODE_FOR_usaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_wu CODE_FOR_usaddv8si3 ++#define CODE_FOR_lasx_xvsadd_du CODE_FOR_usaddv4di3 ++#define CODE_FOR_lasx_xvadd_b CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvadd_h CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvadd_w CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvadd_d CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvaddi_bu CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvaddi_hu CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvaddi_wu CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvaddi_du CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvand_v CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvandi_b CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvbitsel_v CODE_FOR_lasx_xvbitsel_b ++#define CODE_FOR_lasx_xvseqi_b CODE_FOR_lasx_xvseq_b ++#define CODE_FOR_lasx_xvseqi_h CODE_FOR_lasx_xvseq_h ++#define CODE_FOR_lasx_xvseqi_w CODE_FOR_lasx_xvseq_w ++#define CODE_FOR_lasx_xvseqi_d CODE_FOR_lasx_xvseq_d ++#define CODE_FOR_lasx_xvslti_b CODE_FOR_lasx_xvslt_b ++#define CODE_FOR_lasx_xvslti_h CODE_FOR_lasx_xvslt_h ++#define CODE_FOR_lasx_xvslti_w CODE_FOR_lasx_xvslt_w ++#define CODE_FOR_lasx_xvslti_d CODE_FOR_lasx_xvslt_d ++#define CODE_FOR_lasx_xvslti_bu CODE_FOR_lasx_xvslt_bu ++#define CODE_FOR_lasx_xvslti_hu CODE_FOR_lasx_xvslt_hu ++#define CODE_FOR_lasx_xvslti_wu CODE_FOR_lasx_xvslt_wu ++#define CODE_FOR_lasx_xvslti_du CODE_FOR_lasx_xvslt_du ++#define CODE_FOR_lasx_xvslei_b CODE_FOR_lasx_xvsle_b ++#define CODE_FOR_lasx_xvslei_h CODE_FOR_lasx_xvsle_h ++#define CODE_FOR_lasx_xvslei_w CODE_FOR_lasx_xvsle_w ++#define CODE_FOR_lasx_xvslei_d CODE_FOR_lasx_xvsle_d ++#define CODE_FOR_lasx_xvslei_bu CODE_FOR_lasx_xvsle_bu ++#define CODE_FOR_lasx_xvslei_hu CODE_FOR_lasx_xvsle_hu ++#define CODE_FOR_lasx_xvslei_wu CODE_FOR_lasx_xvsle_wu ++#define CODE_FOR_lasx_xvslei_du CODE_FOR_lasx_xvsle_du ++#define CODE_FOR_lasx_xvdiv_b CODE_FOR_divv32qi3 ++#define CODE_FOR_lasx_xvdiv_h CODE_FOR_divv16hi3 ++#define CODE_FOR_lasx_xvdiv_w CODE_FOR_divv8si3 ++#define CODE_FOR_lasx_xvdiv_d CODE_FOR_divv4di3 ++#define CODE_FOR_lasx_xvdiv_bu CODE_FOR_udivv32qi3 ++#define CODE_FOR_lasx_xvdiv_hu CODE_FOR_udivv16hi3 ++#define CODE_FOR_lasx_xvdiv_wu CODE_FOR_udivv8si3 ++#define CODE_FOR_lasx_xvdiv_du CODE_FOR_udivv4di3 ++#define CODE_FOR_lasx_xvfadd_s CODE_FOR_addv8sf3 ++#define CODE_FOR_lasx_xvfadd_d CODE_FOR_addv4df3 ++#define CODE_FOR_lasx_xvftintrz_w_s CODE_FOR_fix_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_l_d CODE_FOR_fix_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvftintrz_wu_s CODE_FOR_fixuns_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_lu_d CODE_FOR_fixuns_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvffint_s_w CODE_FOR_floatv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_l CODE_FOR_floatv4div4df2 ++#define CODE_FOR_lasx_xvffint_s_wu CODE_FOR_floatunsv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_lu CODE_FOR_floatunsv4div4df2 ++#define CODE_FOR_lasx_xvfsub_s CODE_FOR_subv8sf3 ++#define CODE_FOR_lasx_xvfsub_d CODE_FOR_subv4df3 ++#define CODE_FOR_lasx_xvfmul_s CODE_FOR_mulv8sf3 ++#define CODE_FOR_lasx_xvfmul_d CODE_FOR_mulv4df3 ++#define CODE_FOR_lasx_xvfdiv_s CODE_FOR_divv8sf3 ++#define CODE_FOR_lasx_xvfdiv_d 
CODE_FOR_divv4df3 ++#define CODE_FOR_lasx_xvfmax_s CODE_FOR_smaxv8sf3 ++#define CODE_FOR_lasx_xvfmax_d CODE_FOR_smaxv4df3 ++#define CODE_FOR_lasx_xvfmin_s CODE_FOR_sminv8sf3 ++#define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 ++#define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 ++#define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 ++#define CODE_FOR_lasx_xvflogb_s CODE_FOR_logbv8sf2 ++#define CODE_FOR_lasx_xvflogb_d CODE_FOR_logbv4df2 ++#define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmax_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmax_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmax_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmax_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmax_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmin_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmin_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmin_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmin_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmini_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmini_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmini_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmini_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmin_bu CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmin_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmin_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmin_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmini_bu CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmini_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmini_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmini_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmod_b CODE_FOR_modv32qi3 ++#define CODE_FOR_lasx_xvmod_h CODE_FOR_modv16hi3 ++#define CODE_FOR_lasx_xvmod_w CODE_FOR_modv8si3 ++#define CODE_FOR_lasx_xvmod_d CODE_FOR_modv4di3 ++#define CODE_FOR_lasx_xvmod_bu CODE_FOR_umodv32qi3 ++#define CODE_FOR_lasx_xvmod_hu CODE_FOR_umodv16hi3 ++#define CODE_FOR_lasx_xvmod_wu CODE_FOR_umodv8si3 ++#define CODE_FOR_lasx_xvmod_du CODE_FOR_umodv4di3 ++#define CODE_FOR_lasx_xvmul_b CODE_FOR_mulv32qi3 ++#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3 ++#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3 ++#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3 ++#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2 ++#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2 ++#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2 ++#define CODE_FOR_lasx_xvclz_d CODE_FOR_clzv4di2 ++#define CODE_FOR_lasx_xvnor_v CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvor_v CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvori_b CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvnori_b CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvpcnt_b CODE_FOR_popcountv32qi2 ++#define CODE_FOR_lasx_xvpcnt_h CODE_FOR_popcountv16hi2 ++#define CODE_FOR_lasx_xvpcnt_w CODE_FOR_popcountv8si2 ++#define CODE_FOR_lasx_xvpcnt_d CODE_FOR_popcountv4di2 ++#define CODE_FOR_lasx_xvxor_v CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvxori_b CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvsll_b CODE_FOR_vashlv32qi3 ++#define 
CODE_FOR_lasx_xvsll_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvsll_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvsll_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvslli_b CODE_FOR_vashlv32qi3 ++#define CODE_FOR_lasx_xvslli_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvslli_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvslli_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvsra_b CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsra_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsra_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsra_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrai_b CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsrai_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsrai_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsrai_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrl_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrl_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrl_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrl_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsrli_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsub_d CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvsubi_bu CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsubi_hu CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsubi_wu CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsubi_du CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvpackod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpackev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvpickod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpickev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvrepli_b CODE_FOR_lasx_xvrepliv32qi ++#define CODE_FOR_lasx_xvrepli_h CODE_FOR_lasx_xvrepliv16hi ++#define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si ++#define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di ++ ++#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3 ++#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3 ++#define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2 ++#define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2 ++#define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2 ++#define CODE_FOR_lasx_xvneg_d CODE_FOR_negv4di2 ++#define CODE_FOR_lasx_xvbsrl_v CODE_FOR_lasx_xvbsrl_b ++#define CODE_FOR_lasx_xvbsll_v CODE_FOR_lasx_xvbsll_b ++#define CODE_FOR_lasx_xvfmadd_s CODE_FOR_fmav8sf4 ++#define CODE_FOR_lasx_xvfmadd_d CODE_FOR_fmav4df4 ++#define CODE_FOR_lasx_xvfmsub_s CODE_FOR_fmsv8sf4 ++#define CODE_FOR_lasx_xvfmsub_d CODE_FOR_fmsv4df4 ++#define CODE_FOR_lasx_xvfnmadd_s CODE_FOR_xvfnmaddv8sf4_nmadd4 ++#define CODE_FOR_lasx_xvfnmadd_d CODE_FOR_xvfnmaddv4df4_nmadd4 ++#define CODE_FOR_lasx_xvfnmsub_s CODE_FOR_xvfnmsubv8sf4_nmsub4 ++#define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 ++ ++#define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi ++#define CODE_FOR_lasx_xvpermi_d CODE_FOR_lasx_xvpermi_d_v4di ++#define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b ++#define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b ++ ++#define CODE_FOR_lasx_xvssub_b CODE_FOR_lasx_xvssub_s_b ++#define CODE_FOR_lasx_xvssub_h CODE_FOR_lasx_xvssub_s_h ++#define CODE_FOR_lasx_xvssub_w CODE_FOR_lasx_xvssub_s_w ++#define CODE_FOR_lasx_xvssub_d CODE_FOR_lasx_xvssub_s_d ++#define CODE_FOR_lasx_xvssub_bu CODE_FOR_lasx_xvssub_u_bu ++#define CODE_FOR_lasx_xvssub_hu 
CODE_FOR_lasx_xvssub_u_hu ++#define CODE_FOR_lasx_xvssub_wu CODE_FOR_lasx_xvssub_u_wu ++#define CODE_FOR_lasx_xvssub_du CODE_FOR_lasx_xvssub_u_du ++#define CODE_FOR_lasx_xvabsd_b CODE_FOR_lasx_xvabsd_s_b ++#define CODE_FOR_lasx_xvabsd_h CODE_FOR_lasx_xvabsd_s_h ++#define CODE_FOR_lasx_xvabsd_w CODE_FOR_lasx_xvabsd_s_w ++#define CODE_FOR_lasx_xvabsd_d CODE_FOR_lasx_xvabsd_s_d ++#define CODE_FOR_lasx_xvabsd_bu CODE_FOR_lasx_xvabsd_u_bu ++#define CODE_FOR_lasx_xvabsd_hu CODE_FOR_lasx_xvabsd_u_hu ++#define CODE_FOR_lasx_xvabsd_wu CODE_FOR_lasx_xvabsd_u_wu ++#define CODE_FOR_lasx_xvabsd_du CODE_FOR_lasx_xvabsd_u_du ++#define CODE_FOR_lasx_xvavg_b CODE_FOR_lasx_xvavg_s_b ++#define CODE_FOR_lasx_xvavg_h CODE_FOR_lasx_xvavg_s_h ++#define CODE_FOR_lasx_xvavg_w CODE_FOR_lasx_xvavg_s_w ++#define CODE_FOR_lasx_xvavg_d CODE_FOR_lasx_xvavg_s_d ++#define CODE_FOR_lasx_xvavg_bu CODE_FOR_lasx_xvavg_u_bu ++#define CODE_FOR_lasx_xvavg_hu CODE_FOR_lasx_xvavg_u_hu ++#define CODE_FOR_lasx_xvavg_wu CODE_FOR_lasx_xvavg_u_wu ++#define CODE_FOR_lasx_xvavg_du CODE_FOR_lasx_xvavg_u_du ++#define CODE_FOR_lasx_xvavgr_b CODE_FOR_lasx_xvavgr_s_b ++#define CODE_FOR_lasx_xvavgr_h CODE_FOR_lasx_xvavgr_s_h ++#define CODE_FOR_lasx_xvavgr_w CODE_FOR_lasx_xvavgr_s_w ++#define CODE_FOR_lasx_xvavgr_d CODE_FOR_lasx_xvavgr_s_d ++#define CODE_FOR_lasx_xvavgr_bu CODE_FOR_lasx_xvavgr_u_bu ++#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu ++#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu ++#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du ++#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b ++#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h ++#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w ++#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d ++#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu ++#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu ++#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu ++#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du ++#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h ++#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w ++#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d ++#define CODE_FOR_lasx_xvssran_bu_h CODE_FOR_lasx_xvssran_u_bu_h ++#define CODE_FOR_lasx_xvssran_hu_w CODE_FOR_lasx_xvssran_u_hu_w ++#define CODE_FOR_lasx_xvssran_wu_d CODE_FOR_lasx_xvssran_u_wu_d ++#define CODE_FOR_lasx_xvssrarn_b_h CODE_FOR_lasx_xvssrarn_s_b_h ++#define CODE_FOR_lasx_xvssrarn_h_w CODE_FOR_lasx_xvssrarn_s_h_w ++#define CODE_FOR_lasx_xvssrarn_w_d CODE_FOR_lasx_xvssrarn_s_w_d ++#define CODE_FOR_lasx_xvssrarn_bu_h CODE_FOR_lasx_xvssrarn_u_bu_h ++#define CODE_FOR_lasx_xvssrarn_hu_w CODE_FOR_lasx_xvssrarn_u_hu_w ++#define CODE_FOR_lasx_xvssrarn_wu_d CODE_FOR_lasx_xvssrarn_u_wu_d ++#define CODE_FOR_lasx_xvssrln_bu_h CODE_FOR_lasx_xvssrln_u_bu_h ++#define CODE_FOR_lasx_xvssrln_hu_w CODE_FOR_lasx_xvssrln_u_hu_w ++#define CODE_FOR_lasx_xvssrln_wu_d CODE_FOR_lasx_xvssrln_u_wu_d ++#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h ++#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w ++#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d ++#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s ++#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d ++#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s ++#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d ++#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b ++#define 
CODE_FOR_lasx_xvsllwil_w_h CODE_FOR_lasx_xvsllwil_s_w_h ++#define CODE_FOR_lasx_xvsllwil_d_w CODE_FOR_lasx_xvsllwil_s_d_w ++#define CODE_FOR_lasx_xvsllwil_hu_bu CODE_FOR_lasx_xvsllwil_u_hu_bu ++#define CODE_FOR_lasx_xvsllwil_wu_hu CODE_FOR_lasx_xvsllwil_u_wu_hu ++#define CODE_FOR_lasx_xvsllwil_du_wu CODE_FOR_lasx_xvsllwil_u_du_wu ++#define CODE_FOR_lasx_xvsat_b CODE_FOR_lasx_xvsat_s_b ++#define CODE_FOR_lasx_xvsat_h CODE_FOR_lasx_xvsat_s_h ++#define CODE_FOR_lasx_xvsat_w CODE_FOR_lasx_xvsat_s_w ++#define CODE_FOR_lasx_xvsat_d CODE_FOR_lasx_xvsat_s_d ++#define CODE_FOR_lasx_xvsat_bu CODE_FOR_lasx_xvsat_u_bu ++#define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu ++#define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu ++#define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du ++ + static const struct loongarch_builtin_description loongarch_builtins[] = { + #define LARCH_MOVFCSR2GR 0 + DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float), +@@ -183,6 +791,1481 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + DIRECT_NO_TARGET_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), + DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default), + DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default), ++ ++ /* Built-in functions for LSX. */ ++ LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsll_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsll_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vslli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vslli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vslli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsra_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsra_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsra_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsra_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrai_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrai_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrai_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrai_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrar_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrar_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrar_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrar_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrari_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrari_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrari_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrari_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrlr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrlr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrlri_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrlri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrlri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrlri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vbitclr_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN 
(vbitclr_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitclr_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitclr_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitclri_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitclri_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitclri_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitclri_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitset_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitset_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitset_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitset_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitseti_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitseti_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitseti_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitseti_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitrev_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitrev_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitrev_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitrev_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitrevi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitrevi_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitrevi_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitrevi_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vaddi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vaddi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vaddi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsubi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsubi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsubi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vmax_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmax_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmax_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmax_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmaxi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmaxi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmaxi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmaxi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmax_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmax_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmax_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmax_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaxi_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmaxi_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmaxi_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmaxi_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vmin_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmin_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmin_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmin_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmini_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmini_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmini_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmini_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmin_bu, 
LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmin_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmin_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmin_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmini_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmini_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmini_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmini_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vseq_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vseq_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vseq_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vseq_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vseqi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vseqi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vseqi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vseqi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslti_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslt_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vslt_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vslt_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vslt_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslti_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslti_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslti_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslt_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vslt_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vslt_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vslt_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslti_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslti_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslti_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslti_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsle_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsle_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsle_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsle_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslei_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslei_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslei_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslei_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vsle_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsle_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsle_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsle_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslei_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslei_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslei_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslei_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsat_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsat_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsat_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsat_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsat_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsat_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsat_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsat_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadda_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadda_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadda_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadda_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN 
(vsadd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsadd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsadd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavg_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavg_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavg_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavg_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavg_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavg_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavg_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavg_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavgr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavgr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavgr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavgr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavgr_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavgr_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavgr_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavgr_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vssub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vssub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssub_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vssub_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssub_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssub_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vabsd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vabsd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vabsd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vabsd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vabsd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vabsd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vabsd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vabsd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmul_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmul_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmul_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmul_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vdiv_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vdiv_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vdiv_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vdiv_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vdiv_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vdiv_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhaddw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vhaddw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhaddw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhaddw_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhaddw_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhaddw_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vhsubw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN 
(vhsubw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhsubw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhsubw_hu_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhsubw_wu_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhsubw_du_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmod_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmod_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmod_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vreplve_b, LARCH_V16QI_FTYPE_V16QI_SI), ++ LSX_BUILTIN (vreplve_h, LARCH_V8HI_FTYPE_V8HI_SI), ++ LSX_BUILTIN (vreplve_w, LARCH_V4SI_FTYPE_V4SI_SI), ++ LSX_BUILTIN (vreplve_d, LARCH_V2DI_FTYPE_V2DI_SI), ++ LSX_BUILTIN (vreplvei_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vreplvei_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vreplvei_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vreplvei_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpickod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpackev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpackod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vshuf_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vshuf_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vand_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vandi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vnor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vnori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vxor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vxori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitsel_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitseli_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_USI), ++ LSX_BUILTIN (vshuf4i_b, LARCH_V16QI_FTYPE_V16QI_USI), ++ LSX_BUILTIN (vshuf4i_h, LARCH_V8HI_FTYPE_V8HI_USI), ++ LSX_BUILTIN (vshuf4i_w, LARCH_V4SI_FTYPE_V4SI_USI), ++ LSX_BUILTIN (vreplgr2vr_b, LARCH_V16QI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_h, 
LARCH_V8HI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_w, LARCH_V4SI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_d, LARCH_V2DI_FTYPE_DI), ++ LSX_BUILTIN (vpcnt_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vpcnt_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vpcnt_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vpcnt_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclo_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclo_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vclo_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclo_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vclz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vpickve2gr_b, LARCH_SI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_h, LARCH_SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_w, LARCH_SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_d, LARCH_DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickve2gr_bu, LARCH_USI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_hu, LARCH_USI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_wu, LARCH_USI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_du, LARCH_UDI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vinsgr2vr_b, LARCH_V16QI_FTYPE_V16QI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_h, LARCH_V8HI_FTYPE_V8HI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_w, LARCH_V4SI_FTYPE_V4SI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_d, LARCH_V2DI_FTYPE_V2DI_DI_UQI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN (vrepli_b, LARCH_V16QI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_h, LARCH_V8HI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_w, LARCH_V4SI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_d, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vfcmp_caf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_caf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_ceq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_ceq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_clt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_clt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_saf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_saf_d, 
LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_seq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_seq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_slt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_slt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmul_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmul_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfdiv_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfdiv_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcvt_h_s, LARCH_V8HI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcvt_s_d, LARCH_V4SF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmin_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmin_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmina_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmina_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmax_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmax_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmaxa_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmaxa_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfclass_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfclass_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrecip_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrecip_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrint_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrint_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vflogb_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vflogb_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfcvth_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvth_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vfcvtl_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvtl_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vffint_s_w, LARCH_V4SF_FTYPE_V4SI), ++ LSX_BUILTIN (vffint_d_l, LARCH_V2DF_FTYPE_V2DI), ++ LSX_BUILTIN 
(vffint_s_wu, LARCH_V4SF_FTYPE_UV4SI), ++ LSX_BUILTIN (vffint_d_lu, LARCH_V2DF_FTYPE_UV2DI), ++ ++ LSX_BUILTIN (vandn_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vneg_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vneg_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vneg_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vneg_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vmuh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmuh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmuh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmuh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmuh_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmuh_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmuh_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmuh_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsllwil_h_b, LARCH_V8HI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsllwil_w_h, LARCH_V4SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsllwil_d_w, LARCH_V2DI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsllwil_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsllwil_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsllwil_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssran_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssran_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssran_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrarn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrarn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrln_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrln_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrlrn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrlrn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrlrn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vfrstpi_b, LARCH_V16QI_FTYPE_V16QI_V16QI_UQI), ++ LSX_BUILTIN (vfrstpi_h, LARCH_V8HI_FTYPE_V8HI_V8HI_UQI), ++ LSX_BUILTIN (vfrstp_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vfrstp_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf4i_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vbsrl_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vbsll_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vextrins_b, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vextrins_h, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vextrins_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vextrins_d, 
LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vmskltz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmskltz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vmskltz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vmskltz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsigncov_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsigncov_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsigncov_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsigncov_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vfmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrne_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrp_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrp_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrm_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrm_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vffint_s_l, LARCH_V4SF_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vftintrz_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrp_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrm_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftinth_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vffinth_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vffintl_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vftintrzl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrzh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrpl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrph_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrml_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrz_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrz_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrp_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrp_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrm_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrm_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_d, LARCH_VOID_FTYPE_V2DI_CVPOINTER_SI_UQI), ++ LSX_BUILTIN (vaddwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vaddwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ 
LSX_BUILTIN (vaddwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vaddwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vaddwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsubwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ ++ LSX_BUILTIN (vmulwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmulwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmulwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vhaddw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhaddw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhsubw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhsubw_qu_du, 
LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwev_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwev_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwod_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwod_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwod_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwev_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwev_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwod_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vrotr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vrotr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vrotr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vrotr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vadd_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsub_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vldrepl_b, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_h, LARCH_V8HI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_w, LARCH_V4SI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_d, LARCH_V2DI_FTYPE_CVPOINTER_SI), ++ ++ LSX_BUILTIN (vmskgez_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmsknz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_h_b, LARCH_V8HI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_w_h, LARCH_V4SI_FTYPE_V8HI), ++ LSX_BUILTIN (vexth_d_w, LARCH_V2DI_FTYPE_V4SI), ++ LSX_BUILTIN (vexth_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vexth_hu_bu, LARCH_UV8HI_FTYPE_UV16QI), ++ LSX_BUILTIN (vexth_wu_hu, LARCH_UV4SI_FTYPE_UV8HI), ++ LSX_BUILTIN (vexth_du_wu, LARCH_UV2DI_FTYPE_UV4SI), ++ LSX_BUILTIN (vexth_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ LSX_BUILTIN (vrotri_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vrotri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vrotri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vrotri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vextl_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_b_h, 
LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vsrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vpermi_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vld, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_NO_TARGET_BUILTIN (vst, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI), ++ LSX_BUILTIN (vssrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vorn_v, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vldi, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI), ++ LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI), ++ LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ ++ /* Built-in functions for LASX */ ++ LASX_BUILTIN (xvsll_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsll_h, 
LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsll_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsll_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvslli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvslli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvslli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsra_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsra_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsra_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsra_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrai_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrai_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrai_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrai_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrar_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrar_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrar_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrar_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrari_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrari_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrari_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrari_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrlr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrlr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrlri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrlri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrlri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrlri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvbitclr_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitclr_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitclr_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitclr_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitclri_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitclri_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitclri_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitclri_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitset_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitset_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitset_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitset_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitseti_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitseti_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitseti_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitseti_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitrev_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitrev_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitrev_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitrev_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitrevi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitrevi_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ 
LASX_BUILTIN (xvbitrevi_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitrevi_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvaddi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvaddi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvaddi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsubi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsubi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsubi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvmax_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmax_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmax_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmax_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmaxi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmaxi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmaxi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmaxi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmax_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmax_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmax_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmax_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaxi_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmaxi_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmaxi_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmaxi_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvmin_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmin_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmin_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmin_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmini_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmini_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmini_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmini_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmin_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmin_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmin_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmin_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmini_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmini_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmini_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmini_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvseq_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvseq_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvseq_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvseq_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvseqi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvseqi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvseqi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvseqi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvslt_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvslt_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvslt_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ 
LASX_BUILTIN (xvslti_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslti_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslti_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslti_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvslt_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvslt_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvslt_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslti_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslti_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslti_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvslti_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvsle_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsle_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsle_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsle_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslei_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslei_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslei_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslei_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvsle_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsle_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsle_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsle_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslei_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslei_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslei_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvslei_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvsat_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsat_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsat_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsat_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsat_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvsat_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsat_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsat_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvadda_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadda_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadda_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadda_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsadd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsadd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsadd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavg_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavg_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavg_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavg_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavg_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvavg_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavg_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavg_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavgr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavgr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavgr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavgr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavgr_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN 
(xvavgr_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavgr_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavgr_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvssub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvssub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssub_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvssub_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssub_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssub_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvabsd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvabsd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvabsd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvabsd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvabsd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvabsd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvabsd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvabsd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvmul_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmul_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmul_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmul_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvdiv_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvdiv_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvdiv_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvdiv_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvdiv_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvdiv_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhaddw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhaddw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhaddw_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhaddw_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhaddw_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvhsubw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhsubw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhsubw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhsubw_hu_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhsubw_wu_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhsubw_du_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmod_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmod_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmod_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmod_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ 
LASX_BUILTIN (xvrepl128vei_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrepl128vei_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrepl128vei_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrepl128vei_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpickod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpackev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpackod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvshuf_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvshuf_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvshuf_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvand_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvandi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvnor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvnori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvxor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvxori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitsel_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitseli_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_USI), ++ ++ LASX_BUILTIN (xvshuf4i_b, LARCH_V32QI_FTYPE_V32QI_USI), ++ LASX_BUILTIN (xvshuf4i_h, LARCH_V16HI_FTYPE_V16HI_USI), ++ LASX_BUILTIN (xvshuf4i_w, LARCH_V8SI_FTYPE_V8SI_USI), ++ ++ LASX_BUILTIN (xvreplgr2vr_b, LARCH_V32QI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_h, LARCH_V16HI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_w, LARCH_V8SI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_d, LARCH_V4DI_FTYPE_DI), ++ LASX_BUILTIN (xvpcnt_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpcnt_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvpcnt_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvpcnt_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclo_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclo_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclo_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvclo_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclz_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvclz_d, LARCH_V4DI_FTYPE_V4DI), ++ ++ 
LASX_BUILTIN (xvrepli_b, LARCH_V32QI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_h, LARCH_V16HI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_w, LARCH_V8SI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_d, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvfcmp_caf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_caf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_ceq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_ceq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_clt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_clt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_saf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_saf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_seq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_seq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_slt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_slt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmul_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmul_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfdiv_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfdiv_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcvt_h_s, LARCH_V16HI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcvt_s_d, LARCH_V8SF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmin_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmin_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN 
(xvfmina_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmina_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmax_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmax_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmaxa_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmaxa_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfclass_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfclass_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrecip_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrecip_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrint_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrint_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvflogb_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvflogb_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfcvth_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvth_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfcvtl_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvtl_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvffint_s_w, LARCH_V8SF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffint_d_l, LARCH_V4DF_FTYPE_V4DI), ++ LASX_BUILTIN (xvffint_s_wu, LARCH_V8SF_FTYPE_UV8SI), ++ LASX_BUILTIN (xvffint_d_lu, LARCH_V4DF_FTYPE_UV4DI), ++ ++ LASX_BUILTIN (xvreplve_b, LARCH_V32QI_FTYPE_V32QI_SI), ++ LASX_BUILTIN (xvreplve_h, LARCH_V16HI_FTYPE_V16HI_SI), ++ LASX_BUILTIN (xvreplve_w, LARCH_V8SI_FTYPE_V8SI_SI), ++ LASX_BUILTIN (xvreplve_d, LARCH_V4DI_FTYPE_V4DI_SI), ++ LASX_BUILTIN (xvpermi_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ ++ LASX_BUILTIN (xvandn_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvneg_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvneg_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvneg_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvneg_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvmuh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmuh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmuh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmuh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmuh_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmuh_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmuh_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmuh_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsllwil_h_b, LARCH_V16HI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsllwil_w_h, LARCH_V8SI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsllwil_d_w, LARCH_V4DI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsllwil_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UQI), /* FIXME: U? 
*/ ++ LASX_BUILTIN (xvsllwil_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsllwil_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssran_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssran_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrarn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrarn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrln_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrln_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrlrn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrlrn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrlrn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvfrstpi_b, LARCH_V32QI_FTYPE_V32QI_V32QI_UQI), ++ LASX_BUILTIN (xvfrstpi_h, LARCH_V16HI_FTYPE_V16HI_V16HI_UQI), ++ LASX_BUILTIN (xvfrstp_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvfrstp_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf4i_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvbsrl_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvbsll_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvextrins_b, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvextrins_h, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvextrins_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvextrins_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvmskltz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmskltz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvmskltz_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvmskltz_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsigncov_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsigncov_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsigncov_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsigncov_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvfmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmsub_s, 
LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrne_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrp_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrp_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrm_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrm_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvffint_s_l, LARCH_V8SF_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvftintrz_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrp_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrm_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftinth_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvffinth_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffintl_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvftintrzh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrzl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrph_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrpl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrmh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrz_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_h, LARCH_VOID_FTYPE_V16HI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_w, LARCH_VOID_FTYPE_V8SI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_d, LARCH_VOID_FTYPE_V4DI_CVPOINTER_SI_UQI), ++ LASX_BUILTIN (xvinsve0_w, LARCH_V8SI_FTYPE_V8SI_V8SI_UQI), ++ LASX_BUILTIN (xvinsve0_d, LARCH_V4DI_FTYPE_V4DI_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w_f, LARCH_V8SF_FTYPE_V8SF_UQI), ++ LASX_BUILTIN (xvpickve_d_f, LARCH_V4DF_FTYPE_V4DF_UQI), ++ LASX_BUILTIN (xvssrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvorn_v, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvldi, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvldx, LARCH_V32QI_FTYPE_CVPOINTER_DI), ++ LASX_NO_TARGET_BUILTIN (xvstx, LARCH_VOID_FTYPE_V32QI_CVPOINTER_DI), ++ LASX_BUILTIN (xvextl_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ ++ /* LASX */ ++ LASX_BUILTIN (xvinsgr2vr_w, LARCH_V8SI_FTYPE_V8SI_SI_UQI), ++ LASX_BUILTIN (xvinsgr2vr_d, LARCH_V4DI_FTYPE_V4DI_DI_UQI), ++ ++ LASX_BUILTIN (xvreplve0_b, 
LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvreplve0_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvreplve0_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvreplve0_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvreplve0_q, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_w_b, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_d_h, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_b, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_hu_bu, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_wu_hu, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_wu, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_wu_bu, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_du_hu, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_bu, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpermi_q, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvpermi_d, LARCH_V4DI_FTYPE_V4DI_USI), ++ LASX_BUILTIN (xvperm_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvldrepl_b, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_h, LARCH_V16HI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_w, LARCH_V8SI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_d, LARCH_V4DI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvpickve2gr_w, LARCH_SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_wu, LARCH_USI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_d, LARCH_DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve2gr_du, LARCH_UDI_FTYPE_V4DI_UQI), ++ ++ LASX_BUILTIN (xvaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), 
++ LASX_BUILTIN (xvmulwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmulwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhaddw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhsubw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhsubw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN (xvmaddwev_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN (xvmaddwod_q_du, 
LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwev_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvmaddwod_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvrotr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvrotr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvrotr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvrotr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvadd_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsub_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmskgez_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmsknz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (xvexth_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (xvexth_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvexth_hu_bu, LARCH_UV16HI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvexth_wu_hu, LARCH_UV8SI_FTYPE_UV16HI), ++ LASX_BUILTIN (xvexth_du_wu, LARCH_UV4DI_FTYPE_UV8SI), ++ LASX_BUILTIN (xvexth_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ LASX_BUILTIN (xvrotri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrotri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrotri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrotri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvextl_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlrni_d_q, 
LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlrni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrarni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI) + }; + + /* Index I is the function declaration for loongarch_builtins[I], or null if +@@ -192,11 +2275,46 @@ static GTY (()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; + using the instruction code or return null if not defined for the target. */ + static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; + ++ ++/* MODE is a vector mode whose elements have type TYPE. Return the type ++ of the vector itself. */ ++ ++static tree ++loongarch_builtin_vector_type (tree type, machine_mode mode) ++{ ++ static tree types[2 * (int) MAX_MACHINE_MODE]; ++ int mode_index; ++ ++ mode_index = (int) mode; ++ ++ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)) ++ mode_index += MAX_MACHINE_MODE; ++ ++ if (types[mode_index] == NULL_TREE) ++ types[mode_index] = build_vector_type_for_mode (type, mode); ++ return types[mode_index]; ++} ++ ++/* Return a type for 'const volatile void *'. */ ++ ++static tree ++loongarch_build_cvpointer_type (void) ++{ ++ static tree cache; ++ ++ if (cache == NULL_TREE) ++ cache = build_pointer_type (build_qualified_type (void_type_node, ++ TYPE_QUAL_CONST ++ | TYPE_QUAL_VOLATILE)); ++ return cache; ++} ++ + /* Source-level argument types. */ + #define LARCH_ATYPE_VOID void_type_node + #define LARCH_ATYPE_INT integer_type_node + #define LARCH_ATYPE_POINTER ptr_type_node +- ++#define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type () ++#define LARCH_ATYPE_BOOLEAN boolean_type_node + /* Standard mode-based argument types. 
*/ + #define LARCH_ATYPE_QI intQI_type_node + #define LARCH_ATYPE_UQI unsigned_intQI_type_node +@@ -209,6 +2327,72 @@ static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; + #define LARCH_ATYPE_SF float_type_node + #define LARCH_ATYPE_DF double_type_node + ++/* Vector argument types. */ ++#define LARCH_ATYPE_V2SF \ ++ loongarch_builtin_vector_type (float_type_node, V2SFmode) ++#define LARCH_ATYPE_V2HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V2HImode) ++#define LARCH_ATYPE_V2SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V2SImode) ++#define LARCH_ATYPE_V4QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V4QImode) ++#define LARCH_ATYPE_V4HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V4HImode) ++#define LARCH_ATYPE_V8QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V8QImode) ++ ++#define LARCH_ATYPE_V2DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V2DImode) ++#define LARCH_ATYPE_V4SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V4SImode) ++#define LARCH_ATYPE_V8HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V8HImode) ++#define LARCH_ATYPE_V16QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V16QImode) ++#define LARCH_ATYPE_V2DF \ ++ loongarch_builtin_vector_type (double_type_node, V2DFmode) ++#define LARCH_ATYPE_V4SF \ ++ loongarch_builtin_vector_type (float_type_node, V4SFmode) ++ ++/* LoongArch ASX. */ ++#define LARCH_ATYPE_V4DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V4DImode) ++#define LARCH_ATYPE_V8SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V8SImode) ++#define LARCH_ATYPE_V16HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V16HImode) ++#define LARCH_ATYPE_V32QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V32QImode) ++#define LARCH_ATYPE_V4DF \ ++ loongarch_builtin_vector_type (double_type_node, V4DFmode) ++#define LARCH_ATYPE_V8SF \ ++ loongarch_builtin_vector_type (float_type_node, V8SFmode) ++ ++#define LARCH_ATYPE_UV2DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V2DImode) ++#define LARCH_ATYPE_UV4SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V4SImode) ++#define LARCH_ATYPE_UV8HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V8HImode) ++#define LARCH_ATYPE_UV16QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V16QImode) ++ ++#define LARCH_ATYPE_UV4DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V4DImode) ++#define LARCH_ATYPE_UV8SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V8SImode) ++#define LARCH_ATYPE_UV16HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V16HImode) ++#define LARCH_ATYPE_UV32QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V32QImode) ++ ++#define LARCH_ATYPE_UV2SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V2SImode) ++#define LARCH_ATYPE_UV4HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V4HImode) ++#define LARCH_ATYPE_UV8QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V8QImode) ++ + /* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists + their associated LARCH_ATYPEs. */ + #define LARCH_FTYPE_ATYPES1(A, B) LARCH_ATYPE_##A, LARCH_ATYPE_##B +@@ -282,6 +2466,108 @@ loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) + return loongarch_builtin_decls[code]; + } + ++/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. 
*/ ++ ++tree ++loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, ++ tree type_in) ++{ ++ machine_mode in_mode, out_mode; ++ int in_n, out_n; ++ ++ if (TREE_CODE (type_out) != VECTOR_TYPE ++ || TREE_CODE (type_in) != VECTOR_TYPE ++ || !ISA_HAS_LSX) ++ return NULL_TREE; ++ ++ out_mode = TYPE_MODE (TREE_TYPE (type_out)); ++ out_n = TYPE_VECTOR_SUBPARTS (type_out); ++ in_mode = TYPE_MODE (TREE_TYPE (type_in)); ++ in_n = TYPE_VECTOR_SUBPARTS (type_in); ++ ++ /* INSN is the name of the associated instruction pattern, without ++ the leading CODE_FOR_. */ ++#define LARCH_GET_BUILTIN(INSN) \ ++ loongarch_builtin_decls[loongarch_get_builtin_decl_index[CODE_FOR_##INSN]] ++ ++ switch (fn) ++ { ++ CASE_CFN_CEIL: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_s); ++ } ++ break; ++ ++ CASE_CFN_TRUNC: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_s); ++ } ++ break; ++ ++ CASE_CFN_RINT: ++ CASE_CFN_ROUND: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrint_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrint_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrint_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrint_s); ++ } ++ break; ++ ++ CASE_CFN_FLOOR: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_s); ++ } ++ break; ++ ++ default: ++ break; ++ } ++ ++ return NULL_TREE; ++} ++ + /* Take argument ARGNO from EXP's argument list and convert it into + an expand operand. Store the operand in *OP. */ + +@@ -297,6 +2583,15 @@ loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, + create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg))); + } + ++/* Return a const_int vector of VAL with mode MODE. */ ++ ++rtx ++loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) ++{ ++ rtx c = gen_int_mode (val, GET_MODE_INNER (mode)); ++ return gen_const_vec_duplicate (mode, c); ++} ++ + /* Expand instruction ICODE as part of a built-in function sequence. + Use the first NOPS elements of OPS as the instruction's operands. 
+ HAS_TARGET_P is true if operand 0 is a target; it is false if the +@@ -308,7 +2603,340 @@ static rtx + loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + struct expand_operand *ops, bool has_target_p) + { +- if (!maybe_expand_insn (icode, nops, ops)) ++ machine_mode imode; ++ int rangelo = 0, rangehi = 0, error_opno = 0; ++ ++ switch (icode) ++ { ++ case CODE_FOR_lsx_vaddi_bu: ++ case CODE_FOR_lsx_vaddi_hu: ++ case CODE_FOR_lsx_vaddi_wu: ++ case CODE_FOR_lsx_vaddi_du: ++ case CODE_FOR_lsx_vslti_bu: ++ case CODE_FOR_lsx_vslti_hu: ++ case CODE_FOR_lsx_vslti_wu: ++ case CODE_FOR_lsx_vslti_du: ++ case CODE_FOR_lsx_vslei_bu: ++ case CODE_FOR_lsx_vslei_hu: ++ case CODE_FOR_lsx_vslei_wu: ++ case CODE_FOR_lsx_vslei_du: ++ case CODE_FOR_lsx_vmaxi_bu: ++ case CODE_FOR_lsx_vmaxi_hu: ++ case CODE_FOR_lsx_vmaxi_wu: ++ case CODE_FOR_lsx_vmaxi_du: ++ case CODE_FOR_lsx_vmini_bu: ++ case CODE_FOR_lsx_vmini_hu: ++ case CODE_FOR_lsx_vmini_wu: ++ case CODE_FOR_lsx_vmini_du: ++ case CODE_FOR_lsx_vsubi_bu: ++ case CODE_FOR_lsx_vsubi_hu: ++ case CODE_FOR_lsx_vsubi_wu: ++ case CODE_FOR_lsx_vsubi_du: ++ case CODE_FOR_lasx_xvaddi_bu: ++ case CODE_FOR_lasx_xvaddi_hu: ++ case CODE_FOR_lasx_xvaddi_wu: ++ case CODE_FOR_lasx_xvaddi_du: ++ case CODE_FOR_lasx_xvslti_bu: ++ case CODE_FOR_lasx_xvslti_hu: ++ case CODE_FOR_lasx_xvslti_wu: ++ case CODE_FOR_lasx_xvslti_du: ++ case CODE_FOR_lasx_xvslei_bu: ++ case CODE_FOR_lasx_xvslei_hu: ++ case CODE_FOR_lasx_xvslei_wu: ++ case CODE_FOR_lasx_xvslei_du: ++ case CODE_FOR_lasx_xvmaxi_bu: ++ case CODE_FOR_lasx_xvmaxi_hu: ++ case CODE_FOR_lasx_xvmaxi_wu: ++ case CODE_FOR_lasx_xvmaxi_du: ++ case CODE_FOR_lasx_xvmini_bu: ++ case CODE_FOR_lasx_xvmini_hu: ++ case CODE_FOR_lasx_xvmini_wu: ++ case CODE_FOR_lasx_xvmini_du: ++ case CODE_FOR_lasx_xvsubi_bu: ++ case CODE_FOR_lasx_xvsubi_hu: ++ case CODE_FOR_lasx_xvsubi_wu: ++ case CODE_FOR_lasx_xvsubi_du: ++ gcc_assert (has_target_p && nops == 3); ++ /* We only generate a vector of constants iff the second argument ++ is an immediate. We also validate the range of the immediate. 
*/ ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = 0; ++ rangehi = 31; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vseqi_b: ++ case CODE_FOR_lsx_vseqi_h: ++ case CODE_FOR_lsx_vseqi_w: ++ case CODE_FOR_lsx_vseqi_d: ++ case CODE_FOR_lsx_vslti_b: ++ case CODE_FOR_lsx_vslti_h: ++ case CODE_FOR_lsx_vslti_w: ++ case CODE_FOR_lsx_vslti_d: ++ case CODE_FOR_lsx_vslei_b: ++ case CODE_FOR_lsx_vslei_h: ++ case CODE_FOR_lsx_vslei_w: ++ case CODE_FOR_lsx_vslei_d: ++ case CODE_FOR_lsx_vmaxi_b: ++ case CODE_FOR_lsx_vmaxi_h: ++ case CODE_FOR_lsx_vmaxi_w: ++ case CODE_FOR_lsx_vmaxi_d: ++ case CODE_FOR_lsx_vmini_b: ++ case CODE_FOR_lsx_vmini_h: ++ case CODE_FOR_lsx_vmini_w: ++ case CODE_FOR_lsx_vmini_d: ++ case CODE_FOR_lasx_xvseqi_b: ++ case CODE_FOR_lasx_xvseqi_h: ++ case CODE_FOR_lasx_xvseqi_w: ++ case CODE_FOR_lasx_xvseqi_d: ++ case CODE_FOR_lasx_xvslti_b: ++ case CODE_FOR_lasx_xvslti_h: ++ case CODE_FOR_lasx_xvslti_w: ++ case CODE_FOR_lasx_xvslti_d: ++ case CODE_FOR_lasx_xvslei_b: ++ case CODE_FOR_lasx_xvslei_h: ++ case CODE_FOR_lasx_xvslei_w: ++ case CODE_FOR_lasx_xvslei_d: ++ case CODE_FOR_lasx_xvmaxi_b: ++ case CODE_FOR_lasx_xvmaxi_h: ++ case CODE_FOR_lasx_xvmaxi_w: ++ case CODE_FOR_lasx_xvmaxi_d: ++ case CODE_FOR_lasx_xvmini_b: ++ case CODE_FOR_lasx_xvmini_h: ++ case CODE_FOR_lasx_xvmini_w: ++ case CODE_FOR_lasx_xvmini_d: ++ gcc_assert (has_target_p && nops == 3); ++ /* We only generate a vector of constants iff the second argument ++ is an immediate. We also validate the range of the immediate. */ ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = -16; ++ rangehi = 15; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vandi_b: ++ case CODE_FOR_lsx_vori_b: ++ case CODE_FOR_lsx_vnori_b: ++ case CODE_FOR_lsx_vxori_b: ++ case CODE_FOR_lasx_xvandi_b: ++ case CODE_FOR_lasx_xvori_b: ++ case CODE_FOR_lasx_xvnori_b: ++ case CODE_FOR_lasx_xvxori_b: ++ gcc_assert (has_target_p && nops == 3); ++ if (!CONST_INT_P (ops[2].value)) ++ break; ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ break; ++ ++ case CODE_FOR_lsx_vbitseli_b: ++ case CODE_FOR_lasx_xvbitseli_b: ++ gcc_assert (has_target_p && nops == 4); ++ if (!CONST_INT_P (ops[3].value)) ++ break; ++ ops[3].mode = ops[0].mode; ++ ops[3].value = loongarch_gen_const_int_vector (ops[3].mode, ++ INTVAL (ops[3].value)); ++ break; ++ ++ case CODE_FOR_lsx_vreplgr2vr_b: ++ case CODE_FOR_lsx_vreplgr2vr_h: ++ case CODE_FOR_lsx_vreplgr2vr_w: ++ case CODE_FOR_lsx_vreplgr2vr_d: ++ case CODE_FOR_lasx_xvreplgr2vr_b: ++ case CODE_FOR_lasx_xvreplgr2vr_h: ++ case CODE_FOR_lasx_xvreplgr2vr_w: ++ case CODE_FOR_lasx_xvreplgr2vr_d: ++ /* Map the built-ins to vector fill operations. We need fix up the mode ++ for the element being inserted. 
*/ ++ gcc_assert (has_target_p && nops == 2); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ break; ++ ++ case CODE_FOR_lsx_vilvh_b: ++ case CODE_FOR_lsx_vilvh_h: ++ case CODE_FOR_lsx_vilvh_w: ++ case CODE_FOR_lsx_vilvh_d: ++ case CODE_FOR_lsx_vilvl_b: ++ case CODE_FOR_lsx_vilvl_h: ++ case CODE_FOR_lsx_vilvl_w: ++ case CODE_FOR_lsx_vilvl_d: ++ case CODE_FOR_lsx_vpackev_b: ++ case CODE_FOR_lsx_vpackev_h: ++ case CODE_FOR_lsx_vpackev_w: ++ case CODE_FOR_lsx_vpackod_b: ++ case CODE_FOR_lsx_vpackod_h: ++ case CODE_FOR_lsx_vpackod_w: ++ case CODE_FOR_lsx_vpickev_b: ++ case CODE_FOR_lsx_vpickev_h: ++ case CODE_FOR_lsx_vpickev_w: ++ case CODE_FOR_lsx_vpickod_b: ++ case CODE_FOR_lsx_vpickod_h: ++ case CODE_FOR_lsx_vpickod_w: ++ case CODE_FOR_lasx_xvilvh_b: ++ case CODE_FOR_lasx_xvilvh_h: ++ case CODE_FOR_lasx_xvilvh_w: ++ case CODE_FOR_lasx_xvilvh_d: ++ case CODE_FOR_lasx_xvilvl_b: ++ case CODE_FOR_lasx_xvilvl_h: ++ case CODE_FOR_lasx_xvilvl_w: ++ case CODE_FOR_lasx_xvilvl_d: ++ case CODE_FOR_lasx_xvpackev_b: ++ case CODE_FOR_lasx_xvpackev_h: ++ case CODE_FOR_lasx_xvpackev_w: ++ case CODE_FOR_lasx_xvpackod_b: ++ case CODE_FOR_lasx_xvpackod_h: ++ case CODE_FOR_lasx_xvpackod_w: ++ case CODE_FOR_lasx_xvpickev_b: ++ case CODE_FOR_lasx_xvpickev_h: ++ case CODE_FOR_lasx_xvpickev_w: ++ case CODE_FOR_lasx_xvpickod_b: ++ case CODE_FOR_lasx_xvpickod_h: ++ case CODE_FOR_lasx_xvpickod_w: ++ /* Swap the operands 1 and 2 for interleave operations. Built-ins follow ++ convention of ISA, which have op1 as higher component and op2 as lower ++ component. However, the VEC_PERM op in tree and vec_concat in RTL ++ expects first operand to be lower component, because of which this ++ swap is needed for builtins. */ ++ gcc_assert (has_target_p && nops == 3); ++ std::swap (ops[1], ops[2]); ++ break; ++ ++ case CODE_FOR_lsx_vslli_b: ++ case CODE_FOR_lsx_vslli_h: ++ case CODE_FOR_lsx_vslli_w: ++ case CODE_FOR_lsx_vslli_d: ++ case CODE_FOR_lsx_vsrai_b: ++ case CODE_FOR_lsx_vsrai_h: ++ case CODE_FOR_lsx_vsrai_w: ++ case CODE_FOR_lsx_vsrai_d: ++ case CODE_FOR_lsx_vsrli_b: ++ case CODE_FOR_lsx_vsrli_h: ++ case CODE_FOR_lsx_vsrli_w: ++ case CODE_FOR_lsx_vsrli_d: ++ case CODE_FOR_lasx_xvslli_b: ++ case CODE_FOR_lasx_xvslli_h: ++ case CODE_FOR_lasx_xvslli_w: ++ case CODE_FOR_lasx_xvslli_d: ++ case CODE_FOR_lasx_xvsrai_b: ++ case CODE_FOR_lasx_xvsrai_h: ++ case CODE_FOR_lasx_xvsrai_w: ++ case CODE_FOR_lasx_xvsrai_d: ++ case CODE_FOR_lasx_xvsrli_b: ++ case CODE_FOR_lasx_xvsrli_h: ++ case CODE_FOR_lasx_xvsrli_w: ++ case CODE_FOR_lasx_xvsrli_d: ++ gcc_assert (has_target_p && nops == 3); ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = 0; ++ rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vinsgr2vr_b: ++ case CODE_FOR_lsx_vinsgr2vr_h: ++ case CODE_FOR_lsx_vinsgr2vr_w: ++ case CODE_FOR_lsx_vinsgr2vr_d: ++ /* Map the built-ins to insert operations. We need to swap operands, ++ fix up the mode for the element being inserted, and generate ++ a bit mask for vec_merge. 
*/ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ ++ /* Map the built-ins to element insert operations. We need to swap ++ operands and generate a bit mask. */ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ std::swap (ops[1], ops[3]); ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ ++ case CODE_FOR_lsx_vshuf4i_b: ++ case CODE_FOR_lsx_vshuf4i_h: ++ case CODE_FOR_lsx_vshuf4i_w: ++ case CODE_FOR_lsx_vshuf4i_w_f: ++ gcc_assert (has_target_p && nops == 3); ++ ops[2].value = loongarch_gen_const_int_vector_shuffle (ops[0].mode, ++ INTVAL (ops[2].value)); ++ break; ++ ++ case CODE_FOR_lasx_xvinsgr2vr_w: ++ case CODE_FOR_lasx_xvinsgr2vr_d: ++ /* Map the built-ins to insert operations. We need to swap operands, ++ fix up the mode for the element being inserted, and generate ++ a bit mask for vec_merge. */ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (error_opno != 0) ++ { ++ error ("argument %d to the built-in must be a constant" ++ " in range %d to %d", error_opno, rangelo, rangehi); ++ return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx; ++ } ++ else if (!maybe_expand_insn (icode, nops, ops)) + { + error ("invalid argument to built-in function"); + return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx; +@@ -342,6 +2970,50 @@ loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp, + return loongarch_expand_builtin_insn (icode, opno, ops, has_target_p); + } + ++/* Expand an LSX built-in for a compare and branch instruction specified by ++ ICODE, set a general-purpose register to 1 if the branch was taken, ++ 0 otherwise. */ ++ ++static rtx ++loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp) ++{ ++ struct expand_operand ops[3]; ++ rtx_insn *cbranch; ++ rtx_code_label *true_label, *done_label; ++ rtx cmp_result; ++ ++ true_label = gen_label_rtx (); ++ done_label = gen_label_rtx (); ++ ++ create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp))); ++ loongarch_prepare_builtin_arg (&ops[1], exp, 0); ++ create_fixed_operand (&ops[2], const0_rtx); ++ ++ /* Make sure that the operand 1 is a REG. */ ++ if (GET_CODE (ops[1].value) != REG) ++ ops[1].value = force_reg (ops[1].mode, ops[1].value); ++ ++ if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX) ++ error ("failed to expand built-in function"); ++ ++ cmp_result = gen_reg_rtx (SImode); ++ ++ /* First assume that CMP_RESULT is false. 
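For the insert builtins just handled, the lane index is turned into a one-hot vec_merge mask, and an out-of-range index is routed to the same range diagnostic. A rough standalone illustration of the mask computation (the helper name insert_merge_mask is made up for this sketch and is not part of the patch):

/* Inserting into lane IDX of an N-lane vector uses the one-hot mask 1 << IDX;
   returning 0 here stands in for the error path taken on an out-of-range lane.  */
static unsigned
insert_merge_mask (unsigned nunits, unsigned idx)
{
  return idx < nunits ? 1u << idx : 0u;
}
/* e.g. a V4SI destination with IDX == 2 yields mask 0b0100; at the source level
   this corresponds to __lsx_vinsgr2vr_w (v, x, 2) with the lane index a literal
   constant in [0, 3].  */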
*/ ++ loongarch_emit_move (cmp_result, const0_rtx); ++ ++ /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise. */ ++ emit_jump_insn (cbranch); ++ emit_jump_insn (gen_jump (done_label)); ++ emit_barrier (); ++ ++ /* Set CMP_RESULT to true if the branch was taken. */ ++ emit_label (true_label); ++ loongarch_emit_move (cmp_result, const1_rtx); ++ ++ emit_label (done_label); ++ return cmp_result; ++} ++ + /* Implement TARGET_EXPAND_BUILTIN. */ + + rtx +@@ -362,10 +3034,16 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + switch (d->builtin_type) + { + case LARCH_BUILTIN_DIRECT: ++ case LARCH_BUILTIN_LSX: ++ case LARCH_BUILTIN_LASX: + return loongarch_expand_builtin_direct (d->icode, target, exp, true); + + case LARCH_BUILTIN_DIRECT_NO_TARGET: + return loongarch_expand_builtin_direct (d->icode, target, exp, false); ++ ++ case LARCH_BUILTIN_LSX_TEST_BRANCH: ++ case LARCH_BUILTIN_LASX_TEST_BRANCH: ++ return loongarch_expand_builtin_lsx_test_branch (d->icode, exp); + } + gcc_unreachable (); + } +diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc +index d6e3e19f0..76c8ea8db 100644 +--- a/gcc/config/loongarch/loongarch-c.cc ++++ b/gcc/config/loongarch/loongarch-c.cc +@@ -61,8 +61,11 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_assert ("cpu=loongarch"); + builtin_define ("__loongarch__"); + +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", LARCH_ACTUAL_ARCH); +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", LARCH_ACTUAL_TUNE); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune); ++ ++ LARCH_CPP_SET_PROCESSOR ("__loongarch_arch", la_target.cpu_arch); ++ LARCH_CPP_SET_PROCESSOR ("__loongarch_tune", la_target.cpu_tune); + + /* Base architecture / ABI. */ + if (TARGET_64BIT) +@@ -99,6 +102,21 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + else + builtin_define ("__loongarch_frlen=0"); + ++ if (ISA_HAS_LSX) ++ { ++ builtin_define ("__loongarch_simd"); ++ builtin_define ("__loongarch_sx"); ++ ++ if (!ISA_HAS_LASX) ++ builtin_define ("__loongarch_simd_width=128"); ++ } ++ ++ if (ISA_HAS_LASX) ++ { ++ builtin_define ("__loongarch_asx"); ++ builtin_define ("__loongarch_simd_width=256"); ++ } ++ + /* Native Data Sizes. */ + builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); + builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); +diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc +index a886dd932..ea05526d7 100644 +--- a/gcc/config/loongarch/loongarch-cpu.cc ++++ b/gcc/config/loongarch/loongarch-cpu.cc +@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see + #include "tm.h" + #include "diagnostic-core.h" + ++#include "loongarch-def.h" + #include "loongarch-opts.h" + #include "loongarch-cpu.h" + #include "loongarch-str.h" +@@ -80,127 +81,191 @@ get_native_prid_str (void) + } + + /* Fill property tables for CPU_NATIVE. */ +-unsigned int +-fill_native_cpu_config (int p_arch_native, int p_tune_native) ++void ++fill_native_cpu_config (struct loongarch_target *tgt) + { +- int ret_cpu_type; ++ int arch_native_p = tgt->cpu_arch == CPU_NATIVE; ++ int tune_native_p = tgt->cpu_tune == CPU_NATIVE; ++ int native_cpu_type = CPU_NATIVE; + + /* Nothing needs to be done unless "-march/tune=native" + is given or implied. 
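The preprocessor hunk above gives user code a stable way to detect the SIMD configuration chosen at compile time: __loongarch_sx, __loongarch_asx and __loongarch_simd_width (128 or 256). A small usage sketch:

/* Usage sketch only; not part of the patch.  */
#include <stdio.h>

int
main (void)
{
#if defined (__loongarch_asx)
  printf ("LASX enabled, __loongarch_simd_width = %d\n", __loongarch_simd_width);
#elif defined (__loongarch_sx)
  printf ("LSX enabled, __loongarch_simd_width = %d\n", __loongarch_simd_width);
#else
  printf ("no LoongArch SIMD extension enabled\n");
#endif
  return 0;
}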
*/ +- if (!(p_arch_native || p_tune_native)) +- return CPU_NATIVE; ++ if (!arch_native_p && !tune_native_p) ++ return; + + /* Fill cpucfg_cache with the "cpucfg" instruction. */ + cache_cpucfg (); + +- +- /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].base +- With: base architecture (ARCH) +- At: cpucfg_words[1][1:0] */ +- +- #define NATIVE_BASE_ISA (loongarch_cpu_default_isa[CPU_NATIVE].base) +- switch (cpucfg_cache[1] & 0x3) +- { +- case 0x02: +- NATIVE_BASE_ISA = ISA_BASE_LA64V100; +- break; +- +- default: +- if (p_arch_native) +- fatal_error (UNKNOWN_LOCATION, +- "unknown base architecture %<0x%x%>, %qs failed", +- (unsigned int) (cpucfg_cache[1] & 0x3), +- "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); +- } +- +- /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].fpu +- With: FPU type (FP, FP_SP, FP_DP) +- At: cpucfg_words[2][2:0] */ +- +- #define NATIVE_FPU (loongarch_cpu_default_isa[CPU_NATIVE].fpu) +- switch (cpucfg_cache[2] & 0x7) +- { +- case 0x07: +- NATIVE_FPU = ISA_EXT_FPU64; +- break; +- +- case 0x03: +- NATIVE_FPU = ISA_EXT_FPU32; +- break; +- +- case 0x00: +- NATIVE_FPU = ISA_EXT_NOFPU; +- break; +- +- default: +- if (p_arch_native) +- fatal_error (UNKNOWN_LOCATION, +- "unknown FPU type %<0x%x%>, %qs failed", +- (unsigned int) (cpucfg_cache[2] & 0x7), +- "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); +- } +- +- /* Fill: loongarch_cpu_cache[CPU_NATIVE] +- With: cache size info +- At: cpucfg_words[16:20][31:0] */ +- +- int l1d_present = 0, l1u_present = 0; +- int l2d_present = 0; +- uint32_t l1_szword, l2_szword; +- +- l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 cache */ +- l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1 dcache */ +- l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); +- l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; +- +- l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 cache */ +- l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2 dcache */ +- l2_szword = l2d_present ? cpucfg_cache[19]: 0; +- +- loongarch_cpu_cache[CPU_NATIVE].l1d_line_size +- = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(linesize) */ +- +- loongarch_cpu_cache[CPU_NATIVE].l1d_size +- = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ +- * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ +- * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */ +- >> 10; /* in kilobytes */ +- +- loongarch_cpu_cache[CPU_NATIVE].l2d_size +- = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ +- * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ +- * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */ +- >> 10; /* in kilobytes */ +- +- /* Fill: ret_cpu_type ++ /* Fill: tgt->cpu_arch | tgt->cpu_tune + With: processor ID (PRID) + At: cpucfg_words[0][31:0] */ + + switch (cpucfg_cache[0] & 0x00ffff00) + { + case 0x0014c000: /* LA464 */ +- ret_cpu_type = CPU_LA464; ++ native_cpu_type = CPU_LA464; + break; + + default: +- /* Unknown PRID. This is generally harmless as long as +- the properties above can be obtained via "cpucfg". */ +- if (p_tune_native) ++ /* Unknown PRID. */ ++ if (tune_native_p) + inform (UNKNOWN_LOCATION, "unknown processor ID %<0x%x%>, " + "some tuning parameters will fall back to default", + cpucfg_cache[0]); + break; + } + +- /* Properties that cannot be looked up directly using cpucfg. 
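The PRID match above keys off bits [23:8] of cpucfg word 0; only the LA464 core is recognized so far, and an unrecognized ID merely downgrades tuning to the defaults. A standalone restatement of the test (the helper name is made up for this sketch):

/* Illustrative only; mirrors the mask and constant used in this hunk.  */
#include <stdint.h>

static int
is_la464 (uint32_t cpucfg_word0)
{
  return (cpucfg_word0 & 0x00ffff00) == 0x0014c000;
}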
*/ +- loongarch_cpu_issue_rate[CPU_NATIVE] +- = loongarch_cpu_issue_rate[ret_cpu_type]; +- +- loongarch_cpu_multipass_dfa_lookahead[CPU_NATIVE] +- = loongarch_cpu_multipass_dfa_lookahead[ret_cpu_type]; +- +- loongarch_cpu_rtx_cost_data[CPU_NATIVE] +- = loongarch_cpu_rtx_cost_data[ret_cpu_type]; ++ /* if -march=native */ ++ if (arch_native_p) ++ { ++ int tmp; ++ tgt->cpu_arch = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base ++ With: base architecture (ARCH) ++ At: cpucfg_words[1][1:0] */ ++ ++ #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base) ++ switch (cpucfg_cache[1] & 0x3) ++ { ++ case 0x02: ++ tmp = ISA_BASE_LA64V100; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native base architecture %<0x%x%>, " ++ "%qs failed", (unsigned int) (cpucfg_cache[1] & 0x3), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH) ++ warning (0, "base architecture %qs differs from PRID preset %qs", ++ loongarch_isa_base_strings[tmp], ++ loongarch_isa_base_strings[PRESET_ARCH]); ++ ++ /* Use the native value anyways. */ ++ PRESET_ARCH = tmp; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu ++ With: FPU type (FP, FP_SP, FP_DP) ++ At: cpucfg_words[2][2:0] */ ++ ++ #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu) ++ switch (cpucfg_cache[2] & 0x7) ++ { ++ case 0x07: ++ tmp = ISA_EXT_FPU64; ++ break; ++ ++ case 0x03: ++ tmp = ISA_EXT_FPU32; ++ break; ++ ++ case 0x00: ++ tmp = ISA_EXT_NONE; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native FPU type %<0x%x%>, %qs failed", ++ (unsigned int) (cpucfg_cache[2] & 0x7), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU) ++ warning (0, "floating-point unit %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_FPU]); ++ ++ /* Use the native value anyways. */ ++ PRESET_FPU = tmp; ++ ++ ++ /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd ++ With: SIMD extension type (LSX, LASX) ++ At: cpucfg_words[2][7:6] */ ++ ++ #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd) ++ switch (cpucfg_cache[2] & 0xc0) ++ { ++ case 0xc0: ++ tmp = ISA_EXT_SIMD_LASX; ++ break; ++ ++ case 0x40: ++ tmp = ISA_EXT_SIMD_LSX; ++ break; ++ ++ case 0x80: ++ tmp = 0; ++ warning (0, "unknown SIMD extension " ++ "(%qs disabled while %qs is enabled), disabling SIMD", ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LSX], ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LASX]); ++ break; ++ ++ case 0x00: ++ tmp = 0; ++ break; ++ } ++ ++ /* Check consistency with PRID presets. */ ++ ++ /* ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD) ++ warning (0, "SIMD extension %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_SIMD]); ++ */ ++ ++ /* Use the native value anyways. 
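The decode above reads cpucfg word 2: bits [2:0] select the FPU and bits [7:6] the SIMD extensions, with "LASX without LSX" treated as an unknown combination and SIMD disabled. A self-contained sketch of the same mapping (enum and helper names are made up for this sketch):

/* Illustrative only; mirrors the switches in this hunk.  */
#include <stdint.h>

enum { EXT_NONE, EXT_FPU32, EXT_FPU64, EXT_LSX, EXT_LASX };

static int
decode_fpu (uint32_t w2)
{
  switch (w2 & 0x7)
    {
    case 0x7: return EXT_FPU64;
    case 0x3: return EXT_FPU32;
    case 0x0: return EXT_NONE;
    default:  return -1;          /* unknown: the patch reports a fatal error */
    }
}

static int
decode_simd (uint32_t w2)
{
  switch (w2 & 0xc0)
    {
    case 0xc0: return EXT_LASX;   /* LSX and LASX both present */
    case 0x40: return EXT_LSX;    /* LSX only */
    default:   return EXT_NONE;   /* none, or LASX without LSX (warned about) */
    }
}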
*/ ++ PRESET_SIMD = tmp; ++ } + +- return ret_cpu_type; ++ if (tune_native_p) ++ { ++ tgt->cpu_tune = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_cache[tgt->cpu_tune] ++ With: cache size info ++ At: cpucfg_words[16:20][31:0] */ ++ ++ #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune]) ++ struct loongarch_cache native_cache; ++ int l1d_present = 0, l1u_present = 0; ++ int l2d_present = 0; ++ uint32_t l1_szword, l2_szword; ++ ++ l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 */ ++ l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1d */ ++ l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); ++ l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; ++ ++ l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 */ ++ l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2d */ ++ l2_szword = l2d_present ? cpucfg_cache[19]: 0; ++ ++ native_cache.l1d_line_size ++ = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(line) */ ++ ++ native_cache.l1d_size ++ = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(line) */ ++ >> 10; /* in kibibytes */ ++ ++ native_cache.l2d_size ++ = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesz) */ ++ >> 10; /* in kibibytes */ ++ ++ /* Use the native value anyways. */ ++ PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size; ++ PRESET_CACHE.l1d_size = native_cache.l1d_size; ++ PRESET_CACHE.l2d_size = native_cache.l2d_size; ++ } + } +diff --git a/gcc/config/loongarch/loongarch-cpu.h b/gcc/config/loongarch/loongarch-cpu.h +index 93d656f70..eacb38774 100644 +--- a/gcc/config/loongarch/loongarch-cpu.h ++++ b/gcc/config/loongarch/loongarch-cpu.h +@@ -21,9 +21,10 @@ along with GCC; see the file COPYING3. If not see + #define LOONGARCH_CPU_H + + #include "system.h" ++#include "loongarch-def.h" + + void cache_cpucfg (void); +-unsigned int fill_native_cpu_config (int p_arch_native, int p_tune_native); ++void fill_native_cpu_config (struct loongarch_target *tgt); + uint32_t get_native_prid (void); + const char* get_native_prid_str (void); + +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index c8769b7d6..eeb32dbf6 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -21,25 +21,11 @@ along with GCC; see the file COPYING3. If not see + #include "loongarch-def.h" + #include "loongarch-str.h" + +-/* Default RTX cost initializer. */ +-#define COSTS_N_INSNS(N) ((N) * 4) +-#define DEFAULT_COSTS \ +- .fp_add = COSTS_N_INSNS (1), \ +- .fp_mult_sf = COSTS_N_INSNS (2), \ +- .fp_mult_df = COSTS_N_INSNS (4), \ +- .fp_div_sf = COSTS_N_INSNS (6), \ +- .fp_div_df = COSTS_N_INSNS (8), \ +- .int_mult_si = COSTS_N_INSNS (1), \ +- .int_mult_di = COSTS_N_INSNS (1), \ +- .int_div_si = COSTS_N_INSNS (4), \ +- .int_div_di = COSTS_N_INSNS (6), \ +- .branch_cost = 2, \ +- .memory_latency = 4 +- + /* CPU property tables. 
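The cache probe above follows the field layout documented in its comments: bits [30:24] hold log2(line size), bits [23:16] log2(index), and bits [15:0] the set count minus one, with the product reported in KiB. A standalone sketch of the arithmetic (helper names are made up for this sketch):

/* Illustrative only; mirrors the expressions in this hunk.  */
#include <stdint.h>

static unsigned
cache_line_size (uint32_t szword)
{
  return 1u << ((szword & 0x7f000000) >> 24);
}

static unsigned
cache_size_kib (uint32_t szword)
{
  return ((1u << ((szword & 0x00ff0000) >> 16))   /* bits [23:16]: log2(index) */
          * ((szword & 0x0000ffff) + 1)           /* bits [15:0]: sets - 1     */
          * cache_line_size (szword))             /* bits [30:24]: log2(line)  */
         >> 10;                                   /* bytes -> KiB              */
}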
*/ + const char* + loongarch_cpu_strings[N_TUNE_TYPES] = { + [CPU_NATIVE] = STR_CPU_NATIVE, ++ [CPU_ABI_DEFAULT] = STR_CPU_ABI_DEFAULT, + [CPU_LOONGARCH64] = STR_CPU_LOONGARCH64, + [CPU_LA464] = STR_CPU_LA464, + }; +@@ -49,10 +35,12 @@ loongarch_cpu_default_isa[N_ARCH_TYPES] = { + [CPU_LOONGARCH64] = { + .base = ISA_BASE_LA64V100, + .fpu = ISA_EXT_FPU64, ++ .simd = 0, + }, + [CPU_LA464] = { + .base = ISA_BASE_LA64V100, + .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LASX, + }, + }; + +@@ -62,14 +50,44 @@ loongarch_cpu_cache[N_TUNE_TYPES] = { + .l1d_line_size = 64, + .l1d_size = 64, + .l2d_size = 256, ++ .simultaneous_prefetches = 4, + }, + [CPU_LA464] = { + .l1d_line_size = 64, + .l1d_size = 64, + .l2d_size = 256, ++ .simultaneous_prefetches = 4, ++ }, ++}; ++ ++struct loongarch_align ++loongarch_cpu_align[N_TUNE_TYPES] = { ++ [CPU_LOONGARCH64] = { ++ .function = "32", ++ .label = "16", ++ }, ++ [CPU_LA464] = { ++ .function = "32", ++ .label = "16", + }, + }; + ++ ++/* Default RTX cost initializer. */ ++#define COSTS_N_INSNS(N) ((N) * 4) ++#define DEFAULT_COSTS \ ++ .fp_add = COSTS_N_INSNS (1), \ ++ .fp_mult_sf = COSTS_N_INSNS (2), \ ++ .fp_mult_df = COSTS_N_INSNS (4), \ ++ .fp_div_sf = COSTS_N_INSNS (6), \ ++ .fp_div_df = COSTS_N_INSNS (8), \ ++ .int_mult_si = COSTS_N_INSNS (1), \ ++ .int_mult_di = COSTS_N_INSNS (1), \ ++ .int_div_si = COSTS_N_INSNS (4), \ ++ .int_div_di = COSTS_N_INSNS (6), \ ++ .branch_cost = 6, \ ++ .memory_latency = 4 ++ + /* The following properties cannot be looked up directly using "cpucfg". + So it is necessary to provide a default value for "unknown native" + tune targets (i.e. -mtune=native while PRID does not correspond to +@@ -89,7 +107,7 @@ loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = { + }; + + /* RTX costs to use when optimizing for size. */ +-extern const struct loongarch_rtx_cost_data ++const struct loongarch_rtx_cost_data + loongarch_rtx_cost_optimize_size = { + .fp_add = 4, + .fp_mult_sf = 4, +@@ -100,7 +118,7 @@ loongarch_rtx_cost_optimize_size = { + .int_mult_di = 4, + .int_div_si = 4, + .int_div_di = 4, +- .branch_cost = 2, ++ .branch_cost = 6, + .memory_latency = 4, + }; + +@@ -130,9 +148,11 @@ loongarch_isa_base_strings[N_ISA_BASE_TYPES] = { + + const char* + loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = { +- [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, ++ [ISA_EXT_NONE] = STR_NONE, + [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32, +- [ISA_EXT_NOFPU] = STR_ISA_EXT_NOFPU, ++ [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, ++ [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX, ++ [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX, + }; + + const char* +@@ -152,28 +172,34 @@ loongarch_cmodel_strings[] = { + [CMODEL_NORMAL] = STR_CMODEL_NORMAL, + [CMODEL_TINY] = STR_CMODEL_TINY, + [CMODEL_TINY_STATIC] = STR_CMODEL_TS, ++ [CMODEL_MEDIUM] = STR_CMODEL_MEDIUM, + [CMODEL_LARGE] = STR_CMODEL_LARGE, + [CMODEL_EXTREME] = STR_CMODEL_EXTREME, + }; + +-const char* +-loongarch_switch_strings[] = { +- [SW_SOFT_FLOAT] = OPTSTR_SOFT_FLOAT, +- [SW_SINGLE_FLOAT] = OPTSTR_SINGLE_FLOAT, +- [SW_DOUBLE_FLOAT] = OPTSTR_DOUBLE_FLOAT, +-}; +- + + /* ABI-related definitions. 
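For orientation, the cost initializers above scale most entries by COSTS_N_INSNS, while branch_cost and memory_latency are plain units; note this patch raises branch_cost from 2 to 6. A tiny illustration of the resulting numbers:

/* Illustrative only; values taken from DEFAULT_COSTS in this hunk.  */
#define COSTS_N_INSNS(N) ((N) * 4)

static const int fp_div_df_cost  = COSTS_N_INSNS (8);  /* 32 */
static const int int_div_si_cost = COSTS_N_INSNS (4);  /* 16 */
static const int branch_cost     = 6;  /* plain units; raised from 2 to 6 */
static const int memory_latency  = 4;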
*/ + const struct loongarch_isa + abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { + [ABI_BASE_LP64D] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = 0 ++ }, + }, + [ABI_BASE_LP64F] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU32}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU32, ++ .simd = 0 ++ }, + }, + [ABI_BASE_LP64S] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_NOFPU}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_NONE, ++ .simd = 0 ++ }, + }, + }; +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +index c2c35b6ba..0aee7dc19 100644 +--- a/gcc/config/loongarch/loongarch-def.h ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -59,11 +59,13 @@ extern const char* loongarch_isa_base_strings[]; + + /* enum isa_ext_* */ + extern const char* loongarch_isa_ext_strings[]; +-#define ISA_EXT_NOFPU 0 ++#define ISA_EXT_NONE 0 + #define ISA_EXT_FPU32 1 + #define ISA_EXT_FPU64 2 + #define N_ISA_EXT_FPU_TYPES 3 +-#define N_ISA_EXT_TYPES 3 ++#define ISA_EXT_SIMD_LSX 3 ++#define ISA_EXT_SIMD_LASX 4 ++#define N_ISA_EXT_TYPES 5 + + /* enum abi_base */ + extern const char* loongarch_abi_base_strings[]; +@@ -72,6 +74,16 @@ extern const char* loongarch_abi_base_strings[]; + #define ABI_BASE_LP64S 2 + #define N_ABI_BASE_TYPES 3 + ++#define TO_LP64_ABI_BASE(C) (C) ++ ++#define ABI_FPU_64(abi_base) \ ++ (abi_base == ABI_BASE_LP64D) ++#define ABI_FPU_32(abi_base) \ ++ (abi_base == ABI_BASE_LP64F) ++#define ABI_FPU_NONE(abi_base) \ ++ (abi_base == ABI_BASE_LP64S) ++ ++ + /* enum abi_ext */ + extern const char* loongarch_abi_ext_strings[]; + #define ABI_EXT_BASE 0 +@@ -82,59 +94,49 @@ extern const char* loongarch_cmodel_strings[]; + #define CMODEL_NORMAL 0 + #define CMODEL_TINY 1 + #define CMODEL_TINY_STATIC 2 +-#define CMODEL_LARGE 3 +-#define CMODEL_EXTREME 4 +-#define N_CMODEL_TYPES 5 +- +-/* enum switches */ +-/* The "SW_" codes represent command-line switches (options that +- accept no parameters). Definition for other switches that affects +- the target ISA / ABI configuration will also be appended here +- in the future. */ +- +-extern const char* loongarch_switch_strings[]; +-#define SW_SOFT_FLOAT 0 +-#define SW_SINGLE_FLOAT 1 +-#define SW_DOUBLE_FLOAT 2 +-#define N_SWITCH_TYPES 3 ++#define CMODEL_MEDIUM 3 ++#define CMODEL_LARGE 4 ++#define CMODEL_EXTREME 5 ++#define N_CMODEL_TYPES 6 + + /* The common default value for variables whose assignments + are triggered by command-line options. */ + +-#define M_OPTION_NOT_SEEN -1 +-#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPTION_NOT_SEEN) ++#define M_OPT_UNSET -1 ++#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPT_UNSET) + + + /* Internal representation of the target. 
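The abi_minimal_isa table above records the weakest FPU each base ABI can be compiled for. A sketch of the same mapping in isolation (enum and helper names are made up for this sketch):

/* Illustrative only; mirrors the abi_minimal_isa initializers in this hunk.  */
enum { FPU_NONE, FPU_32, FPU_64 };

static int
abi_minimal_fpu (int lp64_variant)   /* 0 = lp64d, 1 = lp64f, 2 = lp64s */
{
  switch (lp64_variant)
    {
    case 0:  return FPU_64;   /* lp64d passes FP arguments in 64-bit FPRs */
    case 1:  return FPU_32;   /* lp64f needs at least a 32-bit FPU */
    default: return FPU_NONE; /* lp64s is soft-float */
    }
}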
*/ + struct loongarch_isa + { +- unsigned char base; /* ISA_BASE_ */ +- unsigned char fpu; /* ISA_EXT_FPU_ */ ++ int base; /* ISA_BASE_ */ ++ int fpu; /* ISA_EXT_FPU_ */ ++ int simd; /* ISA_EXT_SIMD_ */ + }; + + struct loongarch_abi + { +- unsigned char base; /* ABI_BASE_ */ +- unsigned char ext; /* ABI_EXT_ */ ++ int base; /* ABI_BASE_ */ ++ int ext; /* ABI_EXT_ */ + }; + + struct loongarch_target + { + struct loongarch_isa isa; + struct loongarch_abi abi; +- unsigned char cpu_arch; /* CPU_ */ +- unsigned char cpu_tune; /* same */ +- unsigned char cpu_native; /* same */ +- unsigned char cmodel; /* CMODEL_ */ ++ int cpu_arch; /* CPU_ */ ++ int cpu_tune; /* same */ ++ int cmodel; /* CMODEL_ */ + }; + + /* CPU properties. */ + /* index */ + #define CPU_NATIVE 0 +-#define CPU_LOONGARCH64 1 +-#define CPU_LA464 2 +-#define N_ARCH_TYPES 3 +-#define N_TUNE_TYPES 3 ++#define CPU_ABI_DEFAULT 1 ++#define CPU_LOONGARCH64 2 ++#define CPU_LA464 3 ++#define N_ARCH_TYPES 4 ++#define N_TUNE_TYPES 4 + + /* parallel tables. */ + extern const char* loongarch_cpu_strings[]; +@@ -143,6 +145,7 @@ extern int loongarch_cpu_issue_rate[]; + extern int loongarch_cpu_multipass_dfa_lookahead[]; + + extern struct loongarch_cache loongarch_cpu_cache[]; ++extern struct loongarch_align loongarch_cpu_align[]; + extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[]; + + #ifdef __cplusplus +diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc +index 0adcc923b..b3626984d 100644 +--- a/gcc/config/loongarch/loongarch-driver.cc ++++ b/gcc/config/loongarch/loongarch-driver.cc +@@ -26,122 +26,137 @@ along with GCC; see the file COPYING3. If not see + #include "tm.h" + #include "obstack.h" + #include "diagnostic-core.h" ++#include "opts.h" + + #include "loongarch-opts.h" + #include "loongarch-driver.h" + +-static int +- opt_arch_driver = M_OPTION_NOT_SEEN, +- opt_tune_driver = M_OPTION_NOT_SEEN, +- opt_fpu_driver = M_OPTION_NOT_SEEN, +- opt_abi_base_driver = M_OPTION_NOT_SEEN, +- opt_abi_ext_driver = M_OPTION_NOT_SEEN, +- opt_cmodel_driver = M_OPTION_NOT_SEEN; +- +-int opt_switches = 0; +- + /* This flag is set to 1 if we believe that the user might be avoiding + linking (implicitly) against something from the startfile search paths. */ + static int no_link = 0; + +-#define LARCH_DRIVER_SET_M_FLAG(OPTS_ARRAY, N_OPTS, FLAG, STR) \ +- for (int i = 0; i < (N_OPTS); i++) \ +- { \ +- if ((OPTS_ARRAY)[i] != 0) \ +- if (strcmp ((STR), (OPTS_ARRAY)[i]) == 0) \ +- (FLAG) = i; \ +- } +- + /* Use the public obstack from the gcc driver (defined in gcc.c). + This is for allocating space for the returned string. 
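The target structure above, together with M_OPT_UNSET from the header hunk, implements an "unset until an option is seen" convention: the driver code below initializes every field of la_target to -1 and only overwrites a field when the corresponding -m option is parsed, so later passes can distinguish "default" from "explicitly requested". A minimal sketch of that convention (struct and helper names are made up for this sketch):

/* Illustrative only; mirrors the M_OPT_UNSET convention used by this patch.  */
#define M_OPT_UNSET -1
#define M_OPT_ABSENT(opt) ((opt) == M_OPT_UNSET)

struct target_sketch { int abi_base, fpu, arch, tune, cmodel; };

static void
init_target (struct target_sketch *t)
{
  t->abi_base = t->fpu = t->arch = t->tune = t->cmodel = M_OPT_UNSET;
}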
*/ + extern struct obstack opts_obstack; + +-#define APPEND_LTR(S) \ +- obstack_grow (&opts_obstack, (const void*) (S), \ +- sizeof ((S)) / sizeof (char) -1) +- +-#define APPEND_VAL(S) \ +- obstack_grow (&opts_obstack, (const void*) (S), strlen ((S))) ++const char* ++la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED) ++{ ++ /* Initialize all fields of la_target to -1 */ ++ loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, ++ M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET); ++ return ""; ++} + ++const char* ++driver_set_no_link (int argc ATTRIBUTE_UNUSED, ++ const char **argv ATTRIBUTE_UNUSED) ++{ ++ no_link = 1; ++ return ""; ++} + + const char* +-driver_set_m_flag (int argc, const char **argv) ++driver_set_m_parm (int argc, const char **argv) + { +- int parm_off = 0; ++ gcc_assert (argc == 2); ++ ++#define LARCH_DRIVER_PARSE_PARM(OPT_IDX, NAME, OPTSTR_LIST, \ ++ OPT_IDX_LO, OPT_IDX_HI) \ ++ if (strcmp (argv[0], OPTSTR_##NAME) == 0) \ ++ for (int i = (OPT_IDX_LO); i < (OPT_IDX_HI); i++) \ ++ { \ ++ if ((OPTSTR_LIST)[i] != 0) \ ++ if (strcmp (argv[1], (OPTSTR_LIST)[i]) == 0) \ ++ { \ ++ (OPT_IDX) = i; \ ++ return 0; \ ++ } \ ++ } + +- if (argc != 1) +- return "%eset_m_flag requires exactly 1 argument."; ++ LARCH_DRIVER_PARSE_PARM (la_target.abi.base, ABI_BASE, \ ++ loongarch_abi_base_strings, 0, N_ABI_BASE_TYPES) + +-#undef PARM +-#define PARM (argv[0] + parm_off) ++ LARCH_DRIVER_PARSE_PARM (la_target.isa.fpu, ISA_EXT_FPU, \ ++ loongarch_isa_ext_strings, 0, N_ISA_EXT_FPU_TYPES) + +-/* Note: sizeof (OPTSTR_##NAME) equals the length of "