From 928dbd69ddeb3d2557eabd48d4f340e1b77ba35f Mon Sep 17 00:00:00 2001 From: wangzhaofei806 Date: Tue, 1 Nov 2022 08:51:24 +0800 Subject: [PATCH] support processor core FTC663 of Phytium CPU --- gcc/config/aarch64/aarch64-cores.def | 3 + gcc/config/aarch64/aarch64-cost-tables.h | 104 +++ gcc/config/aarch64/aarch64-tune.md | 2 +- gcc/config/aarch64/aarch64.c | 83 +++ gcc/config/aarch64/aarch64.md | 1 + gcc/config/aarch64/ftc663.md | 800 +++++++++++++++++++++++ 6 files changed, 992 insertions(+), 1 deletion(-) create mode 100644 gcc/config/aarch64/ftc663.md diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def index 9c290292479..96210a42d47 100644 --- a/gcc/config/aarch64/aarch64-cores.def +++ b/gcc/config/aarch64/aarch64-cores.def @@ -125,6 +125,9 @@ AARCH64_CORE("a64fx", a64fx, a64fx, 8_2A, AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_F /* HiSilicon ('H') cores. */ AARCH64_CORE("tsv110", tsv110, tsv110, 8_2A, AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_CRYPTO | AARCH64_FL_F16 | AARCH64_FL_AES | AARCH64_FL_SHA2, tsv110, 0x48, 0xd01, -1) +/* Phytium cores. */ +AARCH64_CORE("ftc663", ftc663, ftc663, 8_2A, AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_CRYPTO | AARCH64_FL_F16 | AARCH64_FL_AES | AARCH64_FL_SHA2, ftc663, 0x70, 0x663, 0x1) + /* ARMv8.3-A Architecture Processors. */ /* Marvell cores (TX3). */ diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h index c6805717f6e..fdb7b986498 100644 --- a/gcc/config/aarch64/aarch64-cost-tables.h +++ b/gcc/config/aarch64/aarch64-cost-tables.h @@ -644,4 +644,108 @@ const struct cpu_cost_table a64fx_extra_costs = } }; +const struct cpu_cost_table ftc663_extra_costs = +{ + /* ALU */ + { + 0, /* arith. */ + 0, /* logical. */ + 0, /* shift. */ + 0, /* shift_reg. */ + COSTS_N_INSNS (1), /* arith_shift. */ + COSTS_N_INSNS (1), /* arith_shift_reg. */ + COSTS_N_INSNS (1), /* log_shift. */ + COSTS_N_INSNS (1), /* log_shift_reg. */ + 0, /* extend. 
*/ + COSTS_N_INSNS (1), /* extend_arith. */ + COSTS_N_INSNS (1), /* bfi. */ + 0, /* bfx. */ + 0, /* clz. */ + 0, /* rev. */ + 0, /* non_exec. */ + true /* non_exec_costs_exec. */ + }, + + { + /* MULT SImode */ + { + COSTS_N_INSNS (2), /* simple. */ + COSTS_N_INSNS (2), /* flag_setting. */ + COSTS_N_INSNS (2), /* extend. */ + COSTS_N_INSNS (2), /* add. */ + COSTS_N_INSNS (2), /* extend_add. */ + COSTS_N_INSNS (11) /* idiv. */ + }, + /* MULT DImode */ + { + COSTS_N_INSNS (3), /* simple. */ + 0, /* flag_setting (N/A). */ + COSTS_N_INSNS (3), /* extend. */ + COSTS_N_INSNS (3), /* add. */ + COSTS_N_INSNS (3), /* extend_add. */ + COSTS_N_INSNS (19) /* idiv. */ + } + }, + /* LD/ST */ + { + COSTS_N_INSNS (3), /* load. */ + COSTS_N_INSNS (3), /* load_sign_extend. */ + COSTS_N_INSNS (3), /* ldrd. */ + COSTS_N_INSNS (3), /* ldm_1st. */ + 1, /* ldm_regs_per_insn_1st. */ + 2, /* ldm_regs_per_insn_subsequent. */ + COSTS_N_INSNS (4), /* loadf. */ + COSTS_N_INSNS (4), /* loadd. */ + COSTS_N_INSNS (4), /* load_unaligned. */ + 0, /* store. */ + 0, /* strd. */ + 0, /* stm_1st. */ + 1, /* stm_regs_per_insn_1st. */ + 2, /* stm_regs_per_insn_subsequent. */ + 0, /* storef. */ + 0, /* stored. */ + COSTS_N_INSNS (1), /* store_unaligned. */ + COSTS_N_INSNS (4), /* loadv. */ + COSTS_N_INSNS (4) /* storev. */ + }, + { + /* FP SFmode */ + { + COSTS_N_INSNS (11), /* div. */ + COSTS_N_INSNS (3), /* mult. */ + COSTS_N_INSNS (6), /* mult_addsub. */ + COSTS_N_INSNS (6), /* fma. */ + COSTS_N_INSNS (3), /* addsub. */ + COSTS_N_INSNS (2), /* fpconst. */ + COSTS_N_INSNS (2), /* neg. */ + COSTS_N_INSNS (2), /* compare. */ + COSTS_N_INSNS (2), /* widen. */ + COSTS_N_INSNS (2), /* narrow. */ + COSTS_N_INSNS (2), /* toint. */ + COSTS_N_INSNS (1), /* fromint. */ + COSTS_N_INSNS (2) /* roundint. */ + }, + /* FP DFmode */ + { + COSTS_N_INSNS (18), /* div. */ + COSTS_N_INSNS (3), /* mult. */ + COSTS_N_INSNS (6), /* mult_addsub. */ + COSTS_N_INSNS (6), /* fma. */ + COSTS_N_INSNS (3), /* addsub. 
*/ + COSTS_N_INSNS (2), /* fpconst. */ + COSTS_N_INSNS (2), /* neg. */ + COSTS_N_INSNS (2), /* compare. */ + COSTS_N_INSNS (2), /* widen. */ + COSTS_N_INSNS (2), /* narrow. */ + COSTS_N_INSNS (2), /* toint. */ + COSTS_N_INSNS (1), /* fromint. */ + COSTS_N_INSNS (2) /* roundint. */ + } + }, + /* Vector */ + { + COSTS_N_INSNS (1) /* alu. */ + } +}; + #endif diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md index 7fda2294b8a..5bb720c5f45 100644 --- a/gcc/config/aarch64/aarch64-tune.md +++ b/gcc/config/aarch64/aarch64-tune.md @@ -1,5 +1,5 @@ ;; -*- buffer-read-only: t -*- ;; Generated automatically by gentune.sh from aarch64-cores.def (define_attr "tune" - "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa65,cortexa65ae,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,zeus,neoversev1,saphira,neoversen2,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55" + "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa65,cortexa65ae,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,ftc663,thunderx3t110,zeus,neoversev1,saphira,neoversen2,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55" (const (symbol_ref "((enum attr_tune) aarch64_tune)"))) diff --git 
a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index f78942b04c6..6b8a0b02462 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -480,6 +480,22 @@ static const struct cpu_addrcost_table a64fx_addrcost_table = 0, /* imm_offset */ }; +static const struct cpu_addrcost_table ftc663_addrcost_table = +{ + { + 1, /* hi */ + 0, /* si */ + 0, /* di */ + 1, /* ti */ + }, + 0, /* pre_modify */ + 0, /* post_modify */ + 0, /* register_offset */ + 1, /* register_sextend */ + 1, /* register_zextend */ + 0, /* imm_offset */ +}; + static const struct cpu_regmove_cost generic_regmove_cost = { 1, /* GP2GP */ @@ -585,6 +601,16 @@ static const struct cpu_regmove_cost a64fx_regmove_cost = 2 /* FP2FP */ }; +static const struct cpu_regmove_cost ftc663_regmove_cost = +{ + 1, /* GP2GP */ + /* Avoid the use of slow int<->fp moves for spilling by setting + their cost higher than memmov_cost. */ + 4, /* GP2FP */ + 5, /* FP2GP */ + 2 /* FP2FP */ +}; + /* Generic costs for vector insn classes. */ static const struct cpu_vector_cost generic_vector_cost = { @@ -781,6 +807,25 @@ static const struct cpu_vector_cost a64fx_vector_cost = 1 /* cond_not_taken_branch_cost */ }; +static const struct cpu_vector_cost ftc663_vector_cost = +{ + 1, /* scalar_int_stmt_cost */ + 1, /* scalar_fp_stmt_cost */ + 4, /* scalar_load_cost */ + 1, /* scalar_store_cost */ + 2, /* vec_int_stmt_cost */ + 2, /* vec_fp_stmt_cost */ + 2, /* vec_permute_cost */ + 3, /* vec_to_scalar_cost */ + 2, /* scalar_to_vec_cost */ + 5, /* vec_align_load_cost */ + 5, /* vec_unalign_load_cost */ + 1, /* vec_unalign_store_cost */ + 1, /* vec_store_cost */ + 1, /* cond_taken_branch_cost */ + 1 /* cond_not_taken_branch_cost */ +}; + /* Generic costs for branch instructions. 
*/ static const struct cpu_branch_cost generic_branch_cost = @@ -924,6 +969,17 @@ static const cpu_prefetch_tune a64fx_prefetch_tune = -1 /* default_opt_level */ }; +static const cpu_prefetch_tune ftc663_prefetch_tune = +{ + 8, /* num_slots */ + 32, /* l1_cache_size */ + 64, /* l1_cache_line_size */ + 2048, /* l2_cache_size */ + true, /* prefetch_dynamic_strides */ + -1, /* minimum_stride */ + -1 /* default_opt_level */ +}; + static const struct tune_params generic_tunings = { &cortexa57_extra_costs, @@ -1463,6 +1519,33 @@ static const struct tune_params a64fx_tunings = &a64fx_prefetch_tune }; +static const struct tune_params ftc663_tunings = +{ + &ftc663_extra_costs, + &ftc663_addrcost_table, + &ftc663_regmove_cost, + &ftc663_vector_cost, + &generic_branch_cost, + &generic_approx_modes, + SVE_NOT_IMPLEMENTED, /* sve_width */ + 4, /* memmov_cost */ + 4, /* issue_rate */ + (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_CMP_BRANCH + | AARCH64_FUSE_ALU_BRANCH), /* fusible_ops */ + "16", /* function_align. */ + "4", /* jump_align. */ + "8", /* loop_align. */ + 2, /* int_reassoc_width. */ + 4, /* fp_reassoc_width. */ + 1, /* vec_reassoc_width. */ + 2, /* min_div_recip_mul_sf. */ + 2, /* min_div_recip_mul_df. */ + 0, /* max_case_values. */ + tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */ + (AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */ + &ftc663_prefetch_tune +}; + /* Support for fine-grained override of the tuning structures. 
*/ struct aarch64_tuning_override_function { diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index ee80261f1ac..0933db13fc9 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -458,6 +458,7 @@ (include "thunderx2t99.md") (include "tsv110.md") (include "thunderx3t110.md") +(include "ftc663.md") ;; ------------------------------------------------------------------- ;; Jumps and other miscellaneous insns diff --git a/gcc/config/aarch64/ftc663.md b/gcc/config/aarch64/ftc663.md new file mode 100644 index 00000000000..bdb87c73f11 --- /dev/null +++ b/gcc/config/aarch64/ftc663.md @@ -0,0 +1,800 @@ +;; ftc663 pipeline description +;; Copyright (C) 2022 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, but +;; WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;; General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. 
+ +(define_automaton "ftc663") + +(define_attr "ftc663_neon_type" + "neon_arith_basic, neon_arith_complex, + neon_multiply, neon_multiply_q, + neon_multiply_long, neon_mla, neon_mla_q, neon_mla_long, + neon_sat_mla_long, neon_shift_acc, neon_shift_imm_basic, + neon_shift_imm_q, neon_shift_imm_complex, + neon_shift_reg_basic, neon_shift_reg_basic_q, neon_shift_reg_complex, + neon_shift_reg_complex_q, neon_fp_nar, neon_fp_minmax, neon_fp_reduc_minmax, neon_fp_arith, + neon_fp_arith_q, neon_fp_reductions_q, neon_fp_cvt_int, + neon_fp_cvt_int_q, neon_fp_cvt16, neon_fp_mul, + neon_fp_mul_q, neon_fp_mla, neon_fp_mla_q, neon_fp_recpe_rsqrte, + neon_fp_recpe_rsqrte_q, neon_fp_recps_rsqrts, neon_fp_recps_rsqrts_q, + neon_bitops, neon_bitops_q, neon_dups, neon_from_gp, + neon_from_gp_q, neon_move, neon_tbl3_tbl4, neon_zip_q, neon_to_gp, + neon_load_a, neon_load_b, neon_load_c, neon_load_d, neon_load_e, + neon_load_f, neon_store_a, neon_store_b, neon_store_complex, + unknown" + (cond [ + (eq_attr "type" "neon_abs,neon_abs_q,neon_add, neon_add_q, neon_add_long,\ + neon_add_widen, neon_neg, neon_neg_q,\ + neon_sub, neon_sub_q,\ + neon_sub_long, neon_sub_widen, neon_logic,\ + neon_logic_q, neon_tst, neon_tst_q,\ + neon_compare, neon_compare_q,\ + neon_compare_zero, neon_compare_zero_q,\ + neon_minmax, neon_minmax_q") + (const_string "neon_arith_basic") + (eq_attr "type" "neon_add_halve_narrow_q,\ + neon_add_halve, neon_add_halve_q,\ + neon_sub_halve, neon_sub_halve_q, neon_qabs,\ + neon_qabs_q, neon_qadd, neon_qadd_q, neon_qneg,\ + neon_qneg_q, neon_qsub, neon_qsub_q,\ + neon_sub_halve_narrow_q") + (const_string "neon_arith_complex") + + (eq_attr "type" "neon_mul_b, neon_mul_h, neon_mul_s,\ + neon_mul_h_scalar, neon_mul_s_scalar,\ + neon_sat_mul_b, neon_sat_mul_h,\ + neon_sat_mul_s, neon_sat_mul_h_scalar,\ + neon_sat_mul_s_scalar,\ + neon_mul_b_long, neon_mul_h_long,\ + neon_mul_s_long,\ + neon_mul_h_scalar_long, neon_mul_s_scalar_long,\ + neon_sat_mul_b_long, 
neon_sat_mul_h_long,\ + neon_sat_mul_s_long, neon_sat_mul_h_scalar_long,\ + neon_sat_mul_s_scalar_long,\ + neon_mla_b, neon_mla_h, neon_mla_s,\ + neon_mla_h_scalar, neon_mla_s_scalar,\ + neon_mla_b_long, neon_mla_h_long,\ + neon_mla_s_long,\ + neon_mla_h_scalar_long, neon_mla_s_scalar_long,\ + neon_sat_mla_b_long, neon_sat_mla_h_long,\ + neon_sat_mla_s_long, neon_sat_mla_h_scalar_long,\ + neon_sat_mla_s_scalar_long") + (const_string "neon_multiply") + (eq_attr "type" "neon_mul_b_q, neon_mul_h_q, neon_mul_s_q,\ + neon_mul_h_scalar_q, neon_mul_s_scalar_q,\ + neon_sat_mul_b_q, neon_sat_mul_h_q,\ + neon_sat_mul_s_q, neon_sat_mul_h_scalar_q,\ + neon_sat_mul_s_scalar_q,\ + neon_mla_b_q, neon_mla_h_q, neon_mla_s_q,\ + neon_mla_h_scalar_q, neon_mla_s_scalar_q") + (const_string "neon_multiply_q") + + (eq_attr "type" "neon_shift_acc, neon_shift_acc_q") + (const_string "neon_shift_acc") + (eq_attr "type" "neon_shift_imm, neon_shift_imm_long") + (const_string "neon_shift_imm_basic") + (eq_attr "type" "neon_shift_imm_q, neon_shift_imm_narrow_q") + (const_string "neon_shift_imm_q") + (eq_attr "type" "neon_sat_shift_imm, neon_sat_shift_imm_q,\ + neon_sat_shift_imm_narrow_q") + (const_string "neon_shift_imm_complex") + (eq_attr "type" "neon_shift_reg") + (const_string "neon_shift_reg_basic") + (eq_attr "type" "neon_shift_reg_q") + (const_string "neon_shift_reg_basic_q") + (eq_attr "type" "neon_sat_shift_reg") + (const_string "neon_shift_reg_complex") + (eq_attr "type" "neon_sat_shift_reg_q") + (const_string "neon_shift_reg_complex_q") + + (eq_attr "type" "neon_fp_neg_s, neon_fp_neg_s_q,\ + neon_fp_abs_s, neon_fp_abs_s_q,\ + neon_fp_round_s, neon_fp_round_s_q,\ + neon_fp_neg_d, neon_fp_neg_d_q,\ + neon_fp_abs_d, neon_fp_abs_d_q,\ + neon_fp_round_d, neon_fp_round_d_q") + (const_string "neon_fp_nar") + (eq_attr "type" "neon_fp_minmax_s,neon_fp_minmax_d,neon_fp_minmax_s_q,neon_fp_minmax_d_q") + (const_string "neon_fp_minmax") + (eq_attr "type" "neon_fp_addsub_s, neon_fp_abd_s,\ + 
neon_fp_reduc_add_s, neon_fp_reduc_add_s_q, neon_fp_compare_s,\ + neon_fp_addsub_d, neon_fp_abd_d,\ + neon_fp_reduc_add_d, neon_fp_reduc_add_d_q, neon_fp_compare_d") + (const_string "neon_fp_arith") + (eq_attr "type" "neon_fp_addsub_s_q, neon_fp_abd_s_q,\ + neon_fp_reduc_add_s_q, neon_fp_compare_s_q,\ + neon_fp_addsub_d_q, neon_fp_abd_d_q,\ + neon_fp_reduc_add_d_q, neon_fp_compare_d_q") + (const_string "neon_fp_arith_q") + (eq_attr "type" "neon_fp_reduc_minmax_s, neon_fp_reduc_minmax_s_q,\ + neon_fp_reduc_minmax_d, neon_fp_reduc_minmax_d_q") + (const_string "neon_fp_reduc_minmax") + (eq_attr "type" "neon_fp_to_int_s, neon_int_to_fp_s,\ + neon_fp_to_int_d, neon_int_to_fp_d") + (const_string "neon_fp_cvt_int") + (eq_attr "type" "neon_fp_to_int_s_q, neon_int_to_fp_s_q,\ + neon_fp_to_int_d_q, neon_int_to_fp_d_q") + (const_string "neon_fp_cvt_int_q") + (eq_attr "type" "neon_fp_cvt_narrow_s_q, neon_fp_cvt_widen_h") + (const_string "neon_fp_cvt16") + (eq_attr "type" "neon_fp_mul_s, neon_fp_mul_s_scalar,\ + neon_fp_mul_d") + (const_string "neon_fp_mul") + (eq_attr "type" "neon_fp_mul_s_q, neon_fp_mul_s_scalar_q,\ + neon_fp_mul_d_q, neon_fp_mul_d_scalar_q") + (const_string "neon_fp_mul_q") + (eq_attr "type" "neon_fp_mla_s, neon_fp_mla_s_scalar,\ + neon_fp_mla_d") + (const_string "neon_fp_mla") + (eq_attr "type" "neon_fp_mla_s_q, neon_fp_mla_s_scalar_q,\ + neon_fp_mla_d_q, neon_fp_mla_d_scalar_q") + (const_string "neon_fp_mla_q") + (eq_attr "type" "neon_fp_recpe_s, neon_fp_rsqrte_s,\ + neon_fp_recpx_s,\ + neon_fp_recpe_d, neon_fp_rsqrte_d,\ + neon_fp_recpx_d") + (const_string "neon_fp_recpe_rsqrte") + (eq_attr "type" "neon_fp_recpe_s_q, neon_fp_rsqrte_s_q,\ + neon_fp_recpx_s_q,\ + neon_fp_recpe_d_q, neon_fp_rsqrte_d_q,\ + neon_fp_recpx_d_q") + (const_string "neon_fp_recpe_rsqrte_q") + (eq_attr "type" "neon_fp_recps_s, neon_fp_rsqrts_s,\ + neon_fp_recps_d, neon_fp_rsqrts_d") + (const_string "neon_fp_recps_rsqrts") + (eq_attr "type" "neon_fp_recps_s_q, neon_fp_rsqrts_s_q,\ + 
neon_fp_recps_d_q, neon_fp_rsqrts_d_q") + (const_string "neon_fp_recps_rsqrts_q") + (eq_attr "type" "neon_bsl, neon_cls, neon_cnt,\ + neon_rev, neon_permute, neon_rbit,\ + neon_tbl1, neon_tbl2, neon_zip,\ + neon_ext, neon_ext_q,\ + neon_move, neon_move_q, neon_move_narrow_q") + (const_string "neon_bitops") + (eq_attr "type" "neon_bsl_q, neon_cls_q, neon_cnt_q,\ + neon_rev_q, neon_permute_q, neon_rbit_q") + (const_string "neon_bitops_q") + (eq_attr "type" "neon_dup, neon_dup_q") + (const_string "neon_dups") + (eq_attr "type" "neon_from_gp,f_mcr,f_mcrr") + (const_string "neon_from_gp") + (eq_attr "type" "neon_from_gp_q") + (const_string "neon_from_gp_q") + + (eq_attr "type" "f_loads, f_loadd,\ + neon_load1_1reg, neon_load1_1reg_q,\ + neon_load1_2reg, neon_load1_2reg_q") + (const_string "neon_load_a") + (eq_attr "type" "neon_load1_3reg, neon_load1_3reg_q,\ + neon_load1_4reg, neon_load1_4reg_q") + (const_string "neon_load_b") + (eq_attr "type" "neon_load1_one_lane, neon_load1_one_lane_q,\ + neon_load1_all_lanes, neon_load1_all_lanes_q,\ + neon_load2_2reg, neon_load2_2reg_q,\ + neon_load2_all_lanes, neon_load2_all_lanes_q") + (const_string "neon_load_c") + (eq_attr "type" "neon_load2_4reg, neon_load2_4reg_q,\ + neon_load3_3reg, neon_load3_3reg_q,\ + neon_load3_one_lane, neon_load3_one_lane_q,\ + neon_load4_4reg, neon_load4_4reg_q") + (const_string "neon_load_d") + (eq_attr "type" "neon_load2_one_lane, neon_load2_one_lane_q,\ + neon_load3_all_lanes, neon_load3_all_lanes_q,\ + neon_load4_all_lanes, neon_load4_all_lanes_q") + (const_string "neon_load_e") + (eq_attr "type" "neon_load4_one_lane, neon_load4_one_lane_q") + (const_string "neon_load_f") + + (eq_attr "type" "f_stores, f_stored,\ + neon_store1_1reg") + (const_string "neon_store_a") + (eq_attr "type" "neon_store1_2reg, neon_store1_1reg_q") + (const_string "neon_store_b") + (eq_attr "type" "neon_store1_3reg, neon_store1_3reg_q,\ + neon_store3_3reg, neon_store3_3reg_q,\ + neon_store2_4reg, neon_store2_4reg_q,\ + 
neon_store4_4reg, neon_store4_4reg_q,\ + neon_store2_2reg, neon_store2_2reg_q,\ + neon_store3_one_lane, neon_store3_one_lane_q,\ + neon_store4_one_lane, neon_store4_one_lane_q,\ + neon_store1_4reg, neon_store1_4reg_q,\ + neon_store1_one_lane, neon_store1_one_lane_q,\ + neon_store2_one_lane, neon_store2_one_lane_q") + (const_string "neon_store_complex")] + (const_string "unknown"))) + +;; The ftc663 core has the following functional units + +;; 1. Two pipelines for simple integer operations: SIU1, SIU2 + +(define_cpu_unit "ftc663_siu1_issue" "ftc663") +(define_reservation "ftc663_siu1" "ftc663_siu1_issue") + +(define_cpu_unit "ftc663_siu2_issue" "ftc663") +(define_reservation "ftc663_siu2" "ftc663_siu2_issue") + +;; 2. One pipeline for complex integer operations: CIU + +(define_cpu_unit "ftc663_ciu_issue" "ftc663") +(define_reservation "ftc663_ciu" "ftc663_ciu_issue") + +;; 3. Two asymmetric pipelines for Asimd and FP operations: FVU1, FVU2 + +(define_automaton "ftc663_fvu") + +(define_cpu_unit "ftc663_fvu1_issue" "ftc663_fvu") +(define_reservation "ftc663_fvu1" "ftc663_fvu1_issue") + +(define_cpu_unit "ftc663_fvu2_issue" "ftc663_fvu") +(define_reservation "ftc663_fvu2" "ftc663_fvu2_issue") + +;; 4. One pipeline for branch operations: BRU + +(define_cpu_unit "ftc663_bru_issue" "ftc663") +(define_reservation "ftc663_bru" "ftc663_bru_issue") + +;; 5. One pipeline for load operations: LDU + +(define_cpu_unit "ftc663_ldu_issue" "ftc663") +(define_reservation "ftc663_ldu" "ftc663_ldu_issue") + +;; 6. One pipeline for store operations: STU + +(define_cpu_unit "ftc663_stu_issue" "ftc663") +(define_reservation "ftc663_stu" "ftc663_stu_issue") + +;; Block all issue queues. 
+ +(define_reservation "ftc663_block" "ftc663_siu1_issue + ftc663_siu2_issue + ftc663_ciu_issue + + ftc663_fvu1_issue + ftc663_fvu2_issue + + ftc663_bru_issue + ftc663_ldu_issue + ftc663_stu_issue") + +;; Integer operations without extra shift + +(define_insn_reservation "ftc663_si_no_shift" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "alu_imm,logic_imm,\ + alu_sreg,logic_reg,\ + adc_imm,adc_reg,\ + adr,clz,rbit,rev,\ + shift_imm,shift_reg,\ + mov_imm,mov_reg,\ + mvn_imm,mvn_reg,\ + no_insn")) + "ftc663_siu1|ftc663_siu2") + +;; Integer operations with extra shift + +(define_insn_reservation "ftc663_si_with_shift" 2 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "extend,\ + alu_shift_imm,alu_shift_reg,\ + logic_shift_imm,logic_shift_reg,\ + mov_shift,mvn_shift,\ + mov_shift_reg,mvn_shift_reg")) + "ftc663_ciu") + +;; CRC instructions + +(define_insn_reservation "ftc663_si_crc" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "crc")) + "ftc663_ciu") + +;; Bitfield insertion instructions + +(define_insn_reservation "ftc663_bfm" 2 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "bfm")) + "ftc663_ciu") + +;; Integer multiply instructions + +(define_insn_reservation "ftc663_mult32" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "mul32" "yes")) + "ftc663_ciu") + +(define_insn_reservation "ftc663_mult64" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "widen_mul64" "yes")) + "ftc663_ciu") + +;; Integer divide instructions + +(define_insn_reservation "ftc663_div" 12 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "udiv,sdiv")) + "ftc663_ciu") + +;; Block all issue pipes for a cycle +(define_insn_reservation "ftc663_block" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "block")) + "ftc663_block") + +;; Branch instructions + +(define_insn_reservation "ftc663_branch" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "branch")) + "ftc663_bru") + +;; Load instructions + +(define_insn_reservation "ftc663_load1" 4 + (and (eq_attr "tune" "ftc663") + 
(eq_attr "type" "load_4,load_8")) + "ftc663_ldu") + +;; Store instructions + +(define_insn_reservation "ftc663_store1" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "store_4,store_8")) + "ftc663_stu") + +;; Advanced SIMD Integer Arithmetic Instructions, basic + +(define_insn_reservation "ftc663_neon_abd" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_abd")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_aba" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_arith_acc")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_aba_q" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_arith_acc_q")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_arith_basic" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_arith_basic")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_reduc_add" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_reduc_add,neon_reduc_add_acc")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_reduc_add_l" 7 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_reduc_add_long")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_reduc_add_q" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_reduc_add_q")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_reduc_minmax" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_reduc_minmax")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_reduc_minmax_q" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_reduc_minmax_q")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_neon_arith_complex" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_arith_complex")) + "ftc663_fvu1|ftc663_fvu2") + +;; Advanced SIMD Integer Multiply Instructions, D-form + +(define_insn_reservation "ftc663_neon_multiply" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_multiply")) + "ftc663_fvu1") + +;; Advanced 
SIMD Integer Multiply Instructions, Q-form + +(define_insn_reservation "ftc663_neon_multiply_q" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_multiply_q")) + "ftc663_fvu1") + +;; Advanced SIMD Integer Shift Instructions + +(define_insn_reservation + "ftc663_neon_shift_acc" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_shift_acc")) + "ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_shift_basic" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_shift_imm_basic,neon_shift_reg_basic")) + "ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_shift_imm" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_shift_imm_q,neon_shift_imm_complex")) + "ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_shift_reg" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_shift_reg_basic_q,neon_shift_reg_complex")) + "ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_shift_complex_q" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_shift_reg_complex_q")) + "ftc663_fvu2") + +;; Advanced SIMD Floating Point Instructions + +(define_insn_reservation + "ftc663_neon_fp_nar" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_nar")) + "(ftc663_fvu1|ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_fp_minmax" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_minmax")) + "(ftc663_fvu1|ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_fp_reduc_minmax" 7 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_reduc_minmax")) + "(ftc663_fvu1|ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_fp_arith" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_arith,neon_fp_arith_q")) + "(ftc663_fvu1|ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_fp_cvt_int" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" 
"neon_fp_cvt_int,neon_fp_cvt_int_q")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_fp_mul" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_mul,neon_fp_mul_q")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_fp_mla" 7 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_mla,neon_fp_mla_q,\ + neon_fp_recps_rsqrts,neon_fp_recps_rsqrts_q")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_fp_recpe_rsqrte" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_fp_recpe_rsqrte,neon_fp_recpe_rsqrte_q")) + "ftc663_fvu1|ftc663_fvu2") + +;; Advanced SIMD Miscellaneous Instructions + +(define_insn_reservation + "ftc663_neon_bitops" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_bitops,neon_bitops_q")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation + "ftc663_neon_dups" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "ftc663_neon_type" "neon_dups")) + "(ftc663_ldu+ftc663_fvu1)|(ftc663_ldu+ftc663_fvu2)") + +;; Advanced SIMD Load Instructions + +(define_insn_reservation + "ftc663_neon_ld1_lane" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_one_lane,neon_load1_one_lane_q,\ + neon_load1_all_lanes,neon_load1_all_lanes_q")) + "(ftc663_ldu + ftc663_fvu1)|(ftc663_ldu + ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_ld1_reg1" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "f_loads,f_loadd,neon_load1_1reg,neon_load1_1reg_q")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld1_reg2" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_2reg")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld1_reg2_q" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_2reg_q")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld1_reg3" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_3reg")) + "ftc663_ldu") + 
+(define_insn_reservation + "ftc663_neon_ld1_reg3_q" 7 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_3reg_q")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld1_reg4" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_4reg")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld1_reg4_q" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load1_4reg_q")) + "ftc663_ldu") + +(define_insn_reservation + "ftc663_neon_ld2" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load2_2reg,neon_load2_2reg_q,neon_load2_all_lanes,\ + neon_load2_4reg,neon_load2_4reg_q,\ + neon_load2_all_lanes_q,neon_load2_one_lane,neon_load2_one_lane_q")) + "(ftc663_ldu + ftc663_fvu1)|(ftc663_ldu + ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_ld3" 9 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load3_3reg,neon_load3_3reg_q,\ + neon_load3_one_lane,neon_load3_one_lane_q,\ + neon_load3_all_lanes,neon_load3_all_lanes_q")) + "(ftc663_ldu + ftc663_fvu1)|(ftc663_ldu + ftc663_fvu2)") + +(define_insn_reservation + "ftc663_neon_ld4" 9 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_load4_4reg,neon_load4_4reg_q,neon_load4_all_lanes,neon_load4_all_lanes_q,\ + neon_load4_one_lane,neon_load4_one_lane_q")) + "(ftc663_ldu + ftc663_fvu1)|(ftc663_ldu + ftc663_fvu2)") + +;; Advanced SIMD Store Instructions + +(define_insn_reservation + "ftc663_neon_st1_lane" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_one_lane")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st1_lane_q" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_one_lane_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg1" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_1reg")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg1_q" 2 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_1reg_q")) + 
"ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg2" 2 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_2reg")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg2_q" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_2reg_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg3" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_3reg")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg3_q" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_3reg_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg4" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_4reg")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st1_reg4_q" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store1_4reg_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st2_lane" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store2_one_lane")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st2_lane_q" 2 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store2_one_lane_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st2" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store2_2reg,neon_store2_4reg")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st2_q" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store2_2reg_q,neon_store2_4reg_q")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st3_lane" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store3_one_lane")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st3_lane_q" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store3_one_lane_q")) + "ftc663_stu") + +(define_insn_reservation + 
"ftc663_neon_st3" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store3_3reg")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st3_q" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store3_3reg_q")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st4_lane" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store4_one_lane")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st4_lane_q" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store4_one_lane_q")) + "ftc663_stu") + +(define_insn_reservation + "ftc663_neon_st4" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store4_4reg")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +(define_insn_reservation + "ftc663_neon_st4_q" 8 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "neon_store4_4reg_q")) + "(ftc663_fvu1 + ftc663_stu)|(ftc663_fvu2 + ftc663_stu)") + +;; Floating-Point instructions + +(define_insn_reservation "ftc663_fp_const" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fconsts,fconstd,fmov")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_fp_add_sub" 4 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fadds,faddd,fmuls,fmuld")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_fp_mac" 7 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fmacs,ffmas,fmacd,ffmad")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_fp_cvt" 5 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "f_cvt")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_fp_cmp" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fcmps,fcmpd")) + "ftc663_fvu2") + +(define_insn_reservation "ftc663_fp_arith" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "ffariths,ffarithd")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation 
"ftc663_fp_div_s" 12 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fdivs,neon_fp_div_s")) + "ftc663_fvu1") + +(define_insn_reservation "ftc663_fp_div_d" 19 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fdivd,neon_fp_div_d,\ + neon_fp_div_s_q,neon_fp_div_d_q")) + "ftc663_fvu1") + +(define_insn_reservation "ftc663_fp_sqrt_s" 12 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fsqrts,neon_fp_sqrt_s")) + "ftc663_fvu1") + +(define_insn_reservation "ftc663_fp_sqrt_d" 19 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "fsqrtd,neon_fp_sqrt_d,\ + neon_fp_sqrt_s_q,neon_fp_sqrt_d_q")) + "ftc663_fvu1") + +(define_insn_reservation "ftc663_crypto_aes" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "crypto_aese,crypto_aesmc")) + "ftc663_fvu1") + +(define_insn_reservation "ftc663_crypto_sha1_xor" 6 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "crypto_sha1_xor")) + "ftc663_fvu1|ftc663_fvu2") + +(define_insn_reservation "ftc663_crypto_sha1_fast" 3 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "crypto_sha1_fast")) + "ftc663_fvu1") + +;; Call instructions + +(define_insn_reservation "ftc663_call" 1 + (and (eq_attr "tune" "ftc663") + (eq_attr "type" "call")) + "ftc663_siu1_issue+ftc663_siu2_issue+ftc663_ciu_issue+ftc663_fvu1_issue+ftc663_fvu2_issue\ + +ftc663_bru_issue+ftc663_ldu_issue+ftc663_stu_issue") + +(define_bypass 1 "ftc663_*" + "ftc663_call,ftc663_branch") -- Gitee