Home | History | Annotate | Line # | Download | only in opcodes
aarch64-opc.c revision 1.1.1.10
      1 /* aarch64-opc.c -- AArch64 opcode support.
      2    Copyright (C) 2009-2026 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <assert.h>
     23 #include <stdlib.h>
     24 #include <stdio.h>
     25 #include <stdint.h>
     26 #include <stdarg.h>
     27 #include <inttypes.h>
     28 
     29 #include "opintl.h"
     30 #include "libiberty.h"
     31 
     32 #include "aarch64-opc.h"
     33 
#ifdef DEBUG_AARCH64
/* Runtime flag: when non-zero, debug dumping is enabled in builds
   compiled with DEBUG_AARCH64.  Presumably toggled by the consumer
   (assembler/disassembler) — confirm at the use sites.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
     37 
/* Names for the 5-bit SVE predicate-pattern encodings, indexed by the
   encoded value.  Unnamed (reserved) encodings are left as null
   pointers by the designated initializer.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
     78 
/* Names for the 4-bit SVE prefetch-operation encodings, indexed by the
   encoded value.  Reserved encodings (6, 7, 14, 15) stay null.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
    101 
/* Names for the 6-bit RPRFM operation encodings, indexed by the encoded
   value.  Only four encodings are named; everything else is null.  */
const char *const aarch64_rprfmop_array[64] = {
  [0] = "pldkeep",
  [1] = "pstkeep",
  [4] = "pldstrm",
  [5] = "pststrm"
};
    112 
/* Vector-length multiplier names for a predicate-as-counter operand
   (e.g. AARCH64_OPND_SME_VLxN_10), indexed by the single encoding bit.  */
const char *const aarch64_sme_vlxn_array[2] = {
  [0] = "vlx2",
  [1] = "vlx4"
};
    119 
/* Operand spellings accepted by the BRB alias, indexed by the encoded
   bit value.  */
const char *const aarch64_brbop_array[] = {
  [0] = "iall",
  [1] = "inj",
};
    125 
    126 /* Helper functions to determine which operand to be used to encode/decode
    127    the size:Q fields for AdvSIMD instructions.  */
    128 
    129 static inline bool
    130 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    131 {
    132   return (qualifier >= AARCH64_OPND_QLF_V_8B
    133 	  && qualifier <= AARCH64_OPND_QLF_V_1Q);
    134 }
    135 
    136 static inline bool
    137 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    138 {
    139   return (qualifier >= AARCH64_OPND_QLF_S_B
    140 	  && qualifier <= AARCH64_OPND_QLF_S_Q);
    141 }
    142 
/* Classification of an instruction's qualifier sequence; used to pick
   which operand carries the size:Q information.  Keep the order in sync
   with significant_operand_index below, which is indexed by this enum.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
    151 
/* For each data pattern, the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern; keep
   the two in sync.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
    160 
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.  The checks below only
   examine the first three entries of the sequence.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]))
    {
      /* All three operands share the same element size,
	 e.g. v.4s, v.4s, v.4s
	   or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* Destination elements are twice the width of the first source,
	 e.g. v.8h, v.8b, v.8b.
           or v.4s, v.4h, v.h[2].
	   or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* Destination and first source match; second source elements are
	 half their width, e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]))
    {
      /* Scalar destination reduced from a vector source with no third
	 operand, e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1])
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  /* No recognised shape; callers fall back to operand 0.  */
  return DP_UNKNOWN;
}
    208 
    209 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
    210    the AdvSIMD instructions.  */
    211 /* N.B. it is possible to do some optimization that doesn't call
    212    get_data_pattern each time when we need to select an operand.  We can
    213    either buffer the caculated the result or statically generate the data,
    214    however, it is not obvious that the optimization will bring significant
    215    benefit.  */
    216 
    217 int
    218 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
    219 {
    220   return
    221     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
    222 }
    223 
/* Instruction bit-fields.
   Each entry gives the LSB position and width of a named field.
   Keep synced with 'enum aarch64_field_kind': the entries below are
   positional, so insertions/removals must mirror that enum exactly.  */
const aarch64_field aarch64_fields[] =
{
    AARCH64_FIELD_NIL,	/* NIL.  */
    AARCH64_FIELD_CONST (0, 1),	/* CONST_0.  */
    AARCH64_FIELD_CONST (0, 2),	/* CONST_00.  */
    AARCH64_FIELD_CONST (1, 2),	/* CONST_01.  */
    AARCH64_FIELD_CONST (1, 1),	/* CONST_1.  */
    AARCH64_FIELD ( 8, 4), /* CRm: in the system instructions.  */
    AARCH64_FIELD (10, 2), /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    AARCH64_FIELD (12, 4), /* CRn: in the system instructions.  */
    AARCH64_FIELD (10, 8), /* CSSC_imm8.  */
    AARCH64_FIELD (11, 1), /* H: in advsimd scalar x indexed element instructions.  */
    AARCH64_FIELD (21, 1), /* L: in advsimd scalar x indexed element instructions.  */
    AARCH64_FIELD ( 0, 5), /* LSE128_Rt: Shared input+output operand register.  */
    AARCH64_FIELD (16, 5), /* LSE128_Rt2: Shared input+output operand register 2.  */
    AARCH64_FIELD (20, 1), /* M: in advsimd scalar x indexed element instructions.  */
    AARCH64_FIELD (22, 1), /* N: in logical (immediate) instructions.  */
    AARCH64_FIELD (30, 1), /* Q: in most AdvSIMD instructions.  */
    AARCH64_FIELD (10, 5), /* Ra: in fp instructions.  */
    AARCH64_FIELD ( 0, 5), /* Rd: in many integer instructions.  */
    AARCH64_FIELD (16, 5), /* Rm: in ld/st reg offset and some integer inst.  */
    AARCH64_FIELD ( 5, 5), /* Rn: in many integer instructions.  */
    AARCH64_FIELD (16, 5), /* Rs: in load/store exclusive instructions.  */
    AARCH64_FIELD ( 0, 5), /* Rt: in load/store instructions.  */
    AARCH64_FIELD (10, 5), /* Rt2: in load/store pair instructions.  */
    AARCH64_FIELD (12, 1), /* S: in load/store reg offset instructions.  */
    AARCH64_FIELD (12, 2), /* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    AARCH64_FIELD ( 1, 3), /* SME_Pdx2: predicate register, multiple of 2, [3:1].  */
    AARCH64_FIELD (13, 3), /* SME_Pm: second source scalable predicate register P0-P7.  */
    AARCH64_FIELD ( 0, 3), /* SME_PNd3: PN0-PN7, bits [2:0].  */
    AARCH64_FIELD ( 5, 3), /* SME_PNn3: PN0-PN7, bits [7:5].  */
    AARCH64_FIELD (16, 1), /* SME_Q: Q class bit, bit 16.  */
    AARCH64_FIELD (16, 2), /* SME_Rm: index base register W12-W15 [17:16].  */
    AARCH64_FIELD (13, 2), /* SME_Rv: vector select register W12-W15, bits [14:13].  */
    AARCH64_FIELD (15, 1), /* SME_V: (horizontal / vertical tiles), bit 15.  */
    AARCH64_FIELD (10, 1), /* SME_VL_10: VLx2 or VLx4, bit [10].  */
    AARCH64_FIELD (13, 1), /* SME_VL_13: VLx2 or VLx4, bit [13].  */
    AARCH64_FIELD ( 0, 1), /* SME_ZAda_1b: tile ZA0-ZA1.  */
    AARCH64_FIELD ( 0, 2), /* SME_ZAda_2b: tile ZA0-ZA3.  */
    AARCH64_FIELD ( 0, 3), /* SME_ZAda_3b: tile ZA0-ZA7.  */
    AARCH64_FIELD ( 1, 4), /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1].  */
    AARCH64_FIELD ( 2, 3), /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2].  */
    AARCH64_FIELD (16, 4), /* SME_Zm: Z0-Z15, bits [19:16].  */
    AARCH64_FIELD (17, 3), /* SME_Zm17_3: Z0-Z15/Z16-Z31, multiple of 2, bits [19:17].  */
    AARCH64_FIELD (17, 4), /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17].  */
    AARCH64_FIELD (18, 3), /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18].  */
    AARCH64_FIELD ( 6, 4), /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6].  */
    AARCH64_FIELD ( 7, 3), /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7].  */
    AARCH64_FIELD ( 6, 3), /* SME_Zn6_3: Z0-Z15/Z16-Z31, multiple of 2, bits [8:6].  */
    AARCH64_FIELD ( 4, 1), /* SME_ZtT: upper bit of Zt, bit [4].  */
    AARCH64_FIELD ( 0, 3), /* SME_Zt3: lower 3 bits of Zt, bits [2:0].  */
    AARCH64_FIELD ( 0, 2), /* SME_Zt2: lower 2 bits of Zt, bits [1:0].  */
    AARCH64_FIELD (23, 1), /* SME_i1: immediate field, bit 23.  */
    AARCH64_FIELD (12, 2), /* SME_size_12: bits [13:12].  */
    AARCH64_FIELD (22, 2), /* SME_size_22: size<1>, size<0> class field, [23:22].  */
    AARCH64_FIELD (23, 1), /* SME_sz_23: bit [23].  */
    AARCH64_FIELD (22, 1), /* SME_tszh: immediate and qualifier field, bit 22.  */
    AARCH64_FIELD (18, 3), /* SME_tszl: immediate and qualifier field, bits [20:18].  */
    AARCH64_FIELD (0,  8), /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    AARCH64_FIELD ( 4, 1), /* SVE_M_4: Merge/zero select, bit 4.  */
    AARCH64_FIELD (14, 1), /* SVE_M_14: Merge/zero select, bit 14.  */
    AARCH64_FIELD (16, 1), /* SVE_M_16: Merge/zero select, bit 16.  */
    AARCH64_FIELD (17, 1), /* SVE_N: SVE equivalent of N.  */
    AARCH64_FIELD ( 0, 4), /* SVE_Pd: p0-p15, bits [3,0].  */
    AARCH64_FIELD (10, 3), /* SVE_Pg3: p0-p7, bits [12,10].  */
    AARCH64_FIELD ( 5, 4), /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    AARCH64_FIELD (10, 4), /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    AARCH64_FIELD (16, 4), /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    AARCH64_FIELD (16, 4), /* SVE_Pm: p0-p15, bits [19,16].  */
    AARCH64_FIELD ( 5, 4), /* SVE_Pn: p0-p15, bits [8,5].  */
    AARCH64_FIELD ( 0, 4), /* SVE_Pt: p0-p15, bits [3,0].  */
    AARCH64_FIELD ( 5, 5), /* SVE_Rm: SVE alternative position for Rm.  */
    AARCH64_FIELD (16, 5), /* SVE_Rn: SVE alternative position for Rn.  */
    AARCH64_FIELD ( 0, 5), /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    AARCH64_FIELD ( 5, 5), /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    AARCH64_FIELD ( 5, 5), /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    AARCH64_FIELD ( 5, 5), /* SVE_Za_5: SVE vector register, bits [9,5].  */
    AARCH64_FIELD (16, 5), /* SVE_Za_16: SVE vector register, bits [20,16].  */
    AARCH64_FIELD ( 0, 5), /* SVE_Zd: SVE vector register. bits [4,0].  */
    AARCH64_FIELD ( 5, 5), /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    AARCH64_FIELD (16, 5), /* SVE_Zm_16: SVE vector register, bits [20,16]. */
    AARCH64_FIELD ( 5, 5), /* SVE_Zn: SVE vector register, bits [9,5].  */
    AARCH64_FIELD ( 0, 5), /* SVE_Zt: SVE vector register, bits [4,0].  */
    AARCH64_FIELD ( 5, 1), /* SVE_i1: single-bit immediate.  */
    AARCH64_FIELD (23, 1), /* SVE_i1_23: single-bit immediate.  */
    AARCH64_FIELD (22, 2), /* SVE_i2: 2-bit index, bits [23,22].  */
    AARCH64_FIELD (20, 1), /* SVE_i2h: high bit of 2-bit immediate, bit 20.  */
    AARCH64_FIELD (22, 1), /* SVE_i3h: high bit of 3-bit immediate.  */
    AARCH64_FIELD (19, 2), /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    AARCH64_FIELD (22, 2), /* SVE_i3h3: two high bits of 3bit immediate, bits [23,22].  */
    AARCH64_FIELD (11, 1), /* SVE_i3l: low bit of 3-bit immediate.  */
    AARCH64_FIELD (12, 1), /* SVE_i3l2: low bit of 3-bit immediate, bit 12.  */
    AARCH64_FIELD (10, 2), /* SVE_i4l2: two low bits of 4bit immediate, bits [11,10].  */
    AARCH64_FIELD (16, 3), /* SVE_imm3: 3-bit immediate field.  */
    AARCH64_FIELD (16, 4), /* SVE_imm4: 4-bit immediate field.  */
    AARCH64_FIELD ( 5, 5), /* SVE_imm5: 5-bit immediate field.  */
    AARCH64_FIELD (16, 5), /* SVE_imm5b: secondary 5-bit immediate field.  */
    AARCH64_FIELD (16, 6), /* SVE_imm6: 6-bit immediate field.  */
    AARCH64_FIELD (14, 7), /* SVE_imm7: 7-bit immediate field.  */
    AARCH64_FIELD ( 5, 8), /* SVE_imm8: 8-bit immediate field.  */
    AARCH64_FIELD ( 5, 9), /* SVE_imm9: 9-bit immediate field.  */
    AARCH64_FIELD (11, 6), /* SVE_immr: SVE equivalent of immr.  */
    AARCH64_FIELD ( 5, 6), /* SVE_imms: SVE equivalent of imms.  */
    AARCH64_FIELD (10, 2), /* SVE_msz: 2-bit shift amount for ADR.  */
    AARCH64_FIELD ( 5, 5), /* SVE_pattern: vector pattern enumeration.  */
    AARCH64_FIELD ( 0, 4), /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    AARCH64_FIELD (16, 1), /* SVE_rot1: 1-bit rotation amount.  */
    AARCH64_FIELD (10, 2), /* SVE_rot2: 2-bit rotation amount.  */
    AARCH64_FIELD (10, 1), /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    AARCH64_FIELD (17, 2), /* SVE_size: 2-bit element size, bits [18,17].  */
    AARCH64_FIELD (22, 1), /* SVE_sz: 1-bit element size select.  */
    AARCH64_FIELD (30, 1), /* SVE_sz2: 1-bit element size select.  */
    AARCH64_FIELD (17, 1), /* SVE_sz3: 1-bit element size select.  */
    AARCH64_FIELD (14, 1), /* SVE_sz4: 1-bit element size select.  */
    AARCH64_FIELD (16, 4), /* SVE_tsz: triangular size select.  */
    AARCH64_FIELD (22, 2), /* SVE_tszh: triangular size select high, bits [23,22].  */
    AARCH64_FIELD ( 8, 2), /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    AARCH64_FIELD (19, 2), /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    AARCH64_FIELD (14, 1), /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    AARCH64_FIELD (22, 1), /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    AARCH64_FIELD (22, 1), /* S_imm10: in LDRAA and LDRAB instructions.  */
    AARCH64_FIELD (16, 3), /* abc: a:b:c bits in AdvSIMD modified immediate.  */
    AARCH64_FIELD (13, 3), /* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    AARCH64_FIELD (19, 5), /* b40: in the test bit and branch instructions.  */
    AARCH64_FIELD (31, 1), /* b5: in the test bit and branch instructions.  */
    AARCH64_FIELD (12, 4), /* cmode: in advsimd modified immediate instructions.  */
    AARCH64_FIELD (12, 4), /* cond: condition flags as a source operand.  */
    AARCH64_FIELD ( 0, 4), /* cond2: condition in truly conditional-executed inst.  */
    AARCH64_FIELD ( 5, 5), /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    AARCH64_FIELD (21, 2), /* hw: in move wide constant instructions.  */
    AARCH64_FIELD ( 0, 1), /* imm1_0: general immediate in bits [0].  */
    AARCH64_FIELD ( 2, 1), /* imm1_2: general immediate in bits [2].  */
    AARCH64_FIELD ( 3, 1), /* imm1_3: general immediate in bits [3].  */
    AARCH64_FIELD ( 8, 1), /* imm1_8: general immediate in bits [8].  */
    AARCH64_FIELD (10, 1), /* imm1_10: general immediate in bits [10].  */
    AARCH64_FIELD (14, 1), /* imm1_14: general immediate in bits [14].  */
    AARCH64_FIELD (15, 1), /* imm1_15: general immediate in bits [15].  */
    AARCH64_FIELD (16, 1), /* imm1_16: general immediate in bits [16].  */
    AARCH64_FIELD (22, 1), /* imm1_22: general immediate in bits [22].  */
    AARCH64_FIELD ( 0, 2), /* imm2_0: general immediate in bits [1:0].  */
    AARCH64_FIELD ( 1, 2), /* imm2_1: general immediate in bits [2:1].  */
    AARCH64_FIELD ( 2, 2), /* imm2_2: general immediate in bits [3:2].  */
    AARCH64_FIELD ( 4, 2), /* imm2_4: general immediate in bits [5:4].  */
    AARCH64_FIELD ( 8, 2), /* imm2_8: general immediate in bits [9:8].  */
    AARCH64_FIELD (10, 2), /* imm2_10: 2-bit immediate, bits [11:10] */
    AARCH64_FIELD (12, 2), /* imm2_12: 2-bit immediate, bits [13:12] */
    AARCH64_FIELD (13, 2), /* imm2_13: 2-bit immediate, bits [14:13] */
    AARCH64_FIELD (15, 2), /* imm2_15: 2-bit immediate, bits [16:15] */
    AARCH64_FIELD (16, 2), /* imm2_16: 2-bit immediate, bits [17:16] */
    AARCH64_FIELD (19, 2), /* imm2_19: 2-bit immediate, bits [20:19] */
    AARCH64_FIELD ( 0, 3), /* imm3_0: general immediate in bits [2:0].  */
    AARCH64_FIELD ( 5, 3), /* imm3_5: general immediate in bits [7:5].  */
    AARCH64_FIELD (10, 3), /* imm3_10: in add/sub extended reg instructions.  */
    AARCH64_FIELD (12, 3), /* imm3_12: general immediate in bits [14:12].  */
    AARCH64_FIELD (14, 3), /* imm3_14: general immediate in bits [16:14].  */
    AARCH64_FIELD (15, 3), /* imm3_15: general immediate in bits [17:15].  */
    AARCH64_FIELD (19, 3), /* imm3_19: general immediate in bits [21:19].  */
    AARCH64_FIELD ( 0, 4), /* imm4_0: in rmif instructions.  */
    AARCH64_FIELD ( 5, 4), /* imm4_5: in SME instructions.  */
    AARCH64_FIELD (10, 4), /* imm4_10: in addg/subg instructions.  */
    AARCH64_FIELD (11, 4), /* imm4_11: in advsimd ext and advsimd ins instructions.  */
    AARCH64_FIELD (14, 4), /* imm4_14: general immediate in bits [17:14].  */
    AARCH64_FIELD (16, 5), /* imm5: in conditional compare (immediate) instructions.  */
    AARCH64_FIELD (10, 6), /* imm6_10: in add/sub reg shifted instructions.  */
    AARCH64_FIELD (15, 6), /* imm6_15: in rmif instructions.  */
    AARCH64_FIELD (15, 7), /* imm7: in load/store pair pre/post index instructions.  */
    AARCH64_FIELD (13, 8), /* imm8: in floating-point scalar move immediate inst.  */
    AARCH64_FIELD (12, 9), /* imm9: in load/store pre/post index instructions.  */
    AARCH64_FIELD ( 5, 9), /* imm9_5: in CB<cc> (immediate).  */
    AARCH64_FIELD (10,12), /* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    AARCH64_FIELD ( 5,14), /* imm14: in test bit and branch instructions.  */
    AARCH64_FIELD ( 0,16), /* imm16_0: in udf instruction. */
    AARCH64_FIELD ( 5,16), /* imm16_5: in exception instructions.  */
    AARCH64_FIELD (17, 1), /* imm17_1: in 1 bit element index.  */
    AARCH64_FIELD (17, 2), /* imm17_2: in 2 bits element index.  */
    AARCH64_FIELD ( 5,19), /* imm19: e.g. in CBZ.  */
    AARCH64_FIELD ( 0,26), /* imm26: in unconditional branch instructions.  */
    AARCH64_FIELD (16, 3), /* immb: in advsimd shift by immediate instructions.  */
    AARCH64_FIELD (19, 4), /* immh: in advsimd shift by immediate instructions.  */
    AARCH64_FIELD ( 5,19), /* immhi: e.g. in ADRP.  */
    AARCH64_FIELD (29, 2), /* immlo: e.g. in ADRP.  */
    AARCH64_FIELD (16, 6), /* immr: in bitfield and logical immediate instructions.  */
    AARCH64_FIELD (10, 6), /* imms: in bitfield and logical immediate instructions.  */
    AARCH64_FIELD (11, 1), /* index: in ld/st inst deciding the pre/post-index.  */
    AARCH64_FIELD (24, 1), /* index2: in ld/st pair inst deciding the pre/post-index.  */
    AARCH64_FIELD (30, 2), /* ldst_size: size field in ld/st reg offset inst.  */
    AARCH64_FIELD (13, 2), /* len: in advsimd tbl/tbx instructions.  */
    AARCH64_FIELD (30, 1), /* lse_sz: in LSE extension atomic instructions.  */
    AARCH64_FIELD ( 0, 4), /* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    AARCH64_FIELD (29, 1), /* op: in AdvSIMD modified immediate instructions.  */
    AARCH64_FIELD (19, 2), /* op0: in the system instructions.  */
    AARCH64_FIELD (16, 3), /* op1: in the system instructions.  */
    AARCH64_FIELD ( 5, 3), /* op2: in the system instructions.  */
    AARCH64_FIELD (22, 2), /* opc: in load/store reg offset instructions.  */
    AARCH64_FIELD (23, 1), /* opc1: in load/store reg offset instructions.  */
    AARCH64_FIELD (12, 4), /* opcode: in advsimd load/store instructions.  */
    AARCH64_FIELD (13, 3), /* option: in ld/st reg offset + add/sub extended reg inst.  */
    AARCH64_FIELD (11, 2), /* rotate1: FCMLA immediate rotate.  */
    AARCH64_FIELD (13, 2), /* rotate2: Indexed element FCMLA immediate rotate.  */
    AARCH64_FIELD (12, 1), /* rotate3: FCADD immediate rotate.  */
    AARCH64_FIELD (10, 6), /* scale: in the fixed-point scalar to fp converting inst.  */
    AARCH64_FIELD (31, 1), /* sf: in integer data processing instructions.  */
    AARCH64_FIELD (22, 2), /* shift: in add/sub reg/imm shifted instructions.  */
    AARCH64_FIELD (22, 2), /* size: in most AdvSIMD and floating-point instructions.  */
    AARCH64_FIELD (22, 1), /* sz: 1-bit element size select.  */
    AARCH64_FIELD (22, 2), /* type: floating point type field in fp data inst.  */
    AARCH64_FIELD (10, 2), /* vldst_size: size field in the AdvSIMD load/store inst.  */
    AARCH64_FIELD ( 5, 3), /* off3: immediate offset used to calculate slice number in a ZA tile.  */
    AARCH64_FIELD ( 5, 2), /* off2: immediate offset used to calculate slice number in a ZA tile.  */
    AARCH64_FIELD ( 7, 1), /* ZAn_1: name of the 1bit encoded ZA tile.  */
    AARCH64_FIELD ( 5, 1), /* ol: immediate offset used to calculate slice number in a ZA tile. */
    AARCH64_FIELD ( 6, 2), /* ZAn_2: name of the 2bit encoded ZA tile.  */
    AARCH64_FIELD ( 5, 3), /* ZAn_3: name of the 3bit encoded ZA tile.  */
    AARCH64_FIELD ( 6, 1), /* ZAn: name of the bit encoded ZA tile.  */
    AARCH64_FIELD (12, 4), /* opc2: in rcpc3 ld/st inst deciding the pre/post-index.  */
    AARCH64_FIELD (30, 2), /* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width.  */
    AARCH64_FIELD ( 5, 1), /* FLD_brbop: used in BRB to mean IALL or INJ.  */
    AARCH64_FIELD ( 8, 1), /* ZA8_1: name of the 1 bit encoded ZA tile ZA0-ZA1.  */
    AARCH64_FIELD ( 7, 2), /* ZA7_2: name of the 2 bits encoded ZA tile ZA0-ZA3.  */
    AARCH64_FIELD ( 6, 3), /* ZA6_3: name of the 3 bits encoded ZA tile ZA0-ZA7.  */
    AARCH64_FIELD ( 5, 4), /* ZA5_4: name of the 4 bits encoded ZA tile ZA0-ZA15.  */
};
    449 
    450 enum aarch64_operand_class
    451 aarch64_get_operand_class (enum aarch64_opnd type)
    452 {
    453   return aarch64_operands[type].op_class;
    454 }
    455 
    456 const char *
    457 aarch64_get_operand_name (enum aarch64_opnd type)
    458 {
    459   return aarch64_operands[type].name;
    460 }
    461 
    462 /* Get operand description string.
    463    This is usually for the diagnosis purpose.  */
    464 const char *
    465 aarch64_get_operand_desc (enum aarch64_opnd type)
    466 {
    467   return aarch64_operands[type].desc;
    468 }
    469 
/* Table of all conditional affixes.  Entry I describes condition code I;
   each entry lists every accepted spelling of that condition (the extra
   names such as "nlast"/"tcont" are presumably the SVE condition aliases
   — confirm against the printers/parsers that consume this table).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
    490 
    491 const aarch64_cond *
    492 get_cond_from_value (aarch64_insn value)
    493 {
    494   assert (value < 16);
    495   return &aarch64_conds[(unsigned int) value];
    496 }
    497 
    498 const aarch64_cond *
    499 get_inverted_cond (const aarch64_cond *cond)
    500 {
    501   return &aarch64_conds[cond->value ^ 0x1];
    502 }
    503 
    504 /* Table describing the operand extension/shifting operators; indexed by
    505    enum aarch64_modifier_kind.
    506 
    507    The value column provides the most common values for encoding modifiers,
    508    which enables table-driven encoding/decoding for the modifiers.  */
    509 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
    510 {
    511     {"none", 0x0},
    512     {"msl",  0x0},
    513     {"ror",  0x3},
    514     {"asr",  0x2},
    515     {"lsr",  0x1},
    516     {"lsl",  0x0},
    517     {"uxtb", 0x0},
    518     {"uxth", 0x1},
    519     {"uxtw", 0x2},
    520     {"uxtx", 0x3},
    521     {"sxtb", 0x4},
    522     {"sxth", 0x5},
    523     {"sxtw", 0x6},
    524     {"sxtx", 0x7},
    525     {"mul", 0x0},
    526     {"mul vl", 0x0},
    527     {NULL, 0},
    528 };
    529 
    530 enum aarch64_modifier_kind
    531 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
    532 {
    533   return desc - aarch64_operand_modifiers;
    534 }
    535 
    536 aarch64_insn
    537 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
    538 {
    539   return aarch64_operand_modifiers[kind].value;
    540 }
    541 
    542 enum aarch64_modifier_kind
    543 aarch64_get_operand_modifier_from_value (aarch64_insn value,
    544 					 bool extend_p)
    545 {
    546   if (extend_p)
    547     return AARCH64_MOD_UXTB + value;
    548   else
    549     return AARCH64_MOD_LSL - value;
    550 }
    551 
    552 bool
    553 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
    554 {
    555   return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
    556 }
    557 
    558 static inline bool
    559 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
    560 {
    561   return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
    562 }
    563 
/* Names for the memory barrier options, indexed by their 4-bit
   encoding.  Encodings with no architected name are spelled as their
   literal immediate ("#0xNN").  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
    583 
/* Names for the DSB nXS barrier variants; the value column holds the
   operand immediate, whose bits <4:3> correspond to CRm<3:2>.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
    591 
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.

   NOTE: entry order is significant — F_DEFAULT indexes into this table,
   so the first two entries must not move.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     BTI R and SHUH must be the first and second entries respectively
     so that F_DEFAULT refers to the correct table entries.  */
  { "r",	HINT_OPD_R },		/* BTI R.  */
  { "",		HINT_OPD_NPHINT},	/* SHUH. */
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "dsync",	HINT_OPD_DSYNC },	/* GCSB DSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { "keep",	HINT_OPD_KEEP },	/* STSHH KEEP  */
  { "strm",	HINT_OPD_STRM },	/* STSHH STRM  */
  { "ph",	HINT_OPD_PHINT },	/* SHUH PH.  */
  { NULL,	HINT_OPD_NULL },
};
    616 
/* Encode a PRFM operation from its components:
   op -> op:       load = 0 instruction = 1 store = 2
   l  -> level:    1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* Names for the 5-bit PRFM prefetch operations, indexed by the encoded
   value.  NULL names mark unallocated encodings, which are printed as
   plain immediates.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "pldslckeep", B(0, 4, 0) },
  { "pldslcstrm", B(0, 4, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "plislckeep", B(1, 4, 0) },
  { "plislcstrm", B(1, 4, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { "pstslckeep", B(2, 4, 0) },
  { "pstslcstrm", B(2, 4, 1) },
  { "ir", B(3, 1, 0) },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
    657 
/* Utilities for checking value constraints.  */
    660 
/* Return true if VALUE lies within the inclusive range [LOW, HIGH].  */
static inline bool
value_in_range_p (int64_t value, int64_t low, int64_t high)
{
  return value >= low && value <= high;
}
    666 
/* Return true if VALUE is a multiple of ALIGN.  */
static inline bool
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
    673 
/* Return true if signed VALUE is representable in a two's-complement
   bit-field WIDTH bits wide.  */
static inline bool
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  /* Representable range is [-2^(width-1), 2^(width-1) - 1].  */
  int64_t limit = (uint64_t) 1 << (width - 1);
  return value >= -limit && value < limit;
}
    687 
/* Return true if VALUE is a non-negative integer representable in an
   unsigned bit-field WIDTH bits wide.  */
static inline bool
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  /* Representable range is [0, 2^width - 1].  */
  int64_t limit = (uint64_t) 1 << width;
  return value >= 0 && value < limit;
}
    701 
/* Return true if OPERAND is SP or WSP, i.e. an integer register operand
   numbered 31 whose operand description allows the stack pointer.  */
bool
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
    711 
/* Return 1 if OPERAND is XZR or WZR, i.e. an integer register operand
   numbered 31 whose operand description does NOT allow the stack
   pointer (register 31 then reads as zero).  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
    721 
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.  */

static inline bool
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    /* A W/X operand that is actually the stack pointer (register 31 in a
       position that allows SP) also satisfies the WSP/SP qualifier.  */
    case AARCH64_OPND_QLF_W:
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return true;
      break;
    case AARCH64_OPND_QLF_X:
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return true;
      break;
    /* Conversely, a WSP/SP operand also satisfies the plain W/X
       qualifier when its operand description allows the stack pointer.  */
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return true;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return true;
      break;
    default:
      break;
    }

  return false;
}
    756 
    757 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
    758    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
    759 
    760    Return NIL if more than one expected qualifiers are found.  */
    761 
aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
				int idx,
				const aarch64_opnd_qualifier_t known_qlf,
				int known_idx)
{
  int i, saved_i;

  /* Special case.

     When the known qualifier is NIL, we have to assume that there is only
     one qualifier sequence in the *QSEQ_LIST and return the corresponding
     qualifier directly.  One scenario is that for instruction
	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
     which has only one possible valid qualifier sequence
	NIL, S_D
     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.

     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifer sequence is
     not in use (when all qualifiers in the sequence are NILs), we have to
     handle this special case here.  */
  if (((enum aarch64_opnd) known_qlf) == AARCH64_OPND_NIL)
    {
      assert (((enum aarch64_opnd) qseq_list[0][known_idx]) == AARCH64_OPND_NIL);
      return qseq_list[0][idx];
    }

  /* Find the unique sequence with KNOWN_QLF at KNOWN_IDX; if two or more
     sequences match, the expected qualifier is ambiguous.  */
  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    {
      if (qseq_list[i][known_idx] == known_qlf)
	{
	  if (saved_i != -1)
	    /* More than one sequences are found to have KNOWN_QLF at
	       KNOWN_IDX.  */
	    return AARCH64_OPND_QLF_NIL;
	  saved_i = i;
	}
    }

  /* NOTE(review): if no sequence matched, saved_i is still -1 and this
     reads qseq_list[-1][idx]; callers appear to guarantee at least one
     match — confirm.  */
  return qseq_list[saved_i][idx];
}
    805 
/* Broad category of an operand qualifier; selects how the data fields in
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size / element count / encoding
     value; for OQK_VALUE_IN_RANGE they are lower bound / upper bound /
     unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
    826 
    827 /* Indexed by the operand qualifier enumerators.  */
    828 static const struct operand_qualifier_data aarch64_opnd_qualifiers[] =
    829 {
    830   {0, 0, 0, "NIL", OQK_NIL},
    831 
    832   /* Operand variant qualifiers.
    833      First 3 fields:
    834      element size, number of elements and common value for encoding.  */
    835 
    836   {4, 1, 0x0, "w", OQK_OPD_VARIANT},
    837   {8, 1, 0x1, "x", OQK_OPD_VARIANT},
    838   {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
    839   {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
    840 
    841   {1, 1, 0x0, "b", OQK_OPD_VARIANT},
    842   {2, 1, 0x1, "h", OQK_OPD_VARIANT},
    843   {4, 1, 0x2, "s", OQK_OPD_VARIANT},
    844   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
    845   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
    846   {2, 1, 0x0, "2b", OQK_OPD_VARIANT},
    847   {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
    848   {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
    849 
    850   {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
    851   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
    852   {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
    853   {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
    854   {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
    855   {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
    856   {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
    857   {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
    858   {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
    859   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
    860   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
    861 
    862   {0, 0, 0, "z", OQK_OPD_VARIANT},
    863   {0, 0, 0, "m", OQK_OPD_VARIANT},
    864 
    865   /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
    866   {16, 0, 0, "tag", OQK_OPD_VARIANT},
    867 
    868   /* Qualifiers constraining the value range.
    869      First 3 fields:
    870      Lower bound, higher bound, unused.  */
    871 
    872   {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
    873   {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
    874   {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
    875   {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
    876   {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
    877   {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
    878   {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
    879 
    880   /* Qualifiers for miscellaneous purpose.
    881      First 3 fields:
    882      unused, unused and unused.  */
    883 
    884   {0, 0, 0, "lsl", 0},
    885   {0, 0, 0, "msl", 0},
    886 
    887   {0, 0, 0, "retrieving", 0},
    888 };
    889 
/* Return true if QUALIFIER names an operand variant (register size /
   vector arrangement) rather than a value-range or misc qualifier.  */
static inline bool
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
}
    895 
/* Return true if QUALIFIER constrains an immediate to a value range.  */
static inline bool
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
}
    901 
/* Return the printable name of QUALIFIER (e.g. "x", "4s", "imm_0_31").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
    907 
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  Only valid for operand variant
   qualifiers (asserted).  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
    916 
/* Return the number of data elements implied by the operand variant
   QUALIFIER (e.g. 4 for "4s").  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
    923 
/* Return the common instruction-encoding value associated with the
   operand variant QUALIFIER.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data2;
}
    930 
/* Return the inclusive lower bound of a value-range QUALIFIER.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
    937 
/* Return the inclusive upper bound of a value-range QUALIFIER.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
    944 
#ifdef DEBUG_AARCH64
/* printf-style debug output helper; prefixes "#### " and appends a
   newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print the names of the AARCH64_MAX_OPND_NUM qualifiers starting at
   QUALIFIER on one debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently attached to the operands OPND next to
   the candidate sequence QUALIFIER being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
    982 
    983 /* This function checks if the given instruction INSN is a destructive
    984    instruction based on the usage of the registers.  It does not recognize
    985    unary destructive instructions.  */
    986 bool
    987 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
    988 {
    989   int i = 0;
    990   const enum aarch64_opnd *opnds = opcode->operands;
    991 
    992   if (opnds[0] == AARCH64_OPND_NIL)
    993     return false;
    994 
    995   while (opnds[++i] != AARCH64_OPND_NIL)
    996     if (opnds[i] == opnds[0])
    997       return true;
    998 
    999   return false;
   1000 }
   1001 
   1002 /* TODO improve this, we can have an extra field at the runtime to
   1003    store the number of operands rather than calculating it every time.  */
   1004 
   1005 int
   1006 aarch64_num_of_operands (const aarch64_opcode *opcode)
   1007 {
   1008   int i = 0;
   1009   const enum aarch64_opnd *opnds = opcode->operands;
   1010   while (opnds[i++] != AARCH64_OPND_NIL)
   1011     ;
   1012   --i;
   1013   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
   1014   return i;
   1015 }
   1016 
   1017 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   1018    If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
   1019 
   1020    Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   1021    This is always 0 if the function succeeds.
   1022 
   1023    N.B. on the entry, it is very likely that only some operands in *INST
   1024    have had their qualifiers been established.
   1025 
   1026    If STOP_AT is not -1, the function will only try to match
   1027    the qualifier sequence for operands before and including the operand
   1028    of index STOP_AT; and on success *RET will only be filled with the first
   1029    (STOP_AT+1) qualifiers.
   1030 
   1031    A couple examples of the matching algorithm:
   1032 
   1033    X,W,NIL should match
   1034    X,W,NIL
   1035 
   1036    NIL,NIL should match
   1037    X  ,NIL
   1038 
   1039    Apart from serving the main encoding routine, this can also be called
   1040    during or after the operand decoding.  */
   1041 
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Count the qualifiers in this sequence that conflict with the
	 qualifiers already established on the instruction's operands.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  The break above left QUALIFIERS_LIST
	 pointing at the matching sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
   1144 
   1145 /* Operand qualifier matching and resolving.
   1146 
   1147    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   1148    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
   1149 
   1150    Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   1151    This is always 0 if the function succeeds.
   1152 
   1153    if UPDATE_P, update the qualifier(s) in *INST after the matching
   1154    succeeds.  */
   1155 
static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find a qualifier sequence that matches all operands; on failure
     *INVALID_COUNT holds the smallest number of mismatches found.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
   1186 
   1187 /* Return TRUE if VALUE is a wide constant that can be moved into a general
   1188    register by MOVZ.
   1189 
   1190    IS32 indicates whether value is a 32-bit immediate or not.
   1191    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   1192    amount will be returned in *SHIFT_AMOUNT.  */
   1193 
   1194 bool
   1195 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
   1196 {
   1197   int amount;
   1198 
   1199   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1200 
   1201   if (is32)
   1202     {
   1203       /* Allow all zeros or all ones in top 32-bits, so that
   1204 	 32-bit constant expressions like ~0x80000000 are
   1205 	 permitted.  */
   1206       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
   1207 	/* Immediate out of range.  */
   1208 	return false;
   1209       value &= 0xffffffff;
   1210     }
   1211 
   1212   /* first, try movz then movn */
   1213   amount = -1;
   1214   if ((value & ((uint64_t) 0xffff << 0)) == value)
   1215     amount = 0;
   1216   else if ((value & ((uint64_t) 0xffff << 16)) == value)
   1217     amount = 16;
   1218   else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
   1219     amount = 32;
   1220   else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
   1221     amount = 48;
   1222 
   1223   if (amount == -1)
   1224     {
   1225       DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1226       return false;
   1227     }
   1228 
   1229   if (shift_amount != NULL)
   1230     *shift_amount = amount;
   1231 
   1232   DEBUG_TRACE ("exit true with amount %d", amount);
   1233 
   1234   return true;
   1235 }
   1236 
   1237 /* Build the accepted values for immediate logical SIMD instructions.
   1238 
   1239    The standard encodings of the immediate value are:
   1240      N      imms     immr         SIMD size  R             S
   1241      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
   1242      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
   1243      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
   1244      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
   1245      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
   1246      0      11110s   00000r       2       UInt(r)       UInt(s)
   1247    where all-ones value of S is reserved.
   1248 
   1249    Let's call E the SIMD size.
   1250 
   1251    The immediate value is: S+1 bits '1' rotated to the right by R.
   1252 
   1253    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   1254    (remember S != E - 1).  */
   1255 
#define TOTAL_IMM_NB  5334

/* One valid logical-immediate bitmask together with its 13-bit standard
   encoding (N:immr:imms).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid bitmasks, sorted by IMM for bsearch;
   built lazily by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
   1265 
   1266 static int
   1267 simd_imm_encoding_cmp(const void *i1, const void *i2)
   1268 {
   1269   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
   1270   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
   1271 
   1272   if (imm1->imm < imm2->imm)
   1273     return -1;
   1274   if (imm1->imm > imm2->imm)
   1275     return +1;
   1276   return 0;
   1277 }
   1278 
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t encoding = s;
  encoding |= r << 6;
  encoding |= (uint32_t) is64 << 12;
  return encoding;
}
   1292 
/* Fill simd_immediates[] with every valid logical-immediate bitmask and
   its standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* Enumerate all (S, R) pairs for this element size; S = E - 1
	 (all-ones element) is reserved and excluded by the loop bound.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size; each case
	       doubles the pattern width, falling through to the next.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
   1358 
   1359 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   1360    be accepted by logical (immediate) instructions
   1361    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
   1362 
   1363    ESIZE is the number of bytes in the decoded immediate value.
   1364    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   1365    VALUE will be returned in *ENCODING.  */
   1366 
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the sorted lookup table on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  UPPER is a mask of the
     bits above the ESIZE-byte value; the shift is split in two so that
     esize == 8 shifts by 32+32 rather than an undefined shift by 64.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
   1410 
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  result |= 1 << i;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-ones nor all-zeros cannot be
	     expressed in this format.  */
	  return -1;
	}
    }
  return result;
}
   1432 
   1433 /* Utility inline functions for operand_general_constraint_met_p.  */
   1434 
/* Record an operand error of kind KIND for operand IDX with message
   ERROR in MISMATCH_DETAIL; a NULL MISMATCH_DETAIL means the caller does
   not want error details and the call is a no-op.  */
static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}
   1446 
/* Record a syntax error for operand IDX with message ERROR.  */
static inline void
set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
		  const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
}
   1455 
/* Record that operand IDX names an invalid register number; PREFIX is
   the register-name prefix (e.g. "x") and [LOWER_BOUND, UPPER_BOUND]
   the valid number range, stored in the data[] slots for later message
   formatting.  */
static inline void
set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
			 const char *prefix, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
  mismatch_detail->data[0].s = prefix;
  mismatch_detail->data[1].i = lower_bound;
  mismatch_detail->data[2].i = upper_bound;
}
   1467 
/* Record that the value described by ERROR for operand IDX is outside
   the range [LOWER_BOUND, UPPER_BOUND]; the bounds are stored in the
   data[] slots for later message formatting.  */
static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0].i = lower_bound;
  mismatch_detail->data[1].i = upper_bound;
}
   1479 
/* Report that the immediate value in operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
			    int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate value"));
}
   1489 
/* Report that the immediate offset in operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
			       int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate offset"));
}
   1499 
/* Report that the register number in operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
			      int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register number"));
}
   1509 
/* Report that the register element index in operand IDX should be in the
   range [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
				 int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register element index"));
}
   1519 
/* Report that the shift amount in operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("shift amount"));
}
   1529 
   1530 /* Report that the MUL modifier in operand IDX should be in the range
   1531    [LOWER_BOUND, UPPER_BOUND].  */
   1532 static inline void
   1533 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1534 				   int idx, int lower_bound, int upper_bound)
   1535 {
   1536   if (mismatch_detail == NULL)
   1537     return;
   1538   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1539 			  _("multiplier"));
   1540 }
   1541 
   1542 static inline void
   1543 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
   1544 		     int alignment)
   1545 {
   1546   if (mismatch_detail == NULL)
   1547     return;
   1548   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
   1549   mismatch_detail->data[0].i = alignment;
   1550 }
   1551 
   1552 static inline void
   1553 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
   1554 			   int expected_num)
   1555 {
   1556   if (mismatch_detail == NULL)
   1557     return;
   1558   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
   1559   mismatch_detail->data[0].i = 1 << expected_num;
   1560 }
   1561 
   1562 static inline void
   1563 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
   1564 			   int expected_num)
   1565 {
   1566   if (mismatch_detail == NULL)
   1567     return;
   1568   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
   1569   mismatch_detail->data[0].i = 1 << expected_num;
   1570 }
   1571 
   1572 static inline void
   1573 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
   1574 		     int idx, int expected)
   1575 {
   1576   if (mismatch_detail == NULL)
   1577     return;
   1578   set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
   1579   mismatch_detail->data[0].i = expected;
   1580 }
   1581 
   1582 static inline void
   1583 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
   1584 		 const char* error)
   1585 {
   1586   if (mismatch_detail == NULL)
   1587     return;
   1588   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
   1589 }
   1590 
   1591 /* Check that indexed register operand OPND has a register in the range
   1592    [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
   1593    PREFIX is the register prefix, such as "z" for SVE vector registers.  */
   1594 
   1595 static bool
   1596 check_reglane (const aarch64_opnd_info *opnd,
   1597 	       aarch64_operand_error *mismatch_detail, int idx,
   1598 	       const char *prefix, int min_regno, int max_regno,
   1599 	       int min_index, int max_index)
   1600 {
   1601   if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
   1602     {
   1603       set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
   1604 			       max_regno);
   1605       return false;
   1606     }
   1607   if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
   1608     {
   1609       set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
   1610 				       max_index);
   1611       return false;
   1612     }
   1613   return true;
   1614 }
   1615 
   1616 /* Check that register list operand OPND has NUM_REGS registers and a
   1617    register stride of STRIDE.  */
   1618 
   1619 static bool
   1620 check_reglist (const aarch64_opnd_info *opnd,
   1621 	       aarch64_operand_error *mismatch_detail, int idx,
   1622 	       int num_regs, int stride)
   1623 {
   1624   if (opnd->reglist.num_regs != num_regs)
   1625     {
   1626       set_reg_list_length_error (mismatch_detail, idx, num_regs);
   1627       return false;
   1628     }
   1629   if (opnd->reglist.stride != stride)
   1630     {
   1631       set_reg_list_stride_error (mismatch_detail, idx, stride);
   1632       return false;
   1633     }
   1634   return true;
   1635 }
   1636 
/* An inclusive [min, max] range of immediate values.  */
typedef struct
{
  int64_t min;
  int64_t max;
} imm_range_t;

/* Return the range of values representable by an immediate field of
   SIZE bits: two's-complement signed if SIGNED_RNG, otherwise
   unsigned.  */
static imm_range_t
imm_range_min_max (unsigned size, bool signed_rng)
{
  /* SIZE must be nonzero (the signed branch would otherwise shift by
     (unsigned) -1, which is undefined behavior) and small enough that
     the shifts below cannot overflow int64_t.  */
  assert (size > 0 && size < 63);
  imm_range_t r;
  if (signed_rng)
    {
      r.max = (((int64_t) 0x1) << (size - 1)) - 1;
      r.min = - r.max - 1;
    }
  else
    {
      r.max = (((int64_t) 0x1) << size) - 1;
      r.min = 0;
    }
  return r;
}
   1660 
   1661 /* Check that an immediate value is in the range provided by the
   1662    operand type.  */
   1663 static bool
   1664 check_immediate_out_of_range (int64_t imm,
   1665 			      enum aarch64_opnd type,
   1666 			      aarch64_operand_error *mismatch_detail,
   1667 			      int idx)
   1668 {
   1669   const aarch64_operand *operand = get_operand_from_code (type);
   1670   uint8_t size = get_operand_fields_width (operand);
   1671   bool unsigned_imm = operand_need_unsigned_offset (operand);
   1672   bool (*value_fit_field) (int64_t, unsigned)
   1673     = (unsigned_imm
   1674       ? value_fit_unsigned_field_p
   1675       : value_fit_signed_field_p);
   1676 
   1677   if (!value_fit_field (imm, size))
   1678     {
   1679       imm_range_t rng = imm_range_min_max (size, !unsigned_imm);
   1680       set_imm_out_of_range_error (mismatch_detail, idx, rng.min, rng.max);
   1681       return false;
   1682     }
   1683   return true;
   1684 }
   1685 
/* Check that indexed ZA operand OPND has:

   - a selection register in the range [MIN_WREG, MIN_WREG + 3]

   - RANGE_SIZE consecutive immediate offsets.

   - an initial immediate offset that is a multiple of RANGE_SIZE
     in the range [0, MAX_VALUE * RANGE_SIZE]

   - a vector group size of GROUP_SIZE.

   - STATUS_VG for cases where VGx2 or VGx4 is mandatory.

   Return true on success; on failure record a diagnostic in
   *MISMATCH_DETAIL (if non-NULL) and return false.  The checks run in
   order, so the first failing constraint determines the message.  */
static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size, bool status_vg)
{
  /* The selection register must lie in a four-register window starting
     at MIN_WREG; only the w8 and w12 windows are used by callers.  */
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  /* The first offset of the range must not exceed MAX_VALUE * RANGE_SIZE.  */
  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* A range of offsets must start at a multiple of its length.  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* COUNTM1 is the number of offsets minus one; exactly RANGE_SIZE
     offsets must have been written.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code, so a
     group size of 0 (absent) is accepted unless STATUS_VG makes the
     specifier mandatory.  */
  if (opnd->indexed_za.group_size != group_size
      && (status_vg || opnd->indexed_za.group_size != 0 ))
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
   1763 
   1764 /* Given a load/store operation, calculate the size of transferred data via a
   1765    cumulative sum of qualifier sizes preceding the address operand in the
   1766    OPNDS operand list argument.  */
   1767 int
   1768 calc_ldst_datasize (const aarch64_opnd_info *opnds)
   1769 {
   1770   unsigned num_bytes = 0; /* total number of bytes transferred.  */
   1771   enum aarch64_operand_class opnd_class;
   1772   enum aarch64_opnd type;
   1773 
   1774   for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
   1775     {
   1776       type = opnds[i].type;
   1777       opnd_class = aarch64_operands[type].op_class;
   1778       if (opnd_class == AARCH64_OPND_CLASS_ADDRESS)
   1779 	break;
   1780       num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier);
   1781     }
   1782   return num_bytes;
   1783 }
   1784 
   1785 
   1786 /* General constraint checking based on operand code.
   1787 
   1788    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   1789    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
   1790 
   1791    This function has to be called after the qualifiers for all operands
   1792    have been resolved.
   1793 
   1794    Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   1795    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   1796    of error message during the disassembling where error message is not
   1797    wanted.  We avoid the dynamic construction of strings of error messages
   1798    here (i.e. in libopcodes), as it is costly and complicated; instead, we
   1799    use a combination of error code, static string and some integer data to
   1800    represent an error.  */
   1801 
   1802 static bool
   1803 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
   1804 				  enum aarch64_opnd type,
   1805 				  const aarch64_opcode *opcode,
   1806 				  aarch64_operand_error *mismatch_detail)
   1807 {
   1808   unsigned num, modifiers, shift;
   1809   unsigned char size;
   1810   int64_t imm, min_value, max_value;
   1811   uint64_t uvalue, mask;
   1812   const aarch64_opnd_info *opnd = opnds + idx;
   1813   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
   1814   int i;
   1815 
   1816   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
   1817 
   1818   switch (aarch64_operands[type].op_class)
   1819     {
   1820     case AARCH64_OPND_CLASS_INT_REG:
   1821       /* Check for pair of xzr registers.  */
   1822       if (type == AARCH64_OPND_PAIRREG_OR_XZR
   1823 	  && opnds[idx - 1].reg.regno == 0x1f)
   1824 	{
   1825 	  if (opnds[idx].reg.regno != 0x1f)
   1826 	    {
   1827 	      set_syntax_error (mismatch_detail, idx - 1,
   1828 				_("second reg in pair should be xzr if first is"
   1829 				  " xzr"));
   1830 	      return false;
   1831 	    }
   1832 	}
   1833       /* Check pair reg constraints for instructions taking a pair of
   1834 	 consecutively-numbered general-purpose registers.  */
   1835       else if (type == AARCH64_OPND_PAIRREG
   1836 	       || type == AARCH64_OPND_PAIRREG_OR_XZR)
   1837 	{
   1838 	  assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
   1839 	  if (opnds[idx - 1].reg.regno % 2 != 0)
   1840 	    {
   1841 	      set_syntax_error (mismatch_detail, idx - 1,
   1842 				_("reg pair must start from even reg"));
   1843 	      return false;
   1844 	    }
   1845 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
   1846 	    {
   1847 	      set_syntax_error (mismatch_detail, idx,
   1848 				_("reg pair must be contiguous"));
   1849 	      return false;
   1850 	    }
   1851 	  break;
   1852 	}
   1853 
   1854       /* <Xt> may be optional in some IC and TLBI instructions.  */
   1855       if (type == AARCH64_OPND_Rt_SYS)
   1856 	{
   1857 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
   1858 			       == AARCH64_OPND_CLASS_SYSTEM));
   1859 	  if (!(opnds[1].present && aarch64_sys_ins_reg_tlbid_xt (opnds[0].sysins_op)))
   1860 	    {
   1861 	      if (opnds[1].present
   1862 		  && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1863 		{
   1864 		  set_other_error (mismatch_detail, idx, _("extraneous register"));
   1865 		  return false;
   1866 		}
   1867 	      if (!opnds[1].present
   1868 		  && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1869 		{
   1870 		  set_other_error (mismatch_detail, idx, _("missing register"));
   1871 		  return false;
   1872 		}
   1873 	   }
   1874 	}
   1875       switch (qualifier)
   1876 	{
   1877 	case AARCH64_OPND_QLF_WSP:
   1878 	case AARCH64_OPND_QLF_SP:
   1879 	  if (!aarch64_stack_pointer_p (opnd))
   1880 	    {
   1881 	      set_other_error (mismatch_detail, idx,
   1882 		       _("stack pointer register expected"));
   1883 	      return false;
   1884 	    }
   1885 	  break;
   1886 	default:
   1887 	  break;
   1888 	}
   1889       break;
   1890 
   1891     case AARCH64_OPND_CLASS_SVE_REG:
   1892       switch (type)
   1893 	{
   1894 	case AARCH64_OPND_SVE_Zm3_INDEX:
   1895 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
   1896 	case AARCH64_OPND_SVE_Zm3_19_INDEX:
   1897 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
   1898 	case AARCH64_OPND_SVE_Zm3_10_INDEX:
   1899 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
   1900 	case AARCH64_OPND_SVE_Zm4_INDEX:
   1901 	  size = get_operand_fields_width (get_operand_from_code (type));
   1902 	  shift = get_operand_specific_data (&aarch64_operands[type]);
   1903 	  if (!check_reglane (opnd, mismatch_detail, idx,
   1904 			      "z", 0, (1 << shift) - 1,
   1905 			      0, (1u << (size - shift)) - 1))
   1906 	    return false;
   1907 	  break;
   1908 
   1909 	case AARCH64_OPND_SVE_Zm1_23_INDEX:
   1910 	  size = get_operand_fields_width (get_operand_from_code (type));
   1911 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 1))
   1912 	    return 0;
   1913 	  break;
   1914 
   1915 	case AARCH64_OPND_SME_Zn_INDEX2_19:
   1916 	case AARCH64_OPND_SVE_Zm2_22_INDEX:
   1917 	  size = get_operand_fields_width (get_operand_from_code (type));
   1918 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 3))
   1919 	    return 0;
   1920 	  break;
   1921 
   1922 	case AARCH64_OPND_SVE_Zn_INDEX:
   1923 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1924 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1925 			      0, 64 / size - 1))
   1926 	    return false;
   1927 	  break;
   1928 
   1929 	case AARCH64_OPND_SVE_Zn_5_INDEX:
   1930 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1931 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1932 			      0, 16 / size - 1))
   1933 	    return false;
   1934 	  break;
   1935 
   1936 	case AARCH64_OPND_SME_PNn3_INDEX1:
   1937 	case AARCH64_OPND_SME_PNn3_INDEX2:
   1938 	  size = get_operand_field_width (get_operand_from_code (type), 0);
   1939 	  if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
   1940 			      0, (1 << size) - 1))
   1941 	    return false;
   1942 	  break;
   1943 
   1944 	case AARCH64_OPND_SVE_Zm3_12_INDEX:
   1945 	case AARCH64_OPND_SME_Zn_INDEX1_16:
   1946 	case AARCH64_OPND_SME_Zn_INDEX2_15:
   1947 	case AARCH64_OPND_SME_Zn_INDEX2_16:
   1948 	case AARCH64_OPND_SME_Zn_INDEX3_14:
   1949 	case AARCH64_OPND_SME_Zn_INDEX3_15:
   1950 	case AARCH64_OPND_SME_Zn_INDEX4_14:
   1951 	case AARCH64_OPND_SVE_Zn0_INDEX:
   1952 	case AARCH64_OPND_SVE_Zn1_17_INDEX:
   1953 	case AARCH64_OPND_SVE_Zn2_18_INDEX:
   1954 	case AARCH64_OPND_SVE_Zn3_22_INDEX:
   1955 	case AARCH64_OPND_SVE_Zd0_INDEX:
   1956 	case AARCH64_OPND_SVE_Zd1_17_INDEX:
   1957 	case AARCH64_OPND_SVE_Zd2_18_INDEX:
   1958 	case AARCH64_OPND_SVE_Zd3_22_INDEX:
   1959 	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
   1960 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1961 			      0, (1 << size) - 1))
   1962 	    return false;
   1963 	  break;
   1964 
   1965 	case AARCH64_OPND_SME_Zm_INDEX1:
   1966 	case AARCH64_OPND_SME_Zm_INDEX2:
   1967 	case AARCH64_OPND_SME_Zm_INDEX2_3:
   1968 	case AARCH64_OPND_SME_Zm_INDEX3_1:
   1969 	case AARCH64_OPND_SME_Zm_INDEX3_2:
   1970 	case AARCH64_OPND_SME_Zm_INDEX3_3:
   1971 	case AARCH64_OPND_SME_Zm_INDEX3_10:
   1972 	case AARCH64_OPND_SME_Zm_INDEX4_1:
   1973 	case AARCH64_OPND_SME_Zm_INDEX4_2:
   1974 	case AARCH64_OPND_SME_Zm_INDEX4_3:
   1975 	case AARCH64_OPND_SME_Zm_INDEX4_10:
   1976 	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
   1977 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
   1978 			      0, (1 << size) - 1))
   1979 	    return false;
   1980 	  break;
   1981 
   1982 	case AARCH64_OPND_SME_Zk_INDEX:
   1983 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 3))
   1984 	    return false;
   1985 	  if ((opnd->reglane.regno & 20) != 20)
   1986 	    {
   1987 	      set_other_error (mismatch_detail, idx,
   1988 			       _("register out of range"));
   1989 	      return false;
   1990 	    }
   1991 	  break;
   1992 
   1993 	case AARCH64_OPND_SME_Zm:
   1994 	case AARCH64_OPND_SME_Zm_17:
   1995 	  if (opnd->reg.regno > 15)
   1996 	    {
   1997 	      set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
   1998 	      return false;
   1999 	    }
   2000 	  break;
   2001 
   2002 	case AARCH64_OPND_SME_Zn_6_3:
   2003 	  if (opnd->reg.regno > 15 || opnd->reg.regno % 2 != 0)
   2004 	    {
   2005 	      set_other_error (mismatch_detail, idx,
   2006 			       _("register out of range"));
   2007 	      return false;
   2008 	    }
   2009 	  break;
   2010 
   2011 	case AARCH64_OPND_SME_Zm_17_3:
   2012 	  if (opnd->reg.regno < 16 || opnd->reg.regno % 2 != 0)
   2013 	    {
   2014 	      set_other_error (mismatch_detail, idx,
   2015 			       _("register out of range"));
   2016 	      return false;
   2017 	    }
   2018 	  break;
   2019 
   2020 	case AARCH64_OPND_SME_PnT_Wm_imm:
   2021 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2022 	  max_value = 16 / size - 1;
   2023 	  if (!check_za_access (opnd, mismatch_detail, idx,
   2024 				12, max_value, 1, 0, get_opcode_dependent_value (opcode)))
   2025 	    return false;
   2026 	  break;
   2027 
   2028 	default:
   2029 	  break;
   2030 	}
   2031       break;
   2032 
   2033     case AARCH64_OPND_CLASS_SVE_REGLIST:
   2034       switch (type)
   2035 	{
   2036 	case AARCH64_OPND_SME_Pdx2:
   2037 	case AARCH64_OPND_SME_Zdnx2:
   2038 	case AARCH64_OPND_SME_Zdnx4:
   2039 	case AARCH64_OPND_SME_Znx2_6_3:
   2040 	case AARCH64_OPND_SME_Zmx2_17_3:
   2041 	case AARCH64_OPND_SME_Zmx2:
   2042 	case AARCH64_OPND_SME_Zmx4:
   2043 	case AARCH64_OPND_SME_Znx2:
   2044 	case AARCH64_OPND_SME_Znx2_BIT_INDEX:
   2045 	case AARCH64_OPND_SME_Znx4:
   2046 	  num = get_operand_specific_data (&aarch64_operands[type]);
   2047 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2048 	    return false;
   2049 	  if (((opnd->reglist.first_regno % num) != 0)
   2050 	      || (type == AARCH64_OPND_SME_Znx2_6_3
   2051 		  && opnd->reglist.first_regno > 15)
   2052 	      || (type == AARCH64_OPND_SME_Zmx2_17_3
   2053 		  && opnd->reglist.first_regno < 16))
   2054 	    {
   2055 	      set_other_error (mismatch_detail, idx,
   2056 			       _("start register out of range"));
   2057 	      return false;
   2058 	    }
   2059 	  break;
   2060 
   2061 	case AARCH64_OPND_SME_Ztx2_STRIDED:
   2062 	case AARCH64_OPND_SME_Ztx4_STRIDED:
   2063 	  /* 2-register lists have a stride of 8 and 4-register lists
   2064 	     have a stride of 4.  */
   2065 	  num = get_operand_specific_data (&aarch64_operands[type]);
   2066 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
   2067 	    return false;
   2068 	  num = 16 | (opnd->reglist.stride - 1);
   2069 	  if ((opnd->reglist.first_regno & ~num) != 0)
   2070 	    {
   2071 	      set_other_error (mismatch_detail, idx,
   2072 			       _("start register out of range"));
   2073 	      return false;
   2074 	    }
   2075 	  break;
   2076 
   2077 	case AARCH64_OPND_SME_PdxN:
   2078 	case AARCH64_OPND_SVE_ZnxN:
   2079 	case AARCH64_OPND_SVE_ZtxN:
   2080 	  num = get_opcode_dependent_value (opcode);
   2081 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2082 	    return false;
   2083 	  break;
   2084 
   2085 	case AARCH64_OPND_SME_Zmx2_INDEX_22:
   2086 	  num = get_operand_specific_data (&aarch64_operands[type]);
   2087 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2088 	      return false;
   2089 	  break;
   2090 
   2091 	case AARCH64_OPND_SME_Zn7xN_UNTYPED:
   2092 	  num = get_opcode_dependent_value (opcode);
   2093 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2094 	      return false;
   2095 	  if (opnd->reglist.first_regno > 7)
   2096 	  {
   2097 	    set_other_error (mismatch_detail, idx, _("start register out of range"));
   2098 	    return false;
   2099 	  }
   2100 	  break;
   2101 
   2102 	default:
   2103 	  abort ();
   2104 	}
   2105       break;
   2106 
   2107     case AARCH64_OPND_CLASS_ZA_ACCESS:
   2108       switch (type)
   2109 	{
   2110 	case AARCH64_OPND_SME_ZA_HV_idx_src:
   2111 	case AARCH64_OPND_SME_ZA_HV_idx_dest:
   2112 	case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
   2113 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2114 	  max_value = 16 / size - 1;
   2115 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
   2116 				get_opcode_dependent_value (opcode),
   2117 				get_opcode_dependent_vg_status (opcode)))
   2118 	    return false;
   2119 	  break;
   2120 
   2121 	case AARCH64_OPND_SME_ZA_array_off4:
   2122 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
   2123 				get_opcode_dependent_value (opcode),
   2124 				get_opcode_dependent_vg_status (opcode)))
   2125 	    return false;
   2126 	  break;
   2127 
   2128 	case AARCH64_OPND_SME_ZA_array_off3_0:
   2129 	case AARCH64_OPND_SME_ZA_array_off3_5:
   2130 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
   2131 				get_opcode_dependent_value (opcode),
   2132 				get_opcode_dependent_vg_status (opcode)))
   2133 	    return false;
   2134 	  break;
   2135 
   2136 	case AARCH64_OPND_SME_ZA_array_off1x4:
   2137 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
   2138 				get_opcode_dependent_value (opcode),
   2139 				get_opcode_dependent_vg_status (opcode)))
   2140 	    return false;
   2141 	  break;
   2142 
   2143 	case AARCH64_OPND_SME_ZA_array_off2x2:
   2144 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
   2145 				get_opcode_dependent_value (opcode),
   2146 				get_opcode_dependent_vg_status (opcode)))
   2147 	    return false;
   2148 	  break;
   2149 
   2150 	case AARCH64_OPND_SME_ZA_array_off2x4:
   2151 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
   2152 				get_opcode_dependent_value (opcode),
   2153 				get_opcode_dependent_vg_status (opcode)))
   2154 	    return false;
   2155 	  break;
   2156 
   2157 	case AARCH64_OPND_SME_ZA_array_off3x2:
   2158 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
   2159 				get_opcode_dependent_value (opcode),
   2160 				get_opcode_dependent_vg_status (opcode)))
   2161 	    return false;
   2162 	  break;
   2163 
   2164 	case AARCH64_OPND_SME_ZA_array_vrsb_1:
   2165 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
   2166 				get_opcode_dependent_value (opcode),
   2167 				get_opcode_dependent_vg_status (opcode)))
   2168 	    return false;
   2169 	  break;
   2170 
   2171 	case AARCH64_OPND_SME_ZA_array_vrsh_1:
   2172 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
   2173 				get_opcode_dependent_value (opcode),
   2174 				get_opcode_dependent_vg_status (opcode)))
   2175 	    return false;
   2176 	  break;
   2177 
   2178 	case AARCH64_OPND_SME_ZA_array_vrss_1:
   2179 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
   2180 				get_opcode_dependent_value (opcode),
   2181 				get_opcode_dependent_vg_status (opcode)))
   2182 	    return false;
   2183 	  break;
   2184 
   2185 	case AARCH64_OPND_SME_ZA_array_vrsd_1:
   2186 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
   2187 				get_opcode_dependent_value (opcode),
   2188 				get_opcode_dependent_vg_status (opcode)))
   2189 	    return false;
   2190 	  break;
   2191 
   2192 	case AARCH64_OPND_SME_ZA_array_vrsb_2:
   2193 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
   2194 				get_opcode_dependent_value (opcode),
   2195 				get_opcode_dependent_vg_status (opcode)))
   2196 	    return false;
   2197 	  break;
   2198 
   2199 	case AARCH64_OPND_SME_ZA_array_vrsh_2:
   2200 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
   2201 				get_opcode_dependent_value (opcode),
   2202 				get_opcode_dependent_vg_status (opcode)))
   2203 	    return false;
   2204 	  break;
   2205 
   2206 	case AARCH64_OPND_SME_ZA_ARRAY4:
   2207 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
   2208 				get_opcode_dependent_value (opcode),
   2209 				get_opcode_dependent_vg_status (opcode)))
   2210 	    return false;
   2211 	  break;
   2212 
   2213 	case AARCH64_OPND_SME_ZA_array_vrss_2:
   2214 	case AARCH64_OPND_SME_ZA_array_vrsd_2:
   2215 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
   2216 				get_opcode_dependent_value (opcode),
   2217 				get_opcode_dependent_vg_status (opcode)))
   2218 	    return false;
   2219 	  break;
   2220 
   2221 	case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
   2222 	case AARCH64_OPND_SME_ZA_HV_idx_destxN:
   2223 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2224 	  num = get_opcode_dependent_value (opcode);
   2225 	  max_value = 16 / num / size;
   2226 	  if (max_value > 0)
   2227 	    max_value -= 1;
   2228 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, num,
   2229 				0, get_opcode_dependent_value (opcode)))
   2230 	    return false;
   2231 	  break;
   2232 
   2233 	default:
   2234 	  abort ();
   2235 	}
   2236       break;
   2237 
   2238     case AARCH64_OPND_CLASS_PRED_REG:
   2239       switch (type)
   2240 	{
   2241 	case AARCH64_OPND_SME_PNd3:
   2242 	case AARCH64_OPND_SME_PNg3:
   2243 	  if (opnd->reg.regno < 8)
   2244 	    {
   2245 	      set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
   2246 	      return false;
   2247 	    }
   2248 	  break;
   2249 
   2250 	default:
   2251 	  if (opnd->reg.regno >= 8
   2252 	      && get_operand_fields_width (get_operand_from_code (type)) == 3)
   2253 	    {
   2254 	      set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
   2255 	      return false;
   2256 	    }
   2257 	  break;
   2258 	}
   2259       break;
   2260 
   2261     case AARCH64_OPND_CLASS_COND:
   2262       if (type == AARCH64_OPND_COND1
   2263 	  && (opnds[idx].cond->value & 0xe) == 0xe)
   2264 	{
   2265 	  /* Not allow AL or NV.  */
   2266 	  set_syntax_error (mismatch_detail, idx, NULL);
   2267 	}
   2268       break;
   2269 
   2270     case AARCH64_OPND_CLASS_ADDRESS:
   2271       /* Check writeback.  */
   2272       switch (opcode->iclass)
   2273 	{
   2274 	case ldst_pos:
   2275 	case ldst_unscaled:
   2276 	case ldstnapair_offs:
   2277 	case ldstpair_off:
   2278 	case ldst_unpriv:
   2279 	  if (opnd->addr.writeback == 1)
   2280 	    {
   2281 	      set_syntax_error (mismatch_detail, idx,
   2282 				_("unexpected address writeback"));
   2283 	      return false;
   2284 	    }
   2285 	  break;
   2286 	case ldst_imm10:
   2287 	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
   2288 	    {
   2289 	      set_syntax_error (mismatch_detail, idx,
   2290 				_("unexpected address writeback"));
   2291 	      return false;
   2292 	    }
   2293 	  break;
   2294 	case ldst_imm9:
   2295 	case ldstpair_indexed:
   2296 	case asisdlsep:
   2297 	case asisdlsop:
   2298 	  if (opnd->addr.writeback == 0)
   2299 	    {
   2300 	      set_syntax_error (mismatch_detail, idx,
   2301 				_("address writeback expected"));
   2302 	      return false;
   2303 	    }
   2304 	  break;
   2305 	case rcpc3:
   2306 	  if (opnd->addr.writeback)
   2307 	    if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB
   2308 		 && !opnd->addr.preind)
   2309 		|| (type == AARCH64_OPND_RCPC3_ADDR_POSTIND
   2310 		    && !opnd->addr.postind))
   2311 	      {
   2312 		set_syntax_error (mismatch_detail, idx,
   2313 				  _("unexpected address writeback"));
   2314 		return false;
   2315 	      }
   2316 
   2317 	  break;
   2318 	default:
   2319 	  assert (opnd->addr.writeback == 0);
   2320 	  break;
   2321 	}
   2322       switch (type)
   2323 	{
   2324 	case AARCH64_OPND_ADDR_SIMM7:
   2325 	  /* Scaled signed 7 bits immediate offset.  */
   2326 	  /* Get the size of the data element that is accessed, which may be
   2327 	     different from that of the source register size,
   2328 	     e.g. in strb/ldrb.  */
   2329 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2330 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
   2331 	    {
   2332 	      set_offset_out_of_range_error (mismatch_detail, idx,
   2333 					     -64 * size, 63 * size);
   2334 	      return false;
   2335 	    }
   2336 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   2337 	    {
   2338 	      set_unaligned_error (mismatch_detail, idx, size);
   2339 	      return false;
   2340 	    }
   2341 	  break;
   2342 	case AARCH64_OPND_ADDR_OFFSET:
   2343 	case AARCH64_OPND_ADDR_SIMM9:
   2344 	  /* Unscaled signed 9 bits immediate offset.  */
   2345 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   2346 	    {
   2347 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
   2348 	      return false;
   2349 	    }
   2350 	  break;
   2351 
   2352 	case AARCH64_OPND_ADDR_SIMM9_2:
   2353 	  /* Unscaled signed 9 bits immediate offset, which has to be negative
   2354 	     or unaligned.  */
   2355 	  size = aarch64_get_qualifier_esize (qualifier);
   2356 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
   2357 	       && !value_aligned_p (opnd->addr.offset.imm, size))
   2358 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
   2359 	    return true;
   2360 	  set_other_error (mismatch_detail, idx,
   2361 			   _("negative or unaligned offset expected"));
   2362 	  return false;
   2363 
   2364 	case AARCH64_OPND_ADDR_SIMM10:
   2365 	  /* Scaled signed 10 bits immediate offset.  */
   2366 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
   2367 	    {
   2368 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
   2369 	      return false;
   2370 	    }
   2371 	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
   2372 	    {
   2373 	      set_unaligned_error (mismatch_detail, idx, 8);
   2374 	      return false;
   2375 	    }
   2376 	  break;
   2377 
   2378 	case AARCH64_OPND_ADDR_SIMM11:
   2379 	  /* Signed 11 bits immediate offset (multiple of 16).  */
   2380 	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
   2381 	    {
   2382 	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
   2383 	      return false;
   2384 	    }
   2385 
   2386 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   2387 	    {
   2388 	      set_unaligned_error (mismatch_detail, idx, 16);
   2389 	      return false;
   2390 	    }
   2391 	  break;
   2392 
   2393 	case AARCH64_OPND_ADDR_SIMM13:
   2394 	  /* Signed 13 bits immediate offset (multiple of 16).  */
   2395 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
   2396 	    {
   2397 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
   2398 	      return false;
   2399 	    }
   2400 
   2401 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   2402 	    {
   2403 	      set_unaligned_error (mismatch_detail, idx, 16);
   2404 	      return false;
   2405 	    }
   2406 	  break;
   2407 
   2408 	case AARCH64_OPND_SIMD_ADDR_POST:
   2409 	  /* AdvSIMD load/store multiple structures, post-index.  */
   2410 	  assert (idx == 1);
   2411 	  if (opnd->addr.offset.is_reg)
   2412 	    {
   2413 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
   2414 		return true;
   2415 	      else
   2416 		{
   2417 		  set_other_error (mismatch_detail, idx,
   2418 				   _("invalid register offset"));
   2419 		  return false;
   2420 		}
   2421 	    }
   2422 	  else
   2423 	    {
   2424 	      const aarch64_opnd_info *prev = &opnds[idx-1];
   2425 	      unsigned num_bytes; /* total number of bytes transferred.  */
   2426 	      /* The opcode dependent area stores the number of elements in
   2427 		 each structure to be loaded/stored.  */
   2428 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
   2429 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
    2430 		/* Special handling of loading single structure to all lanes.  */
   2431 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
   2432 		  * aarch64_get_qualifier_esize (prev->qualifier);
   2433 	      else
   2434 		num_bytes = prev->reglist.num_regs
   2435 		  * aarch64_get_qualifier_esize (prev->qualifier)
   2436 		  * aarch64_get_qualifier_nelem (prev->qualifier);
   2437 	      if ((int) num_bytes != opnd->addr.offset.imm)
   2438 		{
   2439 		  set_other_error (mismatch_detail, idx,
   2440 				   _("invalid post-increment amount"));
   2441 		  return false;
   2442 		}
   2443 	    }
   2444 	  break;
   2445 
   2446 	case AARCH64_OPND_ADDR_REGOFF:
   2447 	  /* Get the size of the data element that is accessed, which may be
   2448 	     different from that of the source register size,
   2449 	     e.g. in strb/ldrb.  */
   2450 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2451 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
   2452 	  if (opnd->shifter.amount != 0
   2453 	      && opnd->shifter.amount != (int)get_logsz (size))
   2454 	    {
   2455 	      set_other_error (mismatch_detail, idx,
   2456 			       _("invalid shift amount"));
   2457 	      return false;
   2458 	    }
   2459 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
   2460 	     operators.  */
   2461 	  switch (opnd->shifter.kind)
   2462 	    {
   2463 	    case AARCH64_MOD_UXTW:
   2464 	    case AARCH64_MOD_LSL:
   2465 	    case AARCH64_MOD_SXTW:
   2466 	    case AARCH64_MOD_SXTX: break;
   2467 	    default:
   2468 	      set_other_error (mismatch_detail, idx,
   2469 			       _("invalid extend/shift operator"));
   2470 	      return false;
   2471 	    }
   2472 	  break;
   2473 
   2474 	case AARCH64_OPND_ADDR_UIMM12:
   2475 	  imm = opnd->addr.offset.imm;
   2476 	  /* Get the size of the data element that is accessed, which may be
   2477 	     different from that of the source register size,
   2478 	     e.g. in strb/ldrb.  */
   2479 	  size = aarch64_get_qualifier_esize (qualifier);
   2480 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
   2481 	    {
   2482 	      set_offset_out_of_range_error (mismatch_detail, idx,
   2483 					     0, 4095 * size);
   2484 	      return false;
   2485 	    }
   2486 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   2487 	    {
   2488 	      set_unaligned_error (mismatch_detail, idx, size);
   2489 	      return false;
   2490 	    }
   2491 	  break;
   2492 
   2493 	case AARCH64_OPND_ADDR_PCREL9:
   2494 	case AARCH64_OPND_ADDR_PCREL14:
   2495 	case AARCH64_OPND_ADDR_PCREL19:
   2496 	case AARCH64_OPND_ADDR_PCREL21:
   2497 	case AARCH64_OPND_ADDR_PCREL26:
   2498 	  {
   2499 	    imm = opnd->imm.value;
   2500 	    if (operand_need_shift_by_two (get_operand_from_code (type)))
   2501 	      {
    2502 		/* The offset value in a PC-relative branch instruction is always
    2503 		   4-byte aligned and is encoded without the lowest 2 bits.  */
   2504 		if (!value_aligned_p (imm, 4))
   2505 		  {
   2506 		    set_unaligned_error (mismatch_detail, idx, 4);
   2507 		    return false;
   2508 		  }
   2509 		/* Right shift by 2 so that we can carry out the following check
   2510 		   canonically.  */
   2511 		imm >>= 2;
   2512 	      }
   2513 
   2514 	    if (!check_immediate_out_of_range (imm, type, mismatch_detail, idx))
   2515 	      return false;
   2516 	  }
   2517 	  break;
   2518 
   2519 	case AARCH64_OPND_SME_ADDR_RI_U4xVL:
   2520 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
   2521 	    {
   2522 	      set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
   2523 	      return false;
   2524 	    }
   2525 	  break;
   2526 
   2527 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
   2528 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
   2529 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
   2530 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
   2531 	  min_value = -8;
   2532 	  max_value = 7;
   2533 	sve_imm_offset_vl:
   2534 	  assert (!opnd->addr.offset.is_reg);
   2535 	  assert (opnd->addr.preind);
   2536 	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
   2537 	  min_value *= num;
   2538 	  max_value *= num;
   2539 	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
   2540 	      || (opnd->shifter.operator_present
   2541 		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
   2542 	    {
   2543 	      set_other_error (mismatch_detail, idx,
   2544 			       _("invalid addressing mode"));
   2545 	      return false;
   2546 	    }
   2547 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
   2548 	    {
   2549 	      set_offset_out_of_range_error (mismatch_detail, idx,
   2550 					     min_value, max_value);
   2551 	      return false;
   2552 	    }
   2553 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
   2554 	    {
   2555 	      set_unaligned_error (mismatch_detail, idx, num);
   2556 	      return false;
   2557 	    }
   2558 	  break;
   2559 
   2560 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
   2561 	  min_value = -32;
   2562 	  max_value = 31;
   2563 	  goto sve_imm_offset_vl;
   2564 
   2565 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
   2566 	  min_value = -256;
   2567 	  max_value = 255;
   2568 	  goto sve_imm_offset_vl;
   2569 
   2570 	case AARCH64_OPND_SVE_ADDR_RI_U6:
   2571 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
   2572 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
   2573 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
   2574 	  min_value = 0;
   2575 	  max_value = 63;
   2576 	sve_imm_offset:
   2577 	  assert (!opnd->addr.offset.is_reg);
   2578 	  assert (opnd->addr.preind);
   2579 	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
   2580 	  min_value *= num;
   2581 	  max_value *= num;
   2582 	  if (opnd->shifter.operator_present
   2583 	      || opnd->shifter.amount_present)
   2584 	    {
   2585 	      set_other_error (mismatch_detail, idx,
   2586 			       _("invalid addressing mode"));
   2587 	      return false;
   2588 	    }
   2589 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
   2590 	    {
   2591 	      set_offset_out_of_range_error (mismatch_detail, idx,
   2592 					     min_value, max_value);
   2593 	      return false;
   2594 	    }
   2595 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
   2596 	    {
   2597 	      set_unaligned_error (mismatch_detail, idx, num);
   2598 	      return false;
   2599 	    }
   2600 	  break;
   2601 
   2602 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
   2603 	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
   2604 	  min_value = -8;
   2605 	  max_value = 7;
   2606 	  goto sve_imm_offset;
   2607 
   2608 	case AARCH64_OPND_SVE_ADDR_ZX:
   2609 	  /* Everything is already ensured by parse_operands or
   2610 	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
   2611 	     argument type).  */
   2612 	  assert (opnd->addr.offset.is_reg);
   2613 	  assert (opnd->addr.preind);
   2614 	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
   2615 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   2616 	  assert (opnd->shifter.operator_present == 0);
   2617 	  break;
   2618 
   2619 	case AARCH64_OPND_SVE_ADDR_RR:
   2620 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
   2621 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
   2622 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
   2623 	case AARCH64_OPND_SVE_ADDR_RR_LSL4:
   2624 	case AARCH64_OPND_SVE_ADDR_RM:
   2625 	case AARCH64_OPND_SVE_ADDR_RM_LSL1:
   2626 	case AARCH64_OPND_SVE_ADDR_RM_LSL2:
   2627 	case AARCH64_OPND_SVE_ADDR_RM_LSL3:
   2628 	case AARCH64_OPND_SVE_ADDR_RM_LSL4:
   2629 	case AARCH64_OPND_SVE_ADDR_RX:
   2630 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
   2631 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
   2632 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
   2633 	case AARCH64_OPND_SVE_ADDR_RX_LSL4:
   2634 	case AARCH64_OPND_SVE_ADDR_RZ:
   2635 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
   2636 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
   2637 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
   2638 	  modifiers = 1 << AARCH64_MOD_LSL;
   2639 	sve_rr_operand:
   2640 	  assert (opnd->addr.offset.is_reg);
   2641 	  assert (opnd->addr.preind);
   2642 	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
   2643 	      && opnd->addr.offset.regno == 31)
   2644 	    {
   2645 	      set_other_error (mismatch_detail, idx,
   2646 			       _("index register xzr is not allowed"));
   2647 	      return false;
   2648 	    }
   2649 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
   2650 	      || (opnd->shifter.amount
   2651 		  != get_operand_specific_data (&aarch64_operands[type])))
   2652 	    {
   2653 	      set_other_error (mismatch_detail, idx,
   2654 			       _("invalid addressing mode"));
   2655 	      return false;
   2656 	    }
   2657 	  break;
   2658 
   2659 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
   2660 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
   2661 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
   2662 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
   2663 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
   2664 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
   2665 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
   2666 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
   2667 	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
   2668 	  goto sve_rr_operand;
   2669 
   2670 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
   2671 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
   2672 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
   2673 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
   2674 	  min_value = 0;
   2675 	  max_value = 31;
   2676 	  goto sve_imm_offset;
   2677 
   2678 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
   2679 	  modifiers = 1 << AARCH64_MOD_LSL;
   2680 	sve_zz_operand:
   2681 	  assert (opnd->addr.offset.is_reg);
   2682 	  assert (opnd->addr.preind);
   2683 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
   2684 	      || opnd->shifter.amount < 0
   2685 	      || opnd->shifter.amount > 3)
   2686 	    {
   2687 	      set_other_error (mismatch_detail, idx,
   2688 			       _("invalid addressing mode"));
   2689 	      return false;
   2690 	    }
   2691 	  break;
   2692 
   2693 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
   2694 	  modifiers = (1 << AARCH64_MOD_SXTW);
   2695 	  goto sve_zz_operand;
   2696 
   2697 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
   2698 	  modifiers = 1 << AARCH64_MOD_UXTW;
   2699 	  goto sve_zz_operand;
   2700 
   2701 	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
   2702 	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
   2703 	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
   2704 	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
   2705 	  {
   2706 	    int num_bytes = calc_ldst_datasize (opnds);
   2707 	    int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
   2708 			      || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB)
   2709 	      ? opnd->addr.offset.imm * -1
   2710 	      : opnd->addr.offset.imm;
   2711 	    if ((int) num_bytes != abs_offset
   2712 		&& opnd->addr.offset.imm != 0)
   2713 	      {
   2714 		set_other_error (mismatch_detail, idx,
   2715 				 _("invalid increment amount"));
   2716 		return false;
   2717 	      }
   2718 	  }
   2719 	  break;
   2720 
   2721 	case AARCH64_OPND_RCPC3_ADDR_OFFSET:
   2722 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   2723 	    {
   2724 	      set_imm_out_of_range_error (mismatch_detail, idx, -256, 255);
   2725 	      return false;
   2726 	    }
   2727 
   2728 	default:
   2729 	  break;
   2730 	}
   2731       break;
   2732 
   2733     case AARCH64_OPND_CLASS_SIMD_REGLIST:
   2734       if (type == AARCH64_OPND_LEt)
   2735 	{
   2736 	  /* Get the upper bound for the element index.  */
   2737 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
   2738 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
   2739 	    {
   2740 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   2741 	      return false;
   2742 	    }
   2743 	}
   2744       /* The opcode dependent area stores the number of elements in
   2745 	 each structure to be loaded/stored.  */
   2746       num = get_opcode_dependent_value (opcode);
   2747       switch (type)
   2748 	{
   2749 	case AARCH64_OPND_LVn_LUT:
   2750 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2751 	    return 0;
   2752 	  break;
   2753 	case AARCH64_OPND_LVt:
   2754 	  assert (num >= 1 && num <= 4);
   2755 	  /* Unless LD1/ST1, the number of registers should be equal to that
   2756 	     of the structure elements.  */
   2757 	  if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
   2758 	    return false;
   2759 	  break;
   2760 	case AARCH64_OPND_LVt_AL:
   2761 	case AARCH64_OPND_LEt:
   2762 	  assert (num >= 1 && num <= 4);
   2763 	  /* The number of registers should be equal to that of the structure
   2764 	     elements.  */
   2765 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2766 	    return false;
   2767 	  break;
   2768 	default:
   2769 	  break;
   2770 	}
   2771       if (opnd->reglist.stride != 1)
   2772 	{
   2773 	  set_reg_list_stride_error (mismatch_detail, idx, 1);
   2774 	  return false;
   2775 	}
   2776       break;
   2777 
   2778     case AARCH64_OPND_CLASS_IMMEDIATE:
   2779       /* Constraint check on immediate operand.  */
   2780       imm = opnd->imm.value;
   2781       /* E.g. imm_0_31 constrains value to be 0..31.  */
   2782       if (qualifier_value_in_range_constraint_p (qualifier)
   2783 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
   2784 				get_upper_bound (qualifier)))
   2785 	{
   2786 	  set_imm_out_of_range_error (mismatch_detail, idx,
   2787 				      get_lower_bound (qualifier),
   2788 				      get_upper_bound (qualifier));
   2789 	  return false;
   2790 	}
   2791 
   2792       switch (type)
   2793 	{
   2794 	case AARCH64_OPND_AIMM:
   2795 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2796 	    {
   2797 	      set_other_error (mismatch_detail, idx,
   2798 			       _("invalid shift operator"));
   2799 	      return false;
   2800 	    }
   2801 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
   2802 	    {
   2803 	      set_other_error (mismatch_detail, idx,
   2804 			       _("shift amount must be 0 or 12"));
   2805 	      return false;
   2806 	    }
   2807 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
   2808 	    {
   2809 	      set_other_error (mismatch_detail, idx,
   2810 			       _("immediate out of range"));
   2811 	      return false;
   2812 	    }
   2813 	  break;
   2814 
   2815 	case AARCH64_OPND_HALF:
   2816 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
   2817 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2818 	    {
   2819 	      set_other_error (mismatch_detail, idx,
   2820 			       _("invalid shift operator"));
   2821 	      return false;
   2822 	    }
   2823 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2824 	  if (!value_aligned_p (opnd->shifter.amount, 16))
   2825 	    {
   2826 	      set_other_error (mismatch_detail, idx,
   2827 			       _("shift amount must be a multiple of 16"));
   2828 	      return false;
   2829 	    }
   2830 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
   2831 	    {
   2832 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
   2833 						 0, size * 8 - 16);
   2834 	      return false;
   2835 	    }
   2836 	  if (opnd->imm.value < 0)
   2837 	    {
   2838 	      set_other_error (mismatch_detail, idx,
   2839 			       _("negative immediate value not allowed"));
   2840 	      return false;
   2841 	    }
   2842 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
   2843 	    {
   2844 	      set_other_error (mismatch_detail, idx,
   2845 			       _("immediate out of range"));
   2846 	      return false;
   2847 	    }
   2848 	  break;
   2849 
   2850 	case AARCH64_OPND_IMM_MOV:
   2851 	    {
   2852 	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2853 	      imm = opnd->imm.value;
   2854 	      assert (idx == 1);
   2855 	      switch (opcode->op)
   2856 		{
   2857 		case OP_MOV_IMM_WIDEN:
   2858 		  imm = ~imm;
   2859 		  /* Fall through.  */
   2860 		case OP_MOV_IMM_WIDE:
   2861 		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
   2862 		    {
   2863 		      set_other_error (mismatch_detail, idx,
   2864 				       _("immediate out of range"));
   2865 		      return false;
   2866 		    }
   2867 		  break;
   2868 		case OP_MOV_IMM_LOG:
   2869 		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
   2870 		    {
   2871 		      set_other_error (mismatch_detail, idx,
   2872 				       _("immediate out of range"));
   2873 		      return false;
   2874 		    }
   2875 		  break;
   2876 		default:
   2877 		  assert (0);
   2878 		  return false;
   2879 		}
   2880 	    }
   2881 	  break;
   2882 
   2883 	case AARCH64_OPND_NZCV:
   2884 	case AARCH64_OPND_CCMP_IMM:
   2885 	case AARCH64_OPND_EXCEPTION:
   2886 	case AARCH64_OPND_UNDEFINED:
   2887 	case AARCH64_OPND_TME_UIMM16:
   2888 	case AARCH64_OPND_UIMM4:
   2889 	case AARCH64_OPND_UIMM4_ADDG:
   2890 	case AARCH64_OPND_UIMM7:
   2891 	case AARCH64_OPND_UIMM3_OP1:
   2892 	case AARCH64_OPND_UIMM3_OP2:
   2893 	case AARCH64_OPND_SVE_UIMM3:
   2894 	case AARCH64_OPND_SVE_UIMM7:
   2895 	case AARCH64_OPND_SVE_UIMM8:
   2896 	case AARCH64_OPND_SVE_UIMM4:
   2897 	case AARCH64_OPND_SVE_UIMM8_53:
   2898 	case AARCH64_OPND_CSSC_UIMM8:
   2899 	  size = get_operand_fields_width (get_operand_from_code (type));
   2900 	  assert (size < 32);
   2901 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
   2902 	    {
   2903 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   2904 					  (1u << size) - 1);
   2905 	      return false;
   2906 	    }
   2907 	  break;
   2908 
   2909 	case AARCH64_OPND_UIMM10:
   2910 	  /* Scaled unsigned 10 bits immediate offset.  */
   2911 	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
   2912 	    {
   2913 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
   2914 	      return false;
   2915 	    }
   2916 
   2917 	  if (!value_aligned_p (opnd->imm.value, 16))
   2918 	    {
   2919 	      set_unaligned_error (mismatch_detail, idx, 16);
   2920 	      return false;
   2921 	    }
   2922 	  break;
   2923 
   2924 	case AARCH64_OPND_SIMM5:
   2925 	case AARCH64_OPND_SVE_SIMM5:
   2926 	case AARCH64_OPND_SVE_SIMM5B:
   2927 	case AARCH64_OPND_SVE_SIMM6:
   2928 	case AARCH64_OPND_SVE_SIMM8:
   2929 	case AARCH64_OPND_CSSC_SIMM8:
   2930 	  size = get_operand_fields_width (get_operand_from_code (type));
   2931 	  assert (size < 32);
   2932 	  if (!value_fit_signed_field_p (opnd->imm.value, size))
   2933 	    {
   2934 	      imm_range_t rng = imm_range_min_max (size, true);
   2935 	      set_imm_out_of_range_error (mismatch_detail, idx, rng.min,
   2936 					  rng.max);
   2937 	      return false;
   2938 	    }
   2939 	  break;
   2940 
   2941 	case AARCH64_OPND_WIDTH:
   2942 	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
   2943 		  && opnds[0].type == AARCH64_OPND_Rd);
   2944 	  size = get_upper_bound (qualifier);
   2945 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
   2946 	    /* lsb+width <= reg.size  */
   2947 	    {
   2948 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
   2949 					  size - opnds[idx-1].imm.value);
   2950 	      return false;
   2951 	    }
   2952 	  break;
   2953 
   2954 	case AARCH64_OPND_LIMM:
   2955 	case AARCH64_OPND_SVE_LIMM:
   2956 	  {
   2957 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2958 	    uint64_t uimm = opnd->imm.value;
   2959 	    if (opcode->op == OP_BIC)
   2960 	      uimm = ~uimm;
   2961 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   2962 	      {
   2963 		set_other_error (mismatch_detail, idx,
   2964 				 _("immediate out of range"));
   2965 		return false;
   2966 	      }
   2967 	  }
   2968 	  break;
   2969 
   2970 	case AARCH64_OPND_IMM0:
   2971 	case AARCH64_OPND_FPIMM0:
   2972 	  if (opnd->imm.value != 0)
   2973 	    {
   2974 	      set_other_error (mismatch_detail, idx,
   2975 			       _("immediate zero expected"));
   2976 	      return false;
   2977 	    }
   2978 	  break;
   2979 
   2980 	case AARCH64_OPND_IMM_ROT1:
   2981 	case AARCH64_OPND_IMM_ROT2:
   2982 	case AARCH64_OPND_SVE_IMM_ROT2:
   2983 	  if (opnd->imm.value != 0
   2984 	      && opnd->imm.value != 90
   2985 	      && opnd->imm.value != 180
   2986 	      && opnd->imm.value != 270)
   2987 	    {
   2988 	      set_other_error (mismatch_detail, idx,
   2989 			       _("rotate expected to be 0, 90, 180 or 270"));
   2990 	      return false;
   2991 	    }
   2992 	  break;
   2993 
   2994 	case AARCH64_OPND_IMM_ROT3:
   2995 	case AARCH64_OPND_SVE_IMM_ROT1:
   2996 	case AARCH64_OPND_SVE_IMM_ROT3:
   2997 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
   2998 	    {
   2999 	      set_other_error (mismatch_detail, idx,
   3000 			       _("rotate expected to be 90 or 270"));
   3001 	      return false;
   3002 	    }
   3003 	  break;
   3004 
   3005 	case AARCH64_OPND_SHLL_IMM:
   3006 	  assert (idx == 2);
   3007 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   3008 	  if (opnd->imm.value != size)
   3009 	    {
   3010 	      set_other_error (mismatch_detail, idx,
   3011 			       _("invalid shift amount"));
   3012 	      return false;
   3013 	    }
   3014 	  break;
   3015 
   3016 	case AARCH64_OPND_IMM_VLSL:
   3017 	  size = aarch64_get_qualifier_esize (qualifier);
   3018 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
   3019 	    {
   3020 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   3021 					  size * 8 - 1);
   3022 	      return false;
   3023 	    }
   3024 	  break;
   3025 
   3026 	case AARCH64_OPND_IMM_VLSR:
   3027 	  size = aarch64_get_qualifier_esize (qualifier);
   3028 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
   3029 	    {
   3030 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
   3031 	      return false;
   3032 	    }
   3033 	  break;
   3034 
   3035 	case AARCH64_OPND_SIMD_IMM:
   3036 	case AARCH64_OPND_SIMD_IMM_SFT:
   3037 	  /* Qualifier check.  */
   3038 	  switch (qualifier)
   3039 	    {
   3040 	    case AARCH64_OPND_QLF_LSL:
   3041 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
   3042 		{
   3043 		  set_other_error (mismatch_detail, idx,
   3044 				   _("invalid shift operator"));
   3045 		  return false;
   3046 		}
   3047 	      break;
   3048 	    case AARCH64_OPND_QLF_MSL:
   3049 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
   3050 		{
   3051 		  set_other_error (mismatch_detail, idx,
   3052 				   _("invalid shift operator"));
   3053 		  return false;
   3054 		}
   3055 	      break;
   3056 	    case AARCH64_OPND_QLF_NIL:
   3057 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   3058 		{
   3059 		  set_other_error (mismatch_detail, idx,
   3060 				   _("shift is not permitted"));
   3061 		  return false;
   3062 		}
   3063 	      break;
   3064 	    default:
   3065 	      assert (0);
   3066 	      return false;
   3067 	    }
   3068 	  /* Is the immediate valid?  */
   3069 	  assert (idx == 1);
   3070 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
   3071 	    {
   3072 	      /* uimm8 or simm8 */
   3073 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
   3074 		{
   3075 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
   3076 		  return false;
   3077 		}
   3078 	    }
   3079 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
   3080 	    {
   3081 	      /* uimm64 is not
   3082 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
   3083 		 ffffffffgggggggghhhhhhhh'.  */
   3084 	      set_other_error (mismatch_detail, idx,
   3085 			       _("invalid value for immediate"));
   3086 	      return false;
   3087 	    }
   3088 	  /* Is the shift amount valid?  */
   3089 	  switch (opnd->shifter.kind)
   3090 	    {
   3091 	    case AARCH64_MOD_LSL:
   3092 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3093 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
   3094 		{
   3095 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
   3096 						     (size - 1) * 8);
   3097 		  return false;
   3098 		}
   3099 	      if (!value_aligned_p (opnd->shifter.amount, 8))
   3100 		{
   3101 		  set_unaligned_error (mismatch_detail, idx, 8);
   3102 		  return false;
   3103 		}
   3104 	      break;
   3105 	    case AARCH64_MOD_MSL:
    3106 	      /* Only 8 and 16 are valid shift amounts.  */
   3107 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
   3108 		{
   3109 		  set_other_error (mismatch_detail, idx,
   3110 				   _("shift amount must be 0 or 16"));
   3111 		  return false;
   3112 		}
   3113 	      break;
   3114 	    default:
   3115 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   3116 		{
   3117 		  set_other_error (mismatch_detail, idx,
   3118 				   _("invalid shift operator"));
   3119 		  return false;
   3120 		}
   3121 	      break;
   3122 	    }
   3123 	  break;
   3124 
   3125 	case AARCH64_OPND_FPIMM:
   3126 	case AARCH64_OPND_SIMD_FPIMM:
   3127 	case AARCH64_OPND_SVE_FPIMM8:
   3128 	  if (opnd->imm.is_fp == 0)
   3129 	    {
   3130 	      set_other_error (mismatch_detail, idx,
   3131 			       _("floating-point immediate expected"));
   3132 	      return false;
   3133 	    }
   3134 	  /* The value is expected to be an 8-bit floating-point constant with
   3135 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
   3136 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
   3137 	     instruction).  */
   3138 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
   3139 	    {
   3140 	      set_other_error (mismatch_detail, idx,
   3141 			       _("immediate out of range"));
   3142 	      return false;
   3143 	    }
   3144 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
   3145 	    {
   3146 	      set_other_error (mismatch_detail, idx,
   3147 			       _("invalid shift operator"));
   3148 	      return false;
   3149 	    }
   3150 	  break;
   3151 
   3152 	case AARCH64_OPND_SVE_AIMM:
   3153 	  min_value = 0;
   3154 	sve_aimm:
   3155 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   3156 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3157 	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
   3158 	  uvalue = opnd->imm.value;
   3159 	  shift = opnd->shifter.amount;
   3160 	  if (size == 1)
   3161 	    {
   3162 	      if (shift != 0)
   3163 		{
   3164 		  set_other_error (mismatch_detail, idx,
   3165 				   _("no shift amount allowed for"
   3166 				     " 8-bit constants"));
   3167 		  return false;
   3168 		}
   3169 	    }
   3170 	  else
   3171 	    {
   3172 	      if (shift != 0 && shift != 8)
   3173 		{
   3174 		  set_other_error (mismatch_detail, idx,
   3175 				   _("shift amount must be 0 or 8"));
   3176 		  return false;
   3177 		}
   3178 	      if (shift == 0 && (uvalue & 0xff) == 0)
   3179 		{
   3180 		  shift = 8;
   3181 		  uvalue = (int64_t) uvalue / 256;
   3182 		}
   3183 	    }
   3184 	  mask >>= shift;
   3185 	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
   3186 	    {
   3187 	      set_other_error (mismatch_detail, idx,
   3188 			       _("immediate too big for element size"));
   3189 	      return false;
   3190 	    }
   3191 	  uvalue = (uvalue - min_value) & mask;
   3192 	  if (uvalue > 0xff)
   3193 	    {
   3194 	      set_other_error (mismatch_detail, idx,
   3195 			       _("invalid arithmetic immediate"));
   3196 	      return false;
   3197 	    }
   3198 	  break;
   3199 
   3200 	case AARCH64_OPND_SVE_ASIMM:
   3201 	  min_value = -128;
   3202 	  goto sve_aimm;
   3203 
   3204 	case AARCH64_OPND_SVE_I1_HALF_ONE:
   3205 	  assert (opnd->imm.is_fp);
   3206 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
   3207 	    {
   3208 	      set_other_error (mismatch_detail, idx,
   3209 			       _("floating-point value must be 0.5 or 1.0"));
   3210 	      return false;
   3211 	    }
   3212 	  break;
   3213 
   3214 	case AARCH64_OPND_SVE_I1_HALF_TWO:
   3215 	  assert (opnd->imm.is_fp);
   3216 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
   3217 	    {
   3218 	      set_other_error (mismatch_detail, idx,
   3219 			       _("floating-point value must be 0.5 or 2.0"));
   3220 	      return false;
   3221 	    }
   3222 	  break;
   3223 
   3224 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
   3225 	  assert (opnd->imm.is_fp);
   3226 	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
   3227 	    {
   3228 	      set_other_error (mismatch_detail, idx,
   3229 			       _("floating-point value must be 0.0 or 1.0"));
   3230 	      return false;
   3231 	    }
   3232 	  break;
   3233 
   3234 	case AARCH64_OPND_SVE_INV_LIMM:
   3235 	  {
   3236 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3237 	    uint64_t uimm = ~opnd->imm.value;
   3238 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   3239 	      {
   3240 		set_other_error (mismatch_detail, idx,
   3241 				 _("immediate out of range"));
   3242 		return false;
   3243 	      }
   3244 	  }
   3245 	  break;
   3246 
   3247 	case AARCH64_OPND_SVE_LIMM_MOV:
   3248 	  {
   3249 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3250 	    uint64_t uimm = opnd->imm.value;
   3251 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   3252 	      {
   3253 		set_other_error (mismatch_detail, idx,
   3254 				 _("immediate out of range"));
   3255 		return false;
   3256 	      }
   3257 	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
   3258 	      {
   3259 		set_other_error (mismatch_detail, idx,
   3260 				 _("invalid replicated MOV immediate"));
   3261 		return false;
   3262 	      }
   3263 	  }
   3264 	  break;
   3265 
   3266 	case AARCH64_OPND_SVE_PATTERN_SCALED:
   3267 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
   3268 	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
   3269 	    {
   3270 	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
   3271 	      return false;
   3272 	    }
   3273 	  break;
   3274 
   3275 	case AARCH64_OPND_SVE_SHLIMM_PRED:
   3276 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
   3277 	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
   3278 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   3279 	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
   3280 	    {
   3281 	      set_imm_out_of_range_error (mismatch_detail, idx,
   3282 					  0, 8 * size - 1);
   3283 	      return false;
   3284 	    }
   3285 	  break;
   3286 
   3287 	case AARCH64_OPND_SME_SHRIMM3:
   3288 	case AARCH64_OPND_SME_SHRIMM4:
   3289 	  size = 1 << get_operand_fields_width (get_operand_from_code (type));
   3290 	  if (!value_in_range_p (opnd->imm.value, 1, size))
   3291 	    {
   3292 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
   3293 	      return false;
   3294 	    }
   3295 	  break;
   3296 
   3297 	case AARCH64_OPND_SME_SHRIMM5:
   3298 	case AARCH64_OPND_SVE_SHRIMM_PRED:
   3299 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
   3300 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
   3301 	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
   3302 	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
   3303 	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
   3304 	    {
   3305 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
   3306 	      return false;
   3307 	    }
   3308 	  break;
   3309 
   3310 	case AARCH64_OPND_SME_ZT0_INDEX:
   3311 	  if (!value_in_range_p (opnd->imm.value, 0, 56))
   3312 	    {
   3313 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
   3314 	      return false;
   3315 	    }
   3316 	  if (opnd->imm.value % 8 != 0)
   3317 	    {
   3318 	      set_other_error (mismatch_detail, idx,
   3319 			       _("byte index must be a multiple of 8"));
   3320 	      return false;
   3321 	    }
   3322 	  break;
   3323 
   3324 	case AARCH64_OPND_SME_ZT0_INDEX_MUL_VL:
   3325 	  if (!value_in_range_p (opnd->imm.value, 0, 3))
   3326 	    {
   3327 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 3);
   3328 	      return 0;
   3329 	    }
   3330 	  break;
   3331 
   3332 	default:
   3333 	  break;
   3334 	}
   3335       break;
   3336 
   3337     case AARCH64_OPND_CLASS_SYSTEM:
   3338       switch (type)
   3339 	{
   3340 	case AARCH64_OPND_PSTATEFIELD:
   3341 	  for (i = 0; aarch64_pstatefields[i].name; ++i)
   3342 	    if (aarch64_pstatefields[i].value == opnd->pstatefield)
   3343 	      break;
   3344 	  assert (aarch64_pstatefields[i].name);
   3345 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
   3346 	  max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
   3347 	  if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
   3348 	    {
   3349 	      set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
   3350 	      return false;
   3351 	    }
   3352 	  break;
   3353 	case AARCH64_OPND_PRFOP:
   3354 	  if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
   3355 	    {
   3356 	      set_other_error (mismatch_detail, idx,
   3357 			       _("the register-index form of PRFM does"
   3358 				 " not accept opcodes in the range 24-31"));
   3359 	      return false;
   3360 	    }
   3361 	  break;
   3362 	default:
   3363 	  break;
   3364 	}
   3365       break;
   3366 
   3367     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
   3368       /* Get the upper bound for the element index.  */
   3369       if (opcode->op == OP_FCMLA_ELEM)
   3370 	/* FCMLA index range depends on the vector size of other operands
   3371 	   and is halfed because complex numbers take two elements.  */
   3372 	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
   3373 	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
   3374       else if (opcode->iclass == lut)
   3375 	{
   3376 	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
   3377 	  if (!check_reglane (opnd, mismatch_detail, idx, "v", 0, 31,
   3378 			      0, (1 << size) - 1))
   3379 	    return 0;
   3380 	  break;
   3381 	}
   3382       else
   3383 	num = 16;
   3384       num = num / aarch64_get_qualifier_esize (qualifier) - 1;
   3385       assert (aarch64_get_qualifier_nelem (qualifier) == 1);
   3386 
   3387       /* Index out-of-range.  */
   3388       if (!value_in_range_p (opnd->reglane.index, 0, num))
   3389 	{
   3390 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   3391 	  return false;
   3392 	}
   3393       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
   3394 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
   3395 	 number is encoded in "size:M:Rm":
   3396 	 size	<Vm>
   3397 	 00		RESERVED
   3398 	 01		0:Rm
   3399 	 10		M:Rm
   3400 	 11		RESERVED  */
   3401       if (type == AARCH64_OPND_Em16
   3402 	  && (qualifier == AARCH64_OPND_QLF_S_H
   3403 	      || qualifier == AARCH64_OPND_QLF_S_2B)
   3404 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
   3405 	{
   3406 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   3407 	  return false;
   3408 	}
   3409       if (type == AARCH64_OPND_Em8
   3410 	  && !value_in_range_p (opnd->reglane.regno, 0, 7))
   3411 	{
   3412 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 7);
   3413 	  return 0;
   3414 	}
   3415       break;
   3416 
   3417     case AARCH64_OPND_CLASS_MODIFIED_REG:
   3418       assert (idx == 1 || idx == 2);
   3419       switch (type)
   3420 	{
   3421 	case AARCH64_OPND_Rm_EXT:
   3422 	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
   3423 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
   3424 	    {
   3425 	      set_other_error (mismatch_detail, idx,
   3426 			       _("extend operator expected"));
   3427 	      return false;
   3428 	    }
   3429 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
   3430 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
   3431 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
   3432 	     case.  */
   3433 	  if (!aarch64_stack_pointer_p (opnds + 0)
   3434 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
   3435 	    {
   3436 	      if (!opnd->shifter.operator_present)
   3437 		{
   3438 		  set_other_error (mismatch_detail, idx,
   3439 				   _("missing extend operator"));
   3440 		  return false;
   3441 		}
   3442 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
   3443 		{
   3444 		  set_other_error (mismatch_detail, idx,
   3445 				   _("'LSL' operator not allowed"));
   3446 		  return false;
   3447 		}
   3448 	    }
   3449 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
   3450 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
   3451 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
   3452 	    {
   3453 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
   3454 	      return false;
   3455 	    }
   3456 	  /* In the 64-bit form, the final register operand is written as Wm
   3457 	     for all but the (possibly omitted) UXTX/LSL and SXTX
   3458 	     operators.
   3459 	     N.B. GAS allows X register to be used with any operator as a
   3460 	     programming convenience.  */
   3461 	  if (qualifier == AARCH64_OPND_QLF_X
   3462 	      && opnd->shifter.kind != AARCH64_MOD_LSL
   3463 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
   3464 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
   3465 	    {
   3466 	      set_other_error (mismatch_detail, idx, _("W register expected"));
   3467 	      return false;
   3468 	    }
   3469 	  break;
   3470 
   3471 	case AARCH64_OPND_Rm_SFT:
   3472 	  /* ROR is not available to the shifted register operand in
   3473 	     arithmetic instructions.  */
   3474 	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
   3475 	    {
   3476 	      set_other_error (mismatch_detail, idx,
   3477 			       _("shift operator expected"));
   3478 	      return false;
   3479 	    }
   3480 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
   3481 	      && opcode->iclass != log_shift)
   3482 	    {
   3483 	      set_other_error (mismatch_detail, idx,
   3484 			       _("'ROR' operator not allowed"));
   3485 	      return false;
   3486 	    }
   3487 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
   3488 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
   3489 	    {
   3490 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
   3491 	      return false;
   3492 	    }
   3493 	  break;
   3494 
   3495 	case AARCH64_OPND_Rm_LSL:
   3496 	  /* We expect here that opnd->shifter.kind != AARCH64_MOD_LSL
   3497 	     because the parser already restricts the type of shift to LSL only,
   3498 	     so another check of shift kind would be redundant.  */
   3499 	  if (!value_in_range_p (opnd->shifter.amount, 0, 7))
   3500 	    {
   3501 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 7);
   3502 	      return false;
   3503 	    }
   3504 	  break;
   3505 
   3506 	default:
   3507 	  break;
   3508 	}
   3509       break;
   3510 
   3511     default:
   3512       break;
   3513     }
   3514 
   3515   return true;
   3516 }
   3517 
/* Main entrypoint for the operand constraint checking.

   Return true if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return false and, if
   MISMATCH_DETAIL is not NULL, return the detail of the error in
   *MISMATCH_DETAIL.  N.B. when adding more constraint checking, make sure
   MISMATCH_DETAIL->KIND is set with a proper error kind rather than
   AARCH64_OPDE_NIL (GAS asserts non-NIL error kind when it is notified that
   an instruction does not pass the check).

   Un-determined operand qualifiers may get established during the process.  */

bool
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Index of the operand that must match operand 0; a value of zero
     means the opcode has no tied operand.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
        {
        /* For SME LDR and STR instructions #imm must have the same numerical
           value for both operands.
        */
        case sme_ldr:
        case sme_str:
          assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
          assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
          if (inst->operands[0].indexed_za.index.imm
              != inst->operands[1].addr.offset.imm)
            {
              if (mismatch_detail)
                {
                  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
                  mismatch_detail->index = i;
                }
              return false;
            }
          break;

        default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class
	       = aarch64_get_operand_class (inst->operands[0].type);
	    assert (aarch64_get_operand_class (inst->operands[i].type)
		    == op_class);
	    /* For register lists the first register number, the register
	       count and the stride must all match; for plain registers
	       only the register number matters.  */
	    if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return false;
	      }
	    break;
	  }
        }
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will carried out by operand_general_constraint_met_p,
     which has be to called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     are enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return false;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (!operand_general_constraint_met_p (inst->operands, i, type,
					     inst->opcode, mismatch_detail))
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return false;
	}
    }

  DEBUG_TRACE ("PASS");

  return true;
}
   3649 
   3650 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   3651    Also updates the TYPE of each INST->OPERANDS with the corresponding
   3652    value of OPCODE->OPERANDS.
   3653 
   3654    Note that some operand qualifiers may need to be manually cleared by
   3655    the caller before it further calls the aarch64_opcode_encode; by
   3656    doing this, it helps the qualifier matching facilities work
   3657    properly.  */
   3658 
   3659 const aarch64_opcode*
   3660 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
   3661 {
   3662   int i;
   3663   const aarch64_opcode *old = inst->opcode;
   3664 
   3665   inst->opcode = opcode;
   3666 
   3667   /* Update the operand types.  */
   3668   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   3669     {
   3670       inst->operands[i].type = opcode->operands[i];
   3671       if (opcode->operands[i] == AARCH64_OPND_NIL)
   3672 	break;
   3673     }
   3674 
   3675   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
   3676 
   3677   return old;
   3678 }
   3679 
   3680 int
   3681 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
   3682 {
   3683   int i;
   3684   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   3685     if (operands[i] == operand)
   3686       return i;
   3687     else if (operands[i] == AARCH64_OPND_NIL)
   3688       break;
   3689   return -1;
   3690 }
   3691 
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
/* General-purpose register names.  The first index selects how register
   31 is named (0: stack pointer, 1: zero register); the second selects
   the width (0: 32-bit Wn, 1: 64-bit Xn):
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
   3723 
   3724 /* Return the integer register name.
   3725    if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg.  */
   3726 
   3727 static inline const char *
   3728 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
   3729 {
   3730   const int has_zr = sp_reg_p ? 0 : 1;
   3731   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
   3732   return int_reg[has_zr][is_64][regno];
   3733 }
   3734 
   3735 /* Like get_int_reg_name, but IS_64 is always 1.  */
   3736 
   3737 static inline const char *
   3738 get_64bit_int_reg_name (int regno, int sp_reg_p)
   3739 {
   3740   const int has_zr = sp_reg_p ? 0 : 1;
   3741   return int_reg[has_zr][1][regno];
   3742 }
   3743 
   3744 /* Get the name of the integer offset register in OPND, using the shift type
   3745    to decide whether it's a word or doubleword.  */
   3746 
   3747 static inline const char *
   3748 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
   3749 {
   3750   switch (opnd->shifter.kind)
   3751     {
   3752     case AARCH64_MOD_UXTW:
   3753     case AARCH64_MOD_SXTW:
   3754       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
   3755 
   3756     case AARCH64_MOD_LSL:
   3757     case AARCH64_MOD_SXTX:
   3758       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
   3759 
   3760     default:
   3761       abort ();
   3762     }
   3763 }
   3764 
   3765 /* Get the name of the SVE vector offset register in OPND, using the operand
   3766    qualifier to decide whether the suffix should be .S or .D.  */
   3767 
   3768 static inline const char *
   3769 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
   3770 {
   3771   assert (qualifier == AARCH64_OPND_QLF_S_S
   3772 	  || qualifier == AARCH64_OPND_QLF_S_D);
   3773   return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
   3774 }
   3775 
/* Types for expanding an encoded 8-bit value to a floating-point value.
   These unions perform bit-level reinterpretation between an integer
   encoding and the IEEE 754 value sharing the same bits.  */

typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* NOTE: intentionally 32-bit — half-precision values are expanded to
   single precision (see expand_fp_imm) before being interpreted.  */
typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;
   3795 
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  const uint32_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t exp_top = frac >> 6;		/* imm8<6>   */
  const uint32_t exp_top_inv = exp_top ^ 1;	/* NOT(imm8<6>) */
  uint64_t result = 0;

  if (size == 8)
    {
      /* Double precision: build the high 32 bits, then shift into place.
	 Layout (bit numbers within the high word):
	   31      sign
	   30      NOT(imm8<6>)
	   29:23   Replicate(imm8<6>,7)
	   22:16   imm8<6>:imm8<5:0>  */
      uint32_t repl7 = exp_top ? 0x7f : 0;
      uint32_t high = ((sign << 31)
		       | (exp_top_inv << 30)
		       | (repl7 << 23)
		       | (frac << 16));
      result = (uint64_t) high << 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single precision (half is widened to single):
	   31      sign
	   30      NOT(imm8<6>)
	   29:26   Replicate(imm8<6>,4)
	   25:19   imm8<6>:imm8<5:0>  */
      uint32_t repl4 = exp_top ? 0xf : 0;
      result = ((sign << 31)
		| (exp_top_inv << 30)
		| (repl4 << 26)
		| (frac << 19));
    }
  else
    /* An unsupported size.  */
    assert (0);

  return result;
}
   3839 
   3840 /* Return a string based on FMT with the register style applied.  */
   3841 
   3842 static const char *
   3843 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
   3844 {
   3845   const char *txt;
   3846   va_list ap;
   3847 
   3848   va_start (ap, fmt);
   3849   txt = styler->apply_style (styler, dis_style_register, fmt, ap);
   3850   va_end (ap);
   3851 
   3852   return txt;
   3853 }
   3854 
   3855 /* Return a string based on FMT with the immediate style applied.  */
   3856 
   3857 static const char *
   3858 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
   3859 {
   3860   const char *txt;
   3861   va_list ap;
   3862 
   3863   va_start (ap, fmt);
   3864   txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
   3865   va_end (ap);
   3866 
   3867   return txt;
   3868 }
   3869 
   3870 /* Return a string based on FMT with the sub-mnemonic style applied.  */
   3871 
   3872 static const char *
   3873 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
   3874 {
   3875   const char *txt;
   3876   va_list ap;
   3877 
   3878   va_start (ap, fmt);
   3879   txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
   3880   va_end (ap);
   3881 
   3882   return txt;
   3883 }
   3884 
   3885 /* Return a string based on FMT with the address style applied.  */
   3886 
   3887 static const char *
   3888 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
   3889 {
   3890   const char *txt;
   3891   va_list ap;
   3892 
   3893   va_start (ap, fmt);
   3894   txt = styler->apply_style (styler, dis_style_address, fmt, ap);
   3895   va_end (ap);
   3896 
   3897   return txt;
   3898 }
   3899 
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Register numbers wrap modulo the bank size: predicate registers
     ("p" prefix) wrap at 16, all other banks at 32.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there is
     more than one register in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d", prefix, first_reg),
		style_reg (styler, "%s%d", prefix, last_reg), tb);
    else
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
		style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Comma-separated form: compute each successive register number,
	 wrapping around the bank as needed.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
   3976 
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */

static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base,
				struct aarch64_styler *styler)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
        {
	  /* Pre-indexed.  For ADDR_SIMM10 a zero offset is printed
	     without the immediate: "[base]!" rather than "[base, #0]!".  */
	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s]!", style_reg (styler, base));
          else
	    snprintf (buf, size, "[%s, %s]!",
		      style_reg (styler, base),
		      style_imm (styler, "#%d", opnd->addr.offset.imm));
        }
      else
	/* Post-indexed: "[base], #imm".  */
	snprintf (buf, size, "[%s], %s",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
    }
  else
    {
      if (opnd->shifter.operator_present)
	{
	  /* SVE vector-length-scaled offset: "[base, #imm, mul vl]".  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, %s, %s]",
		    style_reg (styler, base),
		    style_imm (styler, "#%d", opnd->addr.offset.imm),
		    style_sub_mnem (styler, "mul vl"));
	}
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, %s]",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
      else
	/* A zero offset is omitted entirely.  */
	snprintf (buf, size, "[%s]", style_reg (styler, base));
    }
}
   4020 
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset,
			       struct aarch64_styler *styler)
{
  char tb[32];			/* Temporary buffer.  */
  bool print_extend_p = true;
  bool print_amount_p = true;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  /* This is the case where offset is the optional argument and the optional
     argument is ignored in the disassembly.  */
  if (opnd->type == AARCH64_OPND_SVE_ADDR_ZX && offset != NULL
      && strcmp (offset,"xzr") == 0)
    {
      /* Example: [<Zn>.S{, <Xm>}].
	 When the assembly is [Z0.S, XZR] or [Z0.S], Xm is XZR in both the cases
	 and the preferred disassembly is [Z0.S], ignoring the optional	Xm.  */
      snprintf (buf, size, "[%s]", style_reg (styler, base));
    }
  else
    {
      if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				    || !opnd->shifter.amount_present))
	{
	  /* Not print the shift/extend amount when the amount is zero and
	     when it is not the special case of 8-bit load/store
	     instruction.  */
	 print_amount_p = false;
	 /* Likewise, no need to print the shift operator LSL in such a
	    situation.  */
	 if (opnd->shifter.kind == AARCH64_MOD_LSL)
	   print_extend_p = false;
	}

      /* Prepare for the extend/shift.  Three cases: ", <ext> #<amount>",
	 ", <ext>" (zero amount suppressed), or nothing at all.  */
      if (print_extend_p)
	{
	  if (print_amount_p)
	    snprintf (tb, sizeof (tb), ", %s %s",
		      style_sub_mnem (styler, shift_name),
		      style_imm (styler, "#%" PRIi64,
	  /* PR 21096: The %100 is to silence a warning about possible
	     truncation.  */
				 (opnd->shifter.amount % 100)));
	  else
	    snprintf (tb, sizeof (tb), ", %s",
		      style_sub_mnem (styler, shift_name));
	}
      else
	tb[0] = '\0';

      snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
		style_reg (styler, offset), tb);
    }
}
   4081 
   4082 /* Print ZA tiles from imm8 in ZERO instruction.
   4083 
   4084    The preferred disassembly of this instruction uses the shortest list of tile
   4085    names that represent the encoded immediate mask.
   4086 
   4087    For example:
   4088     * An all-ones immediate is disassembled as {ZA}.
   4089     * An all-zeros immediate is disassembled as an empty list { }.
   4090 */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names ordered widest mask first, so that the shortest list of
     names covering MASK is chosen.  The all-zero entry at the end yields
     the documented empty list "{ }" for an all-zeros immediate.  */
  static const struct {
    unsigned char mask;
    char name[7];
  } zan[] = {
    { 0xff, "za" },
    { 0x55, "za0.h" },
    { 0xaa, "za1.h" },
    { 0x11, "za0.s" },
    { 0x22, "za1.s" },
    { 0x44, "za2.s" },
    { 0x88, "za3.s" },
    { 0x01, "za0.d" },
    { 0x02, "za1.d" },
    { 0x04, "za2.d" },
    { 0x08, "za3.d" },
    { 0x10, "za4.d" },
    { 0x20, "za5.d" },
    { 0x40, "za6.d" },
    { 0x80, "za7.d" },
    { 0x00, " " },
  };
  int k;

  k = snprintf (buf, size, "{");
  for (unsigned int i = 0; i < ARRAY_SIZE (zan); i++)
    {
      if ((mask & zan[i].mask) == zan[i].mask)
	{
	  mask &= ~zan[i].mask;
	  /* K > 1 means at least one name has been emitted already,
	     so a separator is needed.  */
	  if (k > 1)
	    k += snprintf (buf + k, size - k, ", ");

	  k += snprintf (buf + k, size - k, "%s",
			 style_reg (styler, zan[i].name));
	  /* Check for completion only after a match.  Checking it on
	     every iteration (as before) made the 0x00 entry above
	     unreachable, so an all-zeros mask printed "{}" instead of
	     the documented "{ }".  A nonzero 8-bit mask is always fully
	     consumed by the single-bit entries, so for such masks this
	     break fires before the 0x00 entry and the output is
	     unchanged.  */
	  if (mask == 0)
	    break;
	}
    }
  snprintf (buf + k, size - k, "}");
}
   4135 
   4136 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   4137    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   4138    PC, PCREL_P and ADDRESS are used to pass in and return information about
   4139    the PC-relative address calculation, where the PC value is passed in
   4140    PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   4141    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   4142    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
   4143 
   4144    The function serves both the disassembler and the assembler diagnostics
   4145    issuer, which is the reason why it lives in this file.  */
   4146 
   4147 void
   4148 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
   4149 		       const aarch64_opcode *opcode,
   4150 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
   4151 		       bfd_vma *address, char** notes,
   4152 		       char *comment, size_t comment_size,
   4153 		       aarch64_feature_set features,
   4154 		       struct aarch64_styler *styler)
   4155 {
   4156   unsigned int i, num_conds;
   4157   const char *name = NULL;
   4158   const aarch64_opnd_info *opnd = opnds + idx;
   4159   enum aarch64_modifier_kind kind;
   4160   uint64_t addr, enum_value;
   4161 
   4162   if (comment != NULL)
   4163     {
   4164       assert (comment_size > 0);
   4165       comment[0] = '\0';
   4166     }
   4167   else
   4168     assert (comment_size == 0);
   4169 
   4170   buf[0] = '\0';
   4171   if (pcrel_p)
   4172     *pcrel_p = 0;
   4173 
   4174   switch (opnd->type)
   4175     {
   4176     case AARCH64_OPND_Rd:
   4177     case AARCH64_OPND_Rn:
   4178     case AARCH64_OPND_Rm:
   4179     case AARCH64_OPND_Rt:
   4180     case AARCH64_OPND_Rt2:
   4181     case AARCH64_OPND_Rs:
   4182     case AARCH64_OPND_Ra:
   4183     case AARCH64_OPND_Rt_IN_SYS_ALIASES:
   4184     case AARCH64_OPND_Rt_LS64:
   4185     case AARCH64_OPND_Rt_SYS:
   4186     case AARCH64_OPND_PAIRREG:
   4187     case AARCH64_OPND_PAIRREG_OR_XZR:
   4188     case AARCH64_OPND_SVE_Rm:
   4189     case AARCH64_OPND_LSE128_Rt:
   4190     case AARCH64_OPND_LSE128_Rt2:
   4191       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
   4192 	 the <ic_op>, therefore we use opnd->present to override the
   4193 	 generic optional-ness information.  */
   4194       if (opnd->type == AARCH64_OPND_Rt_SYS)
   4195 	{
   4196 	  if (!opnd->present)
   4197 	    break;
   4198 	}
   4199       else if ((opnd->type == AARCH64_OPND_Rt_IN_SYS_ALIASES)
   4200 	       && (opnd->reg.regno
   4201 		   != get_optional_operand_default_value (opcode)))
   4202 	{
   4203 	  /* Avoid printing an invalid additional value for Rt in SYS aliases such as
   4204 	     BRB, provide a helpful comment instead */
   4205 	  snprintf (comment, comment_size, "unpredictable encoding (Rt!=31): #%u", opnd->reg.regno);
   4206 	  break;
   4207 	}
   4208       /* Omit the operand, e.g. RET.  */
   4209       else if (optional_operand_p (opcode, idx)
   4210 	       && (opnd->reg.regno
   4211 		   == get_optional_operand_default_value (opcode)))
   4212 	break;
   4213       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4214 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   4215       snprintf (buf, size, "%s",
   4216 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4217 						     opnd->qualifier, 0)));
   4218       break;
   4219 
   4220     case AARCH64_OPND_Rd_SP:
   4221     case AARCH64_OPND_Rn_SP:
   4222     case AARCH64_OPND_Rt_SP:
   4223     case AARCH64_OPND_SVE_Rn_SP:
   4224     case AARCH64_OPND_Rm_SP:
   4225       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4226 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
   4227 	      || opnd->qualifier == AARCH64_OPND_QLF_X
   4228 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
   4229       snprintf (buf, size, "%s",
   4230 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4231 						     opnd->qualifier, 1)));
   4232       break;
   4233 
   4234     case AARCH64_OPND_Rm_EXT:
   4235       kind = opnd->shifter.kind;
   4236       assert (idx == 1 || idx == 2);
   4237       if ((aarch64_stack_pointer_p (opnds)
   4238 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
   4239 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
   4240 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
   4241 	       && kind == AARCH64_MOD_UXTW)
   4242 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
   4243 		  && kind == AARCH64_MOD_UXTX)))
   4244 	{
   4245 	  /* 'LSL' is the preferred form in this case.  */
   4246 	  kind = AARCH64_MOD_LSL;
   4247 	  if (opnd->shifter.amount == 0)
   4248 	    {
   4249 	      /* Shifter omitted.  */
   4250 	      snprintf (buf, size, "%s",
   4251 			style_reg (styler,
   4252 				   get_int_reg_name (opnd->reg.regno,
   4253 						     opnd->qualifier, 0)));
   4254 	      break;
   4255 	    }
   4256 	}
   4257       if (opnd->shifter.amount)
   4258 	snprintf (buf, size, "%s, %s %s",
   4259 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4260 		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
   4261 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4262       else
   4263 	snprintf (buf, size, "%s, %s",
   4264 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4265 		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
   4266       break;
   4267 
   4268     case AARCH64_OPND_Rm_SFT:
   4269       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4270 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   4271       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
   4272 	snprintf (buf, size, "%s",
   4273 		  style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4274 						       opnd->qualifier, 0)));
   4275       else
   4276 	snprintf (buf, size, "%s, %s %s",
   4277 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4278 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4279 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4280       break;
   4281 
   4282     case AARCH64_OPND_Rm_LSL:
   4283       assert (opnd->qualifier == AARCH64_OPND_QLF_X);
   4284       assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   4285       if (opnd->shifter.amount == 0)
   4286 	snprintf (buf, size, "%s",
   4287 		  style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4288 						       opnd->qualifier, 0)));
   4289       else
   4290 	snprintf (buf, size, "%s, %s %s",
   4291 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4292 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4293 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4294       break;
   4295 
   4296     case AARCH64_OPND_Fd:
   4297     case AARCH64_OPND_Fn:
   4298     case AARCH64_OPND_Fm:
   4299     case AARCH64_OPND_Fa:
   4300     case AARCH64_OPND_Ft:
   4301     case AARCH64_OPND_Ft2:
   4302     case AARCH64_OPND_Sd:
   4303     case AARCH64_OPND_Sn:
   4304     case AARCH64_OPND_Sm:
   4305     case AARCH64_OPND_SVE_VZn:
   4306     case AARCH64_OPND_SVE_Vd:
   4307     case AARCH64_OPND_SVE_Vm:
   4308     case AARCH64_OPND_SVE_Vn:
   4309       snprintf (buf, size, "%s",
   4310 		style_reg (styler, "%s%d",
   4311 			   aarch64_get_qualifier_name (opnd->qualifier),
   4312 			   opnd->reg.regno));
   4313       break;
   4314 
   4315     case AARCH64_OPND_Va:
   4316     case AARCH64_OPND_Vd:
   4317     case AARCH64_OPND_Vn:
   4318     case AARCH64_OPND_Vm:
   4319       snprintf (buf, size, "%s",
   4320 		style_reg (styler, "v%d.%s", opnd->reg.regno,
   4321 			   aarch64_get_qualifier_name (opnd->qualifier)));
   4322       break;
   4323 
   4324     case AARCH64_OPND_Ed:
   4325     case AARCH64_OPND_En:
   4326     case AARCH64_OPND_Em:
   4327     case AARCH64_OPND_Em16:
   4328     case AARCH64_OPND_Em8:
   4329     case AARCH64_OPND_SM3_IMM2:
   4330       snprintf (buf, size, "%s[%s]",
   4331 		style_reg (styler, "v%d.%s", opnd->reglane.regno,
   4332 			   aarch64_get_qualifier_name (opnd->qualifier)),
   4333 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4334       break;
   4335 
   4336     case AARCH64_OPND_Em_INDEX1_14:
   4337     case AARCH64_OPND_Em_INDEX2_13:
   4338     case AARCH64_OPND_Em_INDEX3_12:
   4339       snprintf (buf, size, "%s[%s]",
   4340 		style_reg (styler, "v%d", opnd->reglane.regno),
   4341 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4342       break;
   4343 
   4344     case AARCH64_OPND_VdD1:
   4345     case AARCH64_OPND_VnD1:
   4346       snprintf (buf, size, "%s[%s]",
   4347 		style_reg (styler, "v%d.d", opnd->reg.regno),
   4348 		style_imm (styler, "1"));
   4349       break;
   4350 
   4351     case AARCH64_OPND_LVn:
   4352     case AARCH64_OPND_LVn_LUT:
   4353     case AARCH64_OPND_LVt:
   4354     case AARCH64_OPND_LVt_AL:
   4355     case AARCH64_OPND_LEt:
   4356       print_register_list (buf, size, opnd, "v", styler);
   4357       break;
   4358 
   4359     case AARCH64_OPND_SVE_Pd:
   4360     case AARCH64_OPND_SVE_Pg3:
   4361     case AARCH64_OPND_SVE_Pg4_5:
   4362     case AARCH64_OPND_SVE_Pg4_10:
   4363     case AARCH64_OPND_SVE_Pg4_16:
   4364     case AARCH64_OPND_SVE_Pm:
   4365     case AARCH64_OPND_SVE_Pn:
   4366     case AARCH64_OPND_SVE_Pt:
   4367     case AARCH64_OPND_SME_Pm:
   4368       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4369 	snprintf (buf, size, "%s",
   4370 		  style_reg (styler, "p%d", opnd->reg.regno));
   4371       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
   4372 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
   4373 	snprintf (buf, size, "%s",
   4374 		  style_reg (styler, "p%d/%s", opnd->reg.regno,
   4375 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4376       else
   4377 	snprintf (buf, size, "%s",
   4378 		  style_reg (styler, "p%d.%s", opnd->reg.regno,
   4379 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4380       break;
   4381 
   4382     case AARCH64_OPND_SVE_PNd:
   4383     case AARCH64_OPND_SVE_PNg4_10:
   4384     case AARCH64_OPND_SVE_PNn:
   4385     case AARCH64_OPND_SVE_PNt:
   4386     case AARCH64_OPND_SME_PNd3:
   4387     case AARCH64_OPND_SME_PNg3:
   4388     case AARCH64_OPND_SME_PNn:
   4389       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4390 	snprintf (buf, size, "%s",
   4391 		  style_reg (styler, "pn%d", opnd->reg.regno));
   4392       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
   4393 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
   4394 	snprintf (buf, size, "%s",
   4395 		  style_reg (styler, "pn%d/%s", opnd->reg.regno,
   4396 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4397       else
   4398 	snprintf (buf, size, "%s",
   4399 		  style_reg (styler, "pn%d.%s", opnd->reg.regno,
   4400 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4401       break;
   4402 
   4403     case AARCH64_OPND_SME_Pdx2:
   4404     case AARCH64_OPND_SME_PdxN:
   4405       print_register_list (buf, size, opnd, "p", styler);
   4406       break;
   4407 
   4408     case AARCH64_OPND_SME_PNn3_INDEX1:
   4409     case AARCH64_OPND_SME_PNn3_INDEX2:
   4410       snprintf (buf, size, "%s[%s]",
   4411 		style_reg (styler, "pn%d", opnd->reglane.regno),
   4412 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4413       break;
   4414 
   4415     case AARCH64_OPND_SVE_Za_5:
   4416     case AARCH64_OPND_SVE_Za_16:
   4417     case AARCH64_OPND_SVE_Zd:
   4418     case AARCH64_OPND_SVE_Zm_5:
   4419     case AARCH64_OPND_SVE_Zm_16:
   4420     case AARCH64_OPND_SVE_Zn:
   4421     case AARCH64_OPND_SVE_Zt:
   4422     case AARCH64_OPND_SME_Zm:
   4423     case AARCH64_OPND_SME_Zm_17:
   4424     case AARCH64_OPND_SME_Zn_6_3:
   4425     case AARCH64_OPND_SME_Zm_17_3:
   4426       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4427        snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
   4428       else
   4429        snprintf (buf, size, "%s",
   4430 		 style_reg (styler, "z%d.%s", opnd->reg.regno,
   4431 			    aarch64_get_qualifier_name (opnd->qualifier)));
   4432       break;
   4433 
   4434     case AARCH64_OPND_SVE_ZnxN:
   4435     case AARCH64_OPND_SVE_ZtxN:
   4436     case AARCH64_OPND_SME_Zdnx2:
   4437     case AARCH64_OPND_SME_Zdnx4:
   4438     case AARCH64_OPND_SME_Znx2_6_3:
   4439     case AARCH64_OPND_SME_Zmx2_17_3:
   4440     case AARCH64_OPND_SME_Zmx2:
   4441     case AARCH64_OPND_SME_Zmx4:
   4442     case AARCH64_OPND_SME_Zmx2_INDEX_22:
   4443     case AARCH64_OPND_SME_Znx2:
   4444     case AARCH64_OPND_SME_Znx2_BIT_INDEX:
   4445     case AARCH64_OPND_SME_Znx4:
   4446     case AARCH64_OPND_SME_Zn7xN_UNTYPED:
   4447     case AARCH64_OPND_SME_Ztx2_STRIDED:
   4448     case AARCH64_OPND_SME_Ztx4_STRIDED:
   4449       print_register_list (buf, size, opnd, "z", styler);
   4450       break;
   4451 
   4452     case AARCH64_OPND_SVE_Zm1_23_INDEX:
   4453     case AARCH64_OPND_SVE_Zm2_22_INDEX:
   4454     case AARCH64_OPND_SVE_Zm3_INDEX:
   4455     case AARCH64_OPND_SVE_Zm3_22_INDEX:
   4456     case AARCH64_OPND_SVE_Zm3_19_INDEX:
   4457     case AARCH64_OPND_SVE_Zm3_12_INDEX:
   4458     case AARCH64_OPND_SVE_Zm3_11_INDEX:
   4459     case AARCH64_OPND_SVE_Zm3_10_INDEX:
   4460     case AARCH64_OPND_SVE_Zm4_11_INDEX:
   4461     case AARCH64_OPND_SVE_Zm4_INDEX:
   4462     case AARCH64_OPND_SVE_Zn_INDEX:
   4463     case AARCH64_OPND_SME_Zk_INDEX:
   4464     case AARCH64_OPND_SME_Zm_INDEX1:
   4465     case AARCH64_OPND_SME_Zm_INDEX2:
   4466     case AARCH64_OPND_SME_Zm_INDEX2_3:
   4467     case AARCH64_OPND_SME_Zm_INDEX3_1:
   4468     case AARCH64_OPND_SME_Zm_INDEX3_2:
   4469     case AARCH64_OPND_SME_Zm_INDEX3_3:
   4470     case AARCH64_OPND_SME_Zm_INDEX3_10:
   4471     case AARCH64_OPND_SVE_Zn_5_INDEX:
   4472     case AARCH64_OPND_SME_Zm_INDEX4_1:
   4473     case AARCH64_OPND_SME_Zm_INDEX4_2:
   4474     case AARCH64_OPND_SME_Zm_INDEX4_3:
   4475     case AARCH64_OPND_SME_Zm_INDEX4_10:
   4476     case AARCH64_OPND_SME_Zn_INDEX1_16:
   4477     case AARCH64_OPND_SME_Zn_INDEX2_15:
   4478     case AARCH64_OPND_SME_Zn_INDEX2_16:
   4479     case AARCH64_OPND_SME_Zn_INDEX2_19:
   4480     case AARCH64_OPND_SME_Zn_INDEX3_14:
   4481     case AARCH64_OPND_SME_Zn_INDEX3_15:
   4482     case AARCH64_OPND_SME_Zn_INDEX4_14:
   4483       snprintf (buf, size, "%s[%s]",
   4484 		(opnd->qualifier == AARCH64_OPND_QLF_NIL
   4485 		 ? style_reg (styler, "z%d", opnd->reglane.regno)
   4486 		 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
   4487 			      aarch64_get_qualifier_name (opnd->qualifier))),
   4488 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4489       break;
   4490 
   4491     case AARCH64_OPND_SVE_Zn0_INDEX:
   4492     case AARCH64_OPND_SVE_Zn1_17_INDEX:
   4493     case AARCH64_OPND_SVE_Zn2_18_INDEX:
   4494     case AARCH64_OPND_SVE_Zn3_22_INDEX:
   4495     case AARCH64_OPND_SVE_Zd0_INDEX:
   4496     case AARCH64_OPND_SVE_Zd1_17_INDEX:
   4497     case AARCH64_OPND_SVE_Zd2_18_INDEX:
   4498     case AARCH64_OPND_SVE_Zd3_22_INDEX:
   4499       if (opnd->reglane.index == 0)
   4500 	snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
   4501       else
   4502 	snprintf (buf, size, "%s[%s]",
   4503 		  style_reg (styler, "z%d", opnd->reglane.regno),
   4504 		  style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4505       break;
   4506 
   4507     case AARCH64_OPND_SME_ZAda_1b:
   4508     case AARCH64_OPND_SME_ZAda_2b:
   4509     case AARCH64_OPND_SME_ZAda_3b:
   4510       snprintf (buf, size, "%s",
   4511 		style_reg (styler, "za%d.%s", opnd->reg.regno,
   4512 			   aarch64_get_qualifier_name (opnd->qualifier)));
   4513       break;
   4514 
   4515     case AARCH64_OPND_SME_ZA_HV_idx_src:
   4516     case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
   4517     case AARCH64_OPND_SME_ZA_HV_idx_dest:
   4518     case AARCH64_OPND_SME_ZA_HV_idx_destxN:
   4519     case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
   4520     case AARCH64_OPND_SME_ZA_array_vrsb_1:
   4521     case AARCH64_OPND_SME_ZA_array_vrsh_1:
   4522     case AARCH64_OPND_SME_ZA_array_vrss_1:
   4523     case AARCH64_OPND_SME_ZA_array_vrsd_1:
   4524     case AARCH64_OPND_SME_ZA_array_vrsb_2:
   4525     case AARCH64_OPND_SME_ZA_array_vrsh_2:
   4526     case AARCH64_OPND_SME_ZA_array_vrss_2:
   4527     case AARCH64_OPND_SME_ZA_array_vrsd_2:
   4528     case AARCH64_OPND_SME_ZA_ARRAY4:
   4529       snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
   4530 		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
   4531 		style_reg (styler, "za%d%c%s%s",
   4532 			   opnd->indexed_za.regno,
   4533 			   opnd->indexed_za.v == 1 ? 'v' : 'h',
   4534 			   opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
   4535 			   (opnd->qualifier == AARCH64_OPND_QLF_NIL
   4536 			    ? ""
   4537 			    : aarch64_get_qualifier_name (opnd->qualifier))),
   4538 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4539 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
   4540 		opnd->indexed_za.index.countm1 ? ":" : "",
   4541 		(opnd->indexed_za.index.countm1
   4542 		 ? style_imm (styler, "%d",
   4543 			      opnd->indexed_za.index.imm
   4544 			      + opnd->indexed_za.index.countm1)
   4545 		 : ""),
   4546 		opnd->indexed_za.group_size ? ", " : "",
   4547 		opnd->indexed_za.group_size == 2
   4548 		? style_sub_mnem (styler, "vgx2")
   4549 		: opnd->indexed_za.group_size == 4
   4550 		? style_sub_mnem (styler, "vgx4") : "",
   4551 		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
   4552       break;
   4553 
   4554     case AARCH64_OPND_SME_list_of_64bit_tiles:
   4555       print_sme_za_list (buf, size, opnd->imm.value, styler);
   4556       break;
   4557 
   4558     case AARCH64_OPND_SME_ZA_array_off1x4:
   4559     case AARCH64_OPND_SME_ZA_array_off2x2:
   4560     case AARCH64_OPND_SME_ZA_array_off2x4:
   4561     case AARCH64_OPND_SME_ZA_array_off3_0:
   4562     case AARCH64_OPND_SME_ZA_array_off3_5:
   4563     case AARCH64_OPND_SME_ZA_array_off3x2:
   4564     case AARCH64_OPND_SME_ZA_array_off4:
   4565       snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
   4566 		style_reg (styler, "za%s%s",
   4567 			   opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
   4568 			   (opnd->qualifier == AARCH64_OPND_QLF_NIL
   4569 			    ? ""
   4570 			    : aarch64_get_qualifier_name (opnd->qualifier))),
   4571 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4572 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
   4573 		opnd->indexed_za.index.countm1 ? ":" : "",
   4574 		(opnd->indexed_za.index.countm1
   4575 		 ? style_imm (styler, "%d",
   4576 			      opnd->indexed_za.index.imm
   4577 			      + opnd->indexed_za.index.countm1)
   4578 		 : ""),
   4579 		opnd->indexed_za.group_size ? ", " : "",
   4580 		opnd->indexed_za.group_size == 2
   4581 		? style_sub_mnem (styler, "vgx2")
   4582 		: opnd->indexed_za.group_size == 4
   4583 		? style_sub_mnem (styler, "vgx4") : "");
   4584       break;
   4585 
   4586     case AARCH64_OPND_SME_SM_ZA:
   4587       snprintf (buf, size, "%s",
   4588 		style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
   4589       break;
   4590 
   4591     case AARCH64_OPND_SME_PnT_Wm_imm:
   4592       snprintf (buf, size, "%s[%s, %s]",
   4593 		style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
   4594 			   aarch64_get_qualifier_name (opnd->qualifier)),
   4595 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4596 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
   4597       break;
   4598 
   4599     case AARCH64_OPND_SME_VLxN_10:
   4600     case AARCH64_OPND_SME_VLxN_13:
   4601       enum_value = opnd->imm.value;
   4602       assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
   4603       snprintf (buf, size, "%s",
   4604 		style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
   4605       break;
   4606 
   4607     case AARCH64_OPND_BRBOP:
   4608       enum_value = opnd->imm.value;
   4609       assert (enum_value < ARRAY_SIZE (aarch64_brbop_array));
   4610       snprintf (buf, size, "%s",
   4611 		style_sub_mnem (styler, aarch64_brbop_array[enum_value]));
   4612       break;
   4613 
   4614     case AARCH64_OPND_CRn:
   4615     case AARCH64_OPND_CRm:
   4616       snprintf (buf, size, "%s",
   4617 		style_reg (styler, "C%" PRIi64, opnd->imm.value));
   4618       break;
   4619 
   4620     case AARCH64_OPND_IDX:
   4621     case AARCH64_OPND_MASK:
   4622     case AARCH64_OPND_IMM:
   4623     case AARCH64_OPND_IMM_2:
   4624     case AARCH64_OPND_WIDTH:
   4625     case AARCH64_OPND_UIMM3_OP1:
   4626     case AARCH64_OPND_UIMM3_OP2:
   4627     case AARCH64_OPND_BIT_NUM:
   4628     case AARCH64_OPND_IMM_VLSL:
   4629     case AARCH64_OPND_IMM_VLSR:
   4630     case AARCH64_OPND_SHLL_IMM:
   4631     case AARCH64_OPND_IMM0:
   4632     case AARCH64_OPND_IMMR:
   4633     case AARCH64_OPND_IMMS:
   4634     case AARCH64_OPND_UNDEFINED:
   4635     case AARCH64_OPND_FBITS:
   4636     case AARCH64_OPND_TME_UIMM16:
   4637     case AARCH64_OPND_SIMM5:
   4638     case AARCH64_OPND_SME_SHRIMM3:
   4639     case AARCH64_OPND_SME_SHRIMM4:
   4640     case AARCH64_OPND_SME_SHRIMM5:
   4641     case AARCH64_OPND_SVE_SHLIMM_PRED:
   4642     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
   4643     case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
   4644     case AARCH64_OPND_SVE_SHRIMM_PRED:
   4645     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
   4646     case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
   4647     case AARCH64_OPND_SVE_SIMM5:
   4648     case AARCH64_OPND_SVE_SIMM5B:
   4649     case AARCH64_OPND_SVE_SIMM6:
   4650     case AARCH64_OPND_SVE_SIMM8:
   4651     case AARCH64_OPND_SVE_UIMM3:
   4652     case AARCH64_OPND_SVE_UIMM7:
   4653     case AARCH64_OPND_SVE_UIMM8:
   4654     case AARCH64_OPND_SVE_UIMM4:
   4655     case AARCH64_OPND_SVE_UIMM8_53:
   4656     case AARCH64_OPND_IMM_ROT1:
   4657     case AARCH64_OPND_IMM_ROT2:
   4658     case AARCH64_OPND_IMM_ROT3:
   4659     case AARCH64_OPND_SVE_IMM_ROT1:
   4660     case AARCH64_OPND_SVE_IMM_ROT2:
   4661     case AARCH64_OPND_SVE_IMM_ROT3:
   4662     case AARCH64_OPND_CSSC_SIMM8:
   4663     case AARCH64_OPND_CSSC_UIMM8:
   4664       snprintf (buf, size, "%s",
   4665 		style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4666       break;
   4667 
   4668     case AARCH64_OPND_SVE_I1_HALF_ONE:
   4669     case AARCH64_OPND_SVE_I1_HALF_TWO:
   4670     case AARCH64_OPND_SVE_I1_ZERO_ONE:
   4671       {
   4672 	single_conv_t c;
   4673 	c.i = opnd->imm.value;
   4674 	snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
   4675 	break;
   4676       }
   4677 
   4678     case AARCH64_OPND_SVE_PATTERN:
   4679       if (optional_operand_p (opcode, idx)
   4680 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   4681 	break;
   4682       enum_value = opnd->imm.value;
   4683       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   4684       if (aarch64_sve_pattern_array[enum_value])
   4685 	snprintf (buf, size, "%s",
   4686 		  style_reg (styler, aarch64_sve_pattern_array[enum_value]));
   4687       else
   4688 	snprintf (buf, size, "%s",
   4689 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4690       break;
   4691 
   4692     case AARCH64_OPND_SVE_PATTERN_SCALED:
   4693       if (optional_operand_p (opcode, idx)
   4694 	  && !opnd->shifter.operator_present
   4695 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   4696 	break;
   4697       enum_value = opnd->imm.value;
   4698       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   4699       if (aarch64_sve_pattern_array[opnd->imm.value])
   4700 	snprintf (buf, size, "%s",
   4701 		  style_reg (styler,
   4702 			     aarch64_sve_pattern_array[opnd->imm.value]));
   4703       else
   4704 	snprintf (buf, size, "%s",
   4705 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4706       if (opnd->shifter.operator_present)
   4707 	{
   4708 	  size_t len = strlen (buf);
   4709 	  const char *shift_name
   4710 	    = aarch64_operand_modifiers[opnd->shifter.kind].name;
   4711 	  snprintf (buf + len, size - len, ", %s %s",
   4712 		    style_sub_mnem (styler, shift_name),
   4713 		    style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4714 	}
   4715       break;
   4716 
   4717     case AARCH64_OPND_SVE_PRFOP:
   4718       enum_value = opnd->imm.value;
   4719       assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
   4720       if (aarch64_sve_prfop_array[enum_value])
   4721 	snprintf (buf, size, "%s",
   4722 		  style_reg (styler, aarch64_sve_prfop_array[enum_value]));
   4723       else
   4724 	snprintf (buf, size, "%s",
   4725 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4726       break;
   4727 
   4728     case AARCH64_OPND_IMM_MOV:
   4729       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   4730 	{
   4731 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
   4732 	    {
   4733 	      int imm32 = opnd->imm.value;
   4734 	      snprintf (buf, size, "%s",
   4735 			style_imm (styler, "#0x%-20x", imm32));
   4736 	      snprintf (comment, comment_size, "#%d", imm32);
   4737 	    }
   4738 	  break;
   4739 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
   4740 	  snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
   4741 						opnd->imm.value));
   4742 	  snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
   4743 	  break;
   4744 	default:
   4745 	  snprintf (buf, size, "<invalid>");
   4746 	  break;
   4747 	}
   4748       break;
   4749 
   4750     case AARCH64_OPND_FPIMM0:
   4751       snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
   4752       break;
   4753 
   4754     case AARCH64_OPND_LIMM:
   4755     case AARCH64_OPND_AIMM:
   4756     case AARCH64_OPND_HALF:
   4757     case AARCH64_OPND_SVE_INV_LIMM:
   4758     case AARCH64_OPND_SVE_LIMM:
   4759     case AARCH64_OPND_SVE_LIMM_MOV:
   4760       if (opnd->shifter.amount)
   4761 	snprintf (buf, size, "%s, %s %s",
   4762 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
   4763 		  style_sub_mnem (styler, "lsl"),
   4764 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4765       else
   4766 	snprintf (buf, size, "%s",
   4767 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
   4768       break;
   4769 
   4770     case AARCH64_OPND_SIMD_IMM:
   4771     case AARCH64_OPND_SIMD_IMM_SFT:
   4772       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
   4773 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
   4774 	snprintf (buf, size, "%s",
   4775 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
   4776       else
   4777 	snprintf (buf, size, "%s, %s %s",
   4778 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
   4779 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4780 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4781       break;
   4782 
   4783     case AARCH64_OPND_SVE_AIMM:
   4784     case AARCH64_OPND_SVE_ASIMM:
   4785       if (opnd->shifter.amount)
   4786 	snprintf (buf, size, "%s, %s %s",
   4787 		  style_imm (styler, "#%" PRIi64, opnd->imm.value),
   4788 		  style_sub_mnem (styler, "lsl"),
   4789 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4790       else
   4791 	snprintf (buf, size, "%s",
   4792 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4793       break;
   4794 
   4795     case AARCH64_OPND_FPIMM:
   4796     case AARCH64_OPND_SIMD_FPIMM:
   4797     case AARCH64_OPND_SVE_FPIMM8:
   4798       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   4799 	{
   4800 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
   4801 	    {
   4802 	      half_conv_t c;
   4803 	      c.i = expand_fp_imm (2, opnd->imm.value);
   4804 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
   4805 	    }
   4806 	  break;
   4807 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
   4808 	    {
   4809 	      single_conv_t c;
   4810 	      c.i = expand_fp_imm (4, opnd->imm.value);
   4811 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
   4812 	    }
   4813 	  break;
   4814 	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
   4815 	    {
   4816 	      double_conv_t c;
   4817 	      c.i = expand_fp_imm (8, opnd->imm.value);
   4818 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
   4819 	    }
   4820 	  break;
   4821 	default:
   4822 	  snprintf (buf, size, "<invalid>");
   4823 	  break;
   4824 	}
   4825       break;
   4826 
   4827     case AARCH64_OPND_CCMP_IMM:
   4828     case AARCH64_OPND_NZCV:
   4829     case AARCH64_OPND_EXCEPTION:
   4830     case AARCH64_OPND_UIMM4:
   4831     case AARCH64_OPND_UIMM4_ADDG:
   4832     case AARCH64_OPND_UIMM7:
   4833     case AARCH64_OPND_UIMM10:
   4834       if (optional_operand_p (opcode, idx)
   4835 	  && (opnd->imm.value ==
   4836 	      (int64_t) get_optional_operand_default_value (opcode)))
   4837 	/* Omit the operand, e.g. DCPS1.  */
   4838 	break;
   4839       snprintf (buf, size, "%s",
   4840 		style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
   4841       break;
   4842 
   4843     case AARCH64_OPND_COND:
   4844     case AARCH64_OPND_COND1:
   4845       snprintf (buf, size, "%s",
   4846 		style_sub_mnem (styler, opnd->cond->names[0]));
   4847       num_conds = ARRAY_SIZE (opnd->cond->names);
   4848       for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
   4849 	{
   4850 	  size_t len = comment != NULL ? strlen (comment) : 0;
   4851 	  if (i == 1)
   4852 	    snprintf (comment + len, comment_size - len, "%s = %s",
   4853 		      opnd->cond->names[0], opnd->cond->names[i]);
   4854 	  else
   4855 	    snprintf (comment + len, comment_size - len, ", %s",
   4856 		      opnd->cond->names[i]);
   4857 	}
   4858       break;
   4859 
   4860     case AARCH64_OPND_ADDR_ADRP:
   4861       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
   4862 	+ opnd->imm.value;
   4863       if (pcrel_p)
   4864 	*pcrel_p = 1;
   4865       if (address)
   4866 	*address = addr;
   4867       /* This is not necessary during the disassembling, as print_address_func
   4868 	 in the disassemble_info will take care of the printing.  But some
   4869 	 other callers may be still interested in getting the string in *STR,
   4870 	 so here we do snprintf regardless.  */
   4871       snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
   4872       break;
   4873 
   4874     case AARCH64_OPND_ADDR_PCREL9:
   4875     case AARCH64_OPND_ADDR_PCREL14:
   4876     case AARCH64_OPND_ADDR_PCREL19:
   4877     case AARCH64_OPND_ADDR_PCREL21:
   4878     case AARCH64_OPND_ADDR_PCREL26:
   4879       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
   4880       if (pcrel_p)
   4881 	*pcrel_p = 1;
   4882       if (address)
   4883 	*address = addr;
   4884       /* This is not necessary during the disassembling, as print_address_func
   4885 	 in the disassemble_info will take care of the printing.  But some
   4886 	 other callers may be still interested in getting the string in *STR,
   4887 	 so here we do snprintf regardless.  */
   4888       snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
   4889       break;
   4890 
   4891     case AARCH64_OPND_ADDR_SIMPLE:
   4892     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
   4893     case AARCH64_OPND_SIMD_ADDR_POST:
   4894       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   4895       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
   4896 	{
   4897 	  if (opnd->addr.offset.is_reg)
   4898 	    snprintf (buf, size, "[%s], %s",
   4899 		      style_reg (styler, name),
   4900 		      style_reg (styler, "x%d", opnd->addr.offset.regno));
   4901 	  else
   4902 	    snprintf (buf, size, "[%s], %s",
   4903 		      style_reg (styler, name),
   4904 		      style_imm (styler, "#%d", opnd->addr.offset.imm));
   4905 	}
   4906       else
   4907 	snprintf (buf, size, "[%s]", style_reg (styler, name));
   4908       break;
   4909 
   4910     case AARCH64_OPND_ADDR_REGOFF:
   4911     case AARCH64_OPND_SVE_ADDR_RR:
   4912     case AARCH64_OPND_SVE_ADDR_RR_LSL1:
   4913     case AARCH64_OPND_SVE_ADDR_RR_LSL2:
   4914     case AARCH64_OPND_SVE_ADDR_RR_LSL3:
   4915     case AARCH64_OPND_SVE_ADDR_RR_LSL4:
   4916     case AARCH64_OPND_SVE_ADDR_RM:
   4917     case AARCH64_OPND_SVE_ADDR_RM_LSL1:
   4918     case AARCH64_OPND_SVE_ADDR_RM_LSL2:
   4919     case AARCH64_OPND_SVE_ADDR_RM_LSL3:
   4920     case AARCH64_OPND_SVE_ADDR_RM_LSL4:
   4921     case AARCH64_OPND_SVE_ADDR_RX:
   4922     case AARCH64_OPND_SVE_ADDR_RX_LSL1:
   4923     case AARCH64_OPND_SVE_ADDR_RX_LSL2:
   4924     case AARCH64_OPND_SVE_ADDR_RX_LSL3:
   4925     case AARCH64_OPND_SVE_ADDR_RX_LSL4:
   4926       print_register_offset_address
   4927 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4928 	 get_offset_int_reg_name (opnd), styler);
   4929       break;
   4930 
   4931     case AARCH64_OPND_SVE_ADDR_ZX:
   4932       print_register_offset_address
   4933 	(buf, size, opnd,
   4934 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   4935 	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
   4936       break;
   4937 
   4938     case AARCH64_OPND_SVE_ADDR_RZ:
   4939     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
   4940     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
   4941     case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
   4942     case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
   4943     case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
   4944     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
   4945     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
   4946     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
   4947     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
   4948     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
   4949     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
   4950       print_register_offset_address
   4951 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4952 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
   4953 	 styler);
   4954       break;
   4955 
   4956     case AARCH64_OPND_ADDR_SIMM7:
   4957     case AARCH64_OPND_ADDR_SIMM9:
   4958     case AARCH64_OPND_ADDR_SIMM9_2:
   4959     case AARCH64_OPND_ADDR_SIMM10:
   4960     case AARCH64_OPND_ADDR_SIMM11:
   4961     case AARCH64_OPND_ADDR_SIMM13:
   4962     case AARCH64_OPND_RCPC3_ADDR_OFFSET:
   4963     case AARCH64_OPND_ADDR_OFFSET:
   4964     case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
   4965     case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
   4966     case AARCH64_OPND_RCPC3_ADDR_POSTIND:
   4967     case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
   4968     case AARCH64_OPND_SME_ADDR_RI_U4xVL:
   4969     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
   4970     case AARCH64_OPND_SVE_ADDR_RI_S4x32:
   4971     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
   4972     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
   4973     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
   4974     case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
   4975     case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
   4976     case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
   4977     case AARCH64_OPND_SVE_ADDR_RI_U6:
   4978     case AARCH64_OPND_SVE_ADDR_RI_U6x2:
   4979     case AARCH64_OPND_SVE_ADDR_RI_U6x4:
   4980     case AARCH64_OPND_SVE_ADDR_RI_U6x8:
   4981       print_immediate_offset_address
   4982 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4983 	 styler);
   4984       break;
   4985 
   4986     case AARCH64_OPND_SVE_ADDR_ZI_U5:
   4987     case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
   4988     case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
   4989     case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
   4990       print_immediate_offset_address
   4991 	(buf, size, opnd,
   4992 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   4993 	 styler);
   4994       break;
   4995 
   4996     case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
   4997     case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
   4998     case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
   4999       print_register_offset_address
   5000 	(buf, size, opnd,
   5001 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   5002 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
   5003 	 styler);
   5004       break;
   5005 
   5006     case AARCH64_OPND_ADDR_UIMM12:
   5007       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   5008       if (opnd->addr.offset.imm)
   5009 	snprintf (buf, size, "[%s, %s]",
   5010 		  style_reg (styler, name),
   5011 		  style_imm (styler, "#%d", opnd->addr.offset.imm));
   5012       else
   5013 	snprintf (buf, size, "[%s]", style_reg (styler, name));
   5014       break;
   5015 
   5016     case AARCH64_OPND_SYSREG:
   5017     case AARCH64_OPND_SYSREG128:
   5018       {
   5019 	int min_mismatch = 999;
   5020 	int best_index = -1;
   5021 	uint32_t op_flags = opnd->sysreg.flags;
   5022 	for (i = 0; aarch64_sys_regs[i].name; ++i)
   5023 	  {
   5024 	    const aarch64_sys_reg *sr = aarch64_sys_regs + i;
   5025 
   5026 	    if (!(aarch64_sys_regs[i].value == opnd->sysreg.value)
   5027 		|| aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
   5028 		|| aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags))
   5029 	      continue;
   5030 
   5031 	    int mismatch_score = 0;
   5032 	    if (!AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features))
   5033 	      mismatch_score += 1;
   5034 	    /* This read/write check only works during disassembly.  During
   5035 	       assembly the value of op_flags was copied from sr->flags.  */
   5036 	    if (((sr->flags & F_REG_READ) && (op_flags & F_REG_WRITE))
   5037 		|| ((sr->flags & F_REG_WRITE) && (op_flags & F_REG_READ)))
   5038 	      mismatch_score += 2;
   5039 
   5040 	    if (mismatch_score < min_mismatch)
   5041 	      {
   5042 		min_mismatch = mismatch_score;
   5043 		best_index = i;
   5044 		if (mismatch_score == 0)
   5045 		  break;
   5046 	      }
   5047 	  }
   5048 	if (best_index == -1)
   5049 	  {
   5050 	    /* Use encoding-based name for unrecognised system register.  */
   5051 	    unsigned int value = opnd->sysreg.value;
   5052 	    snprintf (buf, size, "%s",
   5053 		      style_reg (styler, "s%u_%u_c%u_c%u_%u",
   5054 				 (value >> 14) & 0x3, (value >> 11) & 0x7,
   5055 				 (value >> 7) & 0xf, (value >> 3) & 0xf,
   5056 				 value & 0x7));
   5057 	  }
   5058 	else
   5059 	  {
   5060 	    const aarch64_sys_reg *sr = aarch64_sys_regs + best_index;
   5061 	    snprintf (buf, size, "%s", style_reg (styler, sr->name));
   5062 
   5063 	    /* Add a note if we violated read/write constraints.  */
   5064 	    if (notes && min_mismatch)
   5065 	      {
   5066 		if ((sr->flags & F_REG_READ) && (op_flags & F_REG_WRITE))
   5067 		  *notes = _("writing to a read-only register");
   5068 		else if ((sr->flags & F_REG_WRITE) && (op_flags & F_REG_READ))
   5069 		  *notes = _("reading from a write-only register");
   5070 	      }
   5071 	  }
   5072 	break;
   5073       }
   5074 
   5075     case AARCH64_OPND_PSTATEFIELD:
   5076       for (i = 0; aarch64_pstatefields[i].name; ++i)
   5077         if (aarch64_pstatefields[i].value == opnd->pstatefield)
   5078           {
   5079             /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
   5080                SVCRZA and SVCRSMZA.  */
   5081             uint32_t flags = aarch64_pstatefields[i].flags;
   5082             if (flags & F_REG_IN_CRM
   5083                 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
   5084                     != PSTATE_DECODE_CRM (flags)))
   5085               continue;
   5086             break;
   5087           }
   5088       assert (aarch64_pstatefields[i].name);
   5089       snprintf (buf, size, "%s",
   5090 		style_reg (styler, aarch64_pstatefields[i].name));
   5091       break;
   5092 
   5093     case AARCH64_OPND_GIC:
   5094     case AARCH64_OPND_GICR:
   5095     case AARCH64_OPND_GSB:
   5096     case AARCH64_OPND_SYSREG_AT:
   5097     case AARCH64_OPND_SYSREG_DC:
   5098     case AARCH64_OPND_SYSREG_IC:
   5099     case AARCH64_OPND_SYSREG_TLBI:
   5100     case AARCH64_OPND_SYSREG_TLBIP:
   5101     case AARCH64_OPND_SYSREG_PLBI:
   5102     case AARCH64_OPND_SYSREG_MLBI:
   5103     case AARCH64_OPND_SYSREG_SR:
   5104       snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
   5105       break;
   5106 
   5107     case AARCH64_OPND_BARRIER:
   5108     case AARCH64_OPND_BARRIER_DSB_NXS:
   5109       {
   5110 	if (opnd->barrier->name[0] == '#')
   5111 	  snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
   5112 	else
   5113 	  snprintf (buf, size, "%s",
   5114 		    style_sub_mnem (styler, opnd->barrier->name));
   5115       }
   5116       break;
   5117 
   5118     case AARCH64_OPND_BARRIER_ISB:
   5119       /* Operand can be omitted, e.g. in DCPS1.  */
   5120       if (! optional_operand_p (opcode, idx)
   5121 	  || (opnd->barrier->value
   5122 	      != get_optional_operand_default_value (opcode)))
   5123 	snprintf (buf, size, "%s",
   5124 		  style_imm (styler, "#0x%x", opnd->barrier->value));
   5125       break;
   5126 
   5127     case AARCH64_OPND_PRFOP:
   5128       if ((opnd->prfop->name == NULL)
   5129           || (opcode->iclass != ldst_pos && opnd->prfop->value == 0x18))
   5130         snprintf (buf, size, "%s",
   5131                   style_imm (styler, "#0x%02x", opnd->prfop->value));
   5132       else
   5133         snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
   5134       break;
   5135 
   5136     case AARCH64_OPND_RPRFMOP:
   5137       enum_value = opnd->imm.value;
   5138       if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
   5139 	  && aarch64_rprfmop_array[enum_value])
   5140 	snprintf (buf, size, "%s",
   5141 		  style_reg (styler, aarch64_rprfmop_array[enum_value]));
   5142       else
   5143 	snprintf (buf, size, "%s",
   5144 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   5145       break;
   5146 
   5147     case AARCH64_OPND_BARRIER_PSB:
   5148       snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
   5149       break;
   5150 
   5151     case AARCH64_OPND_X16:
   5152       snprintf (buf, size, "%s", style_reg (styler, "x16"));
   5153       break;
   5154 
   5155     case AARCH64_OPND_SME_ZT0:
   5156       snprintf (buf, size, "%s", style_reg (styler, "zt0"));
   5157       break;
   5158 
   5159     case AARCH64_OPND_SME_ZT0_INDEX:
   5160       snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
   5161 		style_imm (styler, "%d", (int) opnd->imm.value));
   5162       break;
   5163     case AARCH64_OPND_SME_ZT0_INDEX_MUL_VL:
   5164       snprintf (buf, size, "%s[%s, %s]", style_reg (styler, "zt0"),
   5165 		style_imm (styler, "%d", (int) opnd->imm.value),
   5166 		style_sub_mnem (styler, "mul vl"));
   5167       break;
   5168 
   5169     case AARCH64_OPND_SME_ZT0_LIST:
   5170       snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
   5171       break;
   5172 
   5173     case AARCH64_OPND_BARRIER_GCSB:
   5174       snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
   5175       break;
   5176 
   5177     case AARCH64_OPND_NOT_BALANCED_10:
   5178     case AARCH64_OPND_NOT_BALANCED_17:
   5179       if (opnd->imm.value)
   5180 	snprintf (buf, size, "%s", style_sub_mnem (styler, "nb"));
   5181       break;
   5182 
   5183     case AARCH64_OPND_BTI_TARGET:
   5184       snprintf (buf, size, "%s",
   5185 		style_sub_mnem (styler, opnd->hint_option->name));
   5186       break;
   5187 
   5188     case AARCH64_OPND_STSHH_POLICY:
   5189       snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->hint_option->name));
   5190       break;
   5191 
   5192     case AARCH64_OPND_SHUH_PHINT:
   5193       if (*(opnd->hint_option->name))
   5194 	snprintf (buf, size, "%s",
   5195 		  style_sub_mnem (styler, opnd->hint_option->name));
   5196       break;
   5197 
   5198     case AARCH64_OPND_MOPS_ADDR_Rd:
   5199     case AARCH64_OPND_MOPS_ADDR_Rs:
   5200       snprintf (buf, size, "[%s]!",
   5201 		style_reg (styler,
   5202 			   get_int_reg_name (opnd->reg.regno,
   5203 					     AARCH64_OPND_QLF_X, 0)));
   5204       break;
   5205 
   5206     case AARCH64_OPND_MOPS_WB_Rn:
   5207       snprintf (buf, size, "%s!",
   5208 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   5209 						     AARCH64_OPND_QLF_X, 0)));
   5210       break;
   5211 
   5212     default:
   5213       snprintf (buf, size, "<invalid>");
   5214       break;
   5215     }
   5216 }
   5217 
/* Pack the (op0, op1, CRn, CRm, op2) fields of a system-register access
   into the single integer encoding used throughout the tables below.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm register numbers used in the system
   instruction tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
   5242 
/* TODO: one more issue needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */

/* Table of all named system registers; the entries are generated from
   aarch64-sys-regs.def and the table is terminated by a null-name entry.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  #define SYSREG(name, encoding, flags, features) \
    { name, encoding, flags, features },
  #include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
  #undef SYSREG
};
   5256 
   5257 bool
   5258 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
   5259 {
   5260   return (reg_flags & F_DEPRECATED) != 0;
   5261 }
   5262 
   5263 bool
   5264 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
   5265 {
   5266   return (reg_flags & F_REG_128) != 0;
   5267 }
   5268 
   5269 bool
   5270 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
   5271 {
   5272   return (reg_flags & F_REG_ALIAS) != 0;
   5273 }
   5274 
/* MSR (immediate) PSTATE field operands.

   The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2.  CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).

   F_REG_MAX_VALUE gives the largest immediate accepted by the field;
   the SVCR* entries additionally encode a CRm pattern (see F_REG_IN_CRM
   handling in the PSTATEFIELD printing code).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",	0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset",	0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr",	0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan",	0x04, F_REG_MAX_VALUE (1), AARCH64_FEATURE (PAN) },
  { "uao",	0x03, F_REG_MAX_VALUE (1), AARCH64_FEATURE (V8_2A) },
  { "ssbs",	0x19, F_REG_MAX_VALUE (1), AARCH64_FEATURE (SSBS) },
  { "dit",	0x1a, F_REG_MAX_VALUE (1), AARCH64_FEATURE (V8_4A) },
  { "tco",	0x1c, F_REG_MAX_VALUE (1), AARCH64_FEATURE (MEMTAG) },
  { "svcrsm",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1),
    AARCH64_FEATURE (SME) },
  { "svcrza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1),
    AARCH64_FEATURE (SME) },
  { "svcrsmza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1),
    AARCH64_FEATURE (SME) },
  { "allint",	0x08, F_REG_MAX_VALUE (1), AARCH64_FEATURE (V8_8A) },
  { 0,	CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
   5301 
   5302 bool
   5303 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
   5304 				 const aarch64_sys_reg *reg)
   5305 {
   5306   return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
   5307 }
   5308 
/* IC (instruction cache maintenance) operations.  F_HASXT marks an
   operation that takes an Xt address operand.  Terminated by a
   null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES },
    { "iallu",   CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES },
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5316 
/* DC (data cache maintenance) operations.  F_HASXT marks an operation
   that takes an Xt address operand; the features field gates the entry
   on the architecture extension that introduced it.  Terminated by a
   null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "zgbva",	    CPENS (3, C7, C4, 5),  F_HASXT, AARCH64_FEATURE (MTETC) },
    { "gbva",	    CPENS (3, C7, C4, 7),  F_HASXT, AARCH64_FEATURE (MTETC) },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT, AARCH64_NO_FEATURES },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvaps",   CPENS (0, C7, C15, 5), F_HASXT, AARCH64_FEATURES (2, MEMTAG, PoPS) },
    { "civaps",     CPENS (0, C7, C15, 1), F_HASXT, AARCH64_FEATURE (PoPS) },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cvaoc",      CPENS (3, C7, C11, 0), F_HASXT, AARCH64_FEATURE (OCCMO) },
    { "cgdvaoc",    CPENS (3, C7, C11, 7), F_HASXT, AARCH64_FEATURES (2, OCCMO, MEMTAG) },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT, AARCH64_FEATURE (V8_2A) },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT, AARCH64_FEATURE (CVADP) },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT, AARCH64_FEATURE (MEMTAG) },
    { "civaoc",     CPENS (3, C7, C15, 0), F_HASXT, AARCH64_FEATURE (OCCMO) },
    { "cigdvaoc",   CPENS (3, C7, C15, 7), F_HASXT, AARCH64_FEATURES (2, OCCMO, MEMTAG) },
    { "cipae",      CPENS (4, C7, C14, 0), F_HASXT, AARCH64_FEATURE (V8_7A) },
    { "cigdpae",    CPENS (4, C7, C14, 7), F_HASXT, AARCH64_FEATURE (V8_7A) },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES },
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5361 
/* AT (address translation) operations.  All take an Xt address operand
   (F_HASXT).  Terminated by a null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1a",      CPENS (0, C7, C9, 2), F_HASXT, AARCH64_FEATURE (ATS1A) },
    { "s1e2a",      CPENS (4, C7, C9, 2), F_HASXT, AARCH64_FEATURE (ATS1A) },
    { "s1e3a",      CPENS (6, C7, C9, 2), F_HASXT, AARCH64_FEATURE (ATS1A) },
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5383 
/* TLBI (TLB invalidation) operations.  Each TLBI_XS_OP expansion emits
   two entries: the base operation and its "nxs" variant (which sets CRn
   to C9 and requires FEAT_XS).  The macro is redefined per group so that
   each group carries the feature requirements of its operations.
   Terminated by a null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "paallos",    CPENS (6, C8, C1, 4), 0, AARCH64_NO_FEATURES },
    { "paall",      CPENS (6, C8, C7, 4), 0, AARCH64_NO_FEATURES },

/* Group: base ops gated on TLBID; "nxs" forms additionally need XS.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURE (TLBID)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, TLBID)},

    TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), F_TLBID_XT)
    TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), F_TLBID_XT)
    TLBI_XS_OP ( "alle2is",   CPENS (4, C8, C3, 0), F_TLBID_XT)
    TLBI_XS_OP ( "alle1is",   CPENS (4, C8, C3, 4), F_TLBID_XT)
#undef TLBI_XS_OP

/* Group: base ops gated on D128_TLBID; "nxs" forms additionally need XS.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURE (D128_TLBID)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, D128_TLBID)},

    TLBI_XS_OP ( "vae1is",    CPENS (0, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1is",   CPENS (0, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2is",    CPENS (4, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2is",   CPENS (4, C8, C3, 5), F_HASXT | F_REG_128)
#undef TLBI_XS_OP

/* Group: base ops always available; "nxs" forms need XS.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_NO_FEATURES}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURE (XS)},

    TLBI_XS_OP ( "vmalle1",   CPENS (0, C8, C7, 0), 0)
    TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0)
    TLBI_XS_OP ( "alle2",     CPENS (4, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle1",     CPENS (4, C8, C7, 4), 0)
    TLBI_XS_OP ( "alle3",     CPENS (6, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle3is",   CPENS (6, C8, C3, 0), 0)
#undef TLBI_XS_OP

/* Group: base ops gated on D128; "nxs" forms additionally need XS.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURE (D128)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, D128)},

    TLBI_XS_OP ( "vae1",      CPENS (0, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1",    CPENS (0, C8, C7, 2), F_HASXT)
    TLBI_XS_OP ( "vaae1",     CPENS (0, C8, C7, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1is",  CPENS (0, C8, C3, 2), F_HASXT)
    TLBI_XS_OP ( "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2",      CPENS (4, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae3",      CPENS (6, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae3is",    CPENS (6, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3is",   CPENS (6, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1",     CPENS (0, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2",     CPENS (4, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3",     CPENS (6, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1",    CPENS (0, C8, C7, 7), F_HASXT | F_REG_128)
#undef TLBI_XS_OP

/* Group: base ops gated on V8_4A + TLBID; "nxs" forms need XS + TLBID.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURES (2, V8_4A, TLBID)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, TLBID)},

    TLBI_XS_OP ( "vmalle1os",    CPENS (0, C8, C1, 0), F_TLBID_XT)
    TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), F_TLBID_XT)
    TLBI_XS_OP ( "alle2os",      CPENS (4, C8, C1, 0), F_TLBID_XT)
    TLBI_XS_OP ( "alle1os",      CPENS (4, C8, C1, 4), F_TLBID_XT)
    TLBI_XS_OP ( "vmallws2e1is", CPENS (4, C8, C2, 2), F_TLBID_XT)
    TLBI_XS_OP ( "vmallws2e1os", CPENS (4, C8, C5, 2), F_TLBID_XT)
#undef TLBI_XS_OP

/* Group: base ops gated on V8_4A + D128_TLBID; "nxs" forms need
   XS + D128_TLBID.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURES (2, V8_4A, D128_TLBID)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, D128_TLBID)},

    TLBI_XS_OP ( "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_REG_128)
#undef TLBI_XS_OP

/* Group: base ops gated on V8_4A; "nxs" forms need XS.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURE (V8_4A)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURE (XS)},

    TLBI_XS_OP ( "alle3os",      CPENS (6, C8, C1, 0), 0)
    TLBI_XS_OP ( "vmallws2e1",	CPENS (4, C8, C6, 3), 0)
#undef TLBI_XS_OP

/* Group: base ops gated on V8_4A + D128; "nxs" forms need XS + D128.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS,  AARCH64_FEATURES (2, V8_4A, D128)}, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS, AARCH64_FEATURES (2, XS, D128)},

    TLBI_XS_OP ( "aside1os",     CPENS (0, C8, C1, 2), F_HASXT)
    TLBI_XS_OP ( "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 )

    TLBI_XS_OP ( "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 )
#undef TLBI_XS_OP

    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5523 
/* PLBI system instruction operands.  Each PLBI_XS_OP entry expands to two
   table rows: the base operation, and an OP"nxs" variant whose encoding has
   CPENS (0, 0, C8, 0) or'ed in and which is additionally gated on the XS
   feature.  The first group also requires TLBID and takes the TLBID-style
   optional Xt operand (F_TLBID_XT); the second group is gated only on POE2.
   The table ends with a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_plbi[] =
{
#define PLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS, AARCH64_FEATURES (2, TLBID, POE2) }, \
    { OP "nxs", CODE | CPENS (0, 0, C8, 0), FLAGS, AARCH64_FEATURES (3, TLBID, XS, POE2) },

    PLBI_XS_OP ( "alle1is",	CPENS (4, C10, C3, 4),	F_TLBID_XT)
    PLBI_XS_OP ( "alle1os",	CPENS (4, C10, C1, 4),	F_TLBID_XT)
    PLBI_XS_OP ( "alle2is",	CPENS (4, C10, C3, 0),	F_TLBID_XT)
    PLBI_XS_OP ( "alle2os",	CPENS (4, C10, C1, 0),	F_TLBID_XT)
    PLBI_XS_OP ( "vmalle1is",	CPENS (0, C10, C3, 0),	F_TLBID_XT)
    PLBI_XS_OP ( "vmalle1os",	CPENS (0, C10, C1, 0),	F_TLBID_XT)

#undef PLBI_XS_OP

#define PLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS, AARCH64_FEATURE (POE2) }, \
    { OP "nxs", CODE | CPENS (0, 0, C8, 0), FLAGS, AARCH64_FEATURES (2, POE2, XS) },

    PLBI_XS_OP ( "alle1",	CPENS (4, C10, C7, 4),	0 )
    PLBI_XS_OP ( "alle2",	CPENS (4, C10, C7, 0),	0 )
    PLBI_XS_OP ( "alle3",	CPENS (6, C10, C7, 0),	0 )
    PLBI_XS_OP ( "alle3is",	CPENS (6, C10, C3, 0),	0 )
    PLBI_XS_OP ( "alle3os",	CPENS (6, C10, C1, 0),	0 )
    PLBI_XS_OP ( "aside1",	CPENS (0, C10, C7, 2),	F_HASXT )
    PLBI_XS_OP ( "aside1is",	CPENS (0, C10, C3, 2),	F_HASXT )
    PLBI_XS_OP ( "aside1os",	CPENS (0, C10, C1, 2),	F_HASXT )
    PLBI_XS_OP ( "permae1",	CPENS (0, C10, C7, 3),	F_HASXT )
    PLBI_XS_OP ( "permae1is",	CPENS (0, C10, C3, 3),	F_HASXT )
    PLBI_XS_OP ( "permae1os",	CPENS (0, C10, C1, 3),	F_HASXT )
    PLBI_XS_OP ( "perme1",	CPENS (0, C10, C7, 1),	F_HASXT )
    PLBI_XS_OP ( "perme1is",	CPENS (0, C10, C3, 1),	F_HASXT )
    PLBI_XS_OP ( "perme1os",	CPENS (0, C10, C1, 1),	F_HASXT )
    PLBI_XS_OP ( "perme2",	CPENS (4, C10, C7, 1),	F_HASXT )
    PLBI_XS_OP ( "perme2is",	CPENS (4, C10, C3, 1),	F_HASXT )
    PLBI_XS_OP ( "perme2os",	CPENS (4, C10, C1, 1),	F_HASXT )
    PLBI_XS_OP ( "perme3",	CPENS (6, C10, C7, 1),	F_HASXT )
    PLBI_XS_OP ( "perme3is",	CPENS (6, C10, C3, 1),	F_HASXT )
    PLBI_XS_OP ( "perme3os",	CPENS (6, C10, C1, 1),	F_HASXT )
    PLBI_XS_OP ( "vmalle1",	CPENS (0, C10, C7, 0),	0 )

#undef PLBI_XS_OP

    { 0,	CPENS (0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5569 
/* MLBI system instruction operands; every entry is gated on the MPAMv2
   feature.  The vpide1/vpmge1 forms take an Xt register operand (F_HASXT).
   The table ends with a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_mlbi[] =
{
    { "alle1",    CPENS (4, C7, C0, 4), 0, AARCH64_FEATURE (MPAMv2)},
    { "vmalle1",  CPENS (4, C7, C0, 5), 0, AARCH64_FEATURE (MPAMv2)},
    { "vpide1",   CPENS (4, C7, C0, 6), F_HASXT, AARCH64_FEATURE (MPAMv2)},
    { "vpmge1",   CPENS (4, C7, C0, 7), F_HASXT, AARCH64_FEATURE (MPAMv2)},
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5578 
/* GIC system instruction operands.  The rows fall into three groups that
   differ only in op1: 0 for the cd* forms, 4 for vd*, 6 for ld*.  No feature
   gating is applied at this level (AARCH64_NO_FEATURES); presumably the
   gating is done on the instructions that use this table — confirm against
   the opcode table.  Ends with a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_ins_gic[] =
{
    { "cdaff", CPENS (0,C12,C1,3), 0, AARCH64_NO_FEATURES },
    { "cddi", CPENS (0,C12,C2,0), 0, AARCH64_NO_FEATURES },
    { "cddis", CPENS (0,C12,C1,0), 0, AARCH64_NO_FEATURES },
    { "cden", CPENS (0,C12,C1,1), 0, AARCH64_NO_FEATURES },
    { "cdeoi", CPENS (0,C12,C1,7), 0, AARCH64_NO_FEATURES },
    { "cdhm", CPENS (0,C12,C2,1), 0, AARCH64_NO_FEATURES },
    { "cdpend", CPENS (0,C12,C1,4), 0, AARCH64_NO_FEATURES },
    { "cdpri", CPENS (0,C12,C1,2), 0, AARCH64_NO_FEATURES },
    { "cdrcfg", CPENS (0,C12,C1,5), 0, AARCH64_NO_FEATURES },
    { "vdaff", CPENS (4,C12,C1,3), 0, AARCH64_NO_FEATURES },
    { "vddi", CPENS (4,C12,C2,0), 0, AARCH64_NO_FEATURES },
    { "vddis", CPENS (4,C12,C1,0), 0, AARCH64_NO_FEATURES },
    { "vden", CPENS (4,C12,C1,1), 0, AARCH64_NO_FEATURES },
    { "vdhm", CPENS (4,C12,C2,1), 0, AARCH64_NO_FEATURES },
    { "vdpend", CPENS (4,C12,C1,4), 0, AARCH64_NO_FEATURES },
    { "vdpri", CPENS (4,C12,C1,2), 0, AARCH64_NO_FEATURES },
    { "vdrcfg", CPENS (4,C12,C1,5), 0, AARCH64_NO_FEATURES },
    { "ldaff", CPENS (6,C12,C1,3), 0, AARCH64_NO_FEATURES },
    { "lddi", CPENS (6,C12,C2,0), 0, AARCH64_NO_FEATURES },
    { "lddis", CPENS (6,C12,C1,0), 0, AARCH64_NO_FEATURES },
    { "lden", CPENS (6,C12,C1,1), 0, AARCH64_NO_FEATURES },
    { "ldhm", CPENS (6,C12,C2,1), 0, AARCH64_NO_FEATURES },
    { "ldpend", CPENS (6,C12,C1,4), 0, AARCH64_NO_FEATURES },
    { "ldpri", CPENS (6,C12,C1,2), 0, AARCH64_NO_FEATURES },
    { "ldrcfg", CPENS (6,C12,C1,5), 0, AARCH64_NO_FEATURES },
    { 0, CPENS (0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5608 
/* GICR system instruction operands (op1 == 0 in both rows); no feature
   gating is applied at this level.  Ends with a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_ins_gicr[] =
{
    { "cdia", CPENS (0,C12,C3,0), 0, AARCH64_NO_FEATURES },
    { "cdnmia", CPENS (0,C12,C3,1), 0, AARCH64_NO_FEATURES },
    { 0, CPENS (0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5615 
/* GSB operand values ("sys" and "ack"), distinguished only by op2; no
   feature gating is applied at this level.  Ends with a null-name
   sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_ins_gsb[] =
{
    { "sys", CPENS (0,C12,0,0), 0, AARCH64_NO_FEATURES },
    { "ack", CPENS (0,C12,0,1), 0, AARCH64_NO_FEATURES },
    { 0, CPENS (0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5622 
/* Speculation-restriction (SR) operand table used by the cfp/dvp/cpp
   instructions; gated on the PREDRES feature.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5632 
   5633 bool
   5634 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
   5635 {
   5636   return (sys_ins_reg->flags & F_HASXT) != 0;
   5637 }
   5638 
   5639 bool
   5640 aarch64_sys_ins_reg_tlbid_xt (const aarch64_sys_ins_reg *sys_ins_reg)
   5641 {
   5642   return (sys_ins_reg->flags & F_TLBID_XT) != 0;
   5643 }
   5644 
   5645 extern bool
   5646 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
   5647 				 const char *reg_name,
   5648 				 const aarch64_feature_set *reg_features)
   5649 {
   5650   /* Armv8-R has no EL3.  */
   5651   if (AARCH64_CPU_HAS_FEATURE (features, V8R))
   5652     {
   5653       const char *suffix = strrchr (reg_name, '_');
   5654       if (suffix && !strcmp (suffix, "_el3"))
   5655 	return false;
   5656     }
   5657 
   5658   return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features);
   5659 }
   5660 
   5661 #undef C0
   5662 #undef C1
   5663 #undef C2
   5664 #undef C3
   5665 #undef C4
   5666 #undef C5
   5667 #undef C6
   5668 #undef C7
   5669 #undef C8
   5670 #undef C9
   5671 #undef C10
   5672 #undef C11
   5673 #undef C12
   5674 #undef C13
   5675 #undef C14
   5676 #undef C15
   5677 
   5678 #define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
   5679 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
   5680 
   5681 static enum err_type
   5682 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
   5683 	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
   5684 	      bool encoding ATTRIBUTE_UNUSED,
   5685 	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   5686 	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   5687 {
   5688   int t  = BITS (insn, 4, 0);
   5689   int n  = BITS (insn, 9, 5);
   5690   int t2 = BITS (insn, 14, 10);
   5691 
   5692   if (BIT (insn, 23))
   5693     {
   5694       /* Write back enabled.  */
   5695       if ((t == n || t2 == n) && n != 31)
   5696 	return ERR_UND;
   5697     }
   5698 
   5699   if (BIT (insn, 22))
   5700     {
   5701       /* Load */
   5702       if (t == t2)
   5703 	return ERR_UND;
   5704     }
   5705 
   5706   return ERR_OK;
   5707 }
   5708 
   5709 /* Verifier for vector by element 3 operands functions where the
   5710    conditions `if sz:L == 11 then UNDEFINED` holds.  */
   5711 
   5712 static enum err_type
   5713 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
   5714 		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
   5715 		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   5716 		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   5717 {
   5718   const aarch64_insn undef_pattern = 0x3;
   5719   aarch64_insn value;
   5720 
   5721   assert (inst->opcode);
   5722   assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
   5723   value = encoding ? inst->value : insn;
   5724   assert (value);
   5725 
   5726   if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
   5727     return ERR_UND;
   5728 
   5729   return ERR_OK;
   5730 }
   5731 
   5732 /* Check an instruction that takes three register operands and that
   5733    requires the register numbers to be distinct from one another.  */
   5734 
   5735 static enum err_type
   5736 verify_three_different_regs (const struct aarch64_inst *inst,
   5737 			     const aarch64_insn insn ATTRIBUTE_UNUSED,
   5738 			     bfd_vma pc ATTRIBUTE_UNUSED,
   5739 			     bool encoding ATTRIBUTE_UNUSED,
   5740 			     aarch64_operand_error *mismatch_detail
   5741 			       ATTRIBUTE_UNUSED,
   5742 			     aarch64_instr_sequence *insn_sequence
   5743 			       ATTRIBUTE_UNUSED)
   5744 {
   5745   int rd, rs, rn;
   5746 
   5747   rd = inst->operands[0].reg.regno;
   5748   rs = inst->operands[1].reg.regno;
   5749   rn = inst->operands[2].reg.regno;
   5750   if (rd == rs || rd == rn || rs == rn)
   5751     {
   5752       mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
   5753       mismatch_detail->error
   5754 	= _("the three register operands must be distinct from one another");
   5755       mismatch_detail->index = -1;
   5756       return ERR_UND;
   5757     }
   5758 
   5759   return ERR_OK;
   5760 }
   5761 
   5762 /* Check an instruction that takes two register operands and that
   5763    requires the register numbers to be distinct from each another.  */
   5764 
   5765 static enum err_type
   5766 verify_two_diff_regs (const struct aarch64_inst *inst,
   5767 			     const aarch64_insn insn ATTRIBUTE_UNUSED,
   5768 			     bfd_vma pc ATTRIBUTE_UNUSED,
   5769 			     bool encoding ATTRIBUTE_UNUSED,
   5770 			     aarch64_operand_error *mismatch_detail
   5771 			       ATTRIBUTE_UNUSED,
   5772 			     aarch64_instr_sequence *insn_sequence
   5773 			       ATTRIBUTE_UNUSED)
   5774 {
   5775   int rd, rn;
   5776 
   5777   rd = inst->operands[0].reg.regno;
   5778   rn = inst->operands[1].reg.regno;
   5779   if (rd == rn)
   5780     {
   5781       mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
   5782       mismatch_detail->error
   5783 	= _("the two register operands must be distinct from each other");
   5784       mismatch_detail->index = -1;
   5785       return ERR_UND;
   5786     }
   5787 
   5788   return ERR_OK;
   5789 }
   5790 
/* Add INST (copied by value) to the end of INSN_SEQUENCE.

   Note: there is no bounds check here; callers must not add more than
   the num_allocated_insns entries reserved by init_insn_sequence.  */

static void
add_insn_to_sequence (const struct aarch64_inst *inst,
		      aarch64_instr_sequence *insn_sequence)
{
  insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
}
   5799 
   5800 /* Initialize an instruction sequence insn_sequence with the instruction INST.
   5801    If INST is NULL the given insn_sequence is cleared and the sequence is left
   5802    uninitialized.  */
   5803 
   5804 void
   5805 init_insn_sequence (const struct aarch64_inst *inst,
   5806 		    aarch64_instr_sequence *insn_sequence)
   5807 {
   5808   int num_req_entries = 0;
   5809 
   5810   if (insn_sequence->instr)
   5811     {
   5812       XDELETE (insn_sequence->instr);
   5813       insn_sequence->instr = NULL;
   5814     }
   5815 
   5816   /* Handle all the cases here.  May need to think of something smarter than
   5817      a giant if/else chain if this grows.  At that time, a lookup table may be
   5818      best.  */
   5819   if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
   5820     num_req_entries = 1;
   5821   if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
   5822     num_req_entries = 2;
   5823 
   5824   insn_sequence->num_added_insns = 0;
   5825   insn_sequence->num_allocated_insns = num_req_entries;
   5826 
   5827   if (num_req_entries != 0)
   5828     {
   5829       insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
   5830       add_insn_to_sequence (inst, insn_sequence);
   5831     }
   5832 }
   5833 
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* PREV_INSN is the last instruction recorded in the open sequence,
     if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The pointer arithmetic below (opcode - 1, opcode[1], opcode[-1])
     relies on the elements of a MOPS sequence occupying consecutive
     entries in the opcode table, so "opcode - 1" names the entry that
     INST would have to be in order to follow PREV_INSN.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* INST itself carries a MOPS sequencing constraint: it must
	 directly follow the preceding opcode-table entry, and not
	 start a new section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The destination, source and size registers must match those of
	 the previous instruction in the sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
   5910 
/*  This function verifies that the instruction INST adheres to its specified
    constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
    returned and MISMATCH_DETAIL contains the reason why verification failed.

    The function is called both during assembly and disassembly.  If assembling
    then ENCODING will be TRUE, else FALSE.  If disassembling PC will be set
    and will contain the PC of the current instruction w.r.t. the section.

    If ENCODING and PC=0 then you are at a start of a section.  The constraints
    are verified against the given state insn_sequence which is updated as it
    transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  /* Fast path: no constraints on this opcode and no sequence open.  */
  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Check MOPS P/M/E sequencing first; on failure drop the open
     sequence unless INST is the middle (M) element.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  bool sve_operand_p = false;
	  for (int i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
	    {
	      enum aarch64_operand_class op_class
		= aarch64_get_operand_class (opcode->operands[i]);
	      if (op_class == AARCH64_OPND_CLASS_SVE_REG
		  || op_class == AARCH64_OPND_CLASS_SVE_REGLIST
		  || op_class == AARCH64_OPND_CLASS_PRED_REG)
		{
		  sve_operand_p = true;
		  break;
		}
	    }

	  if (!sve_operand_p)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan INST's operands, counting how often the movprfx
	     destination register is reused (num_op_used), recording the
	     largest element size seen and any predicate operand.  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	   assert (max_elem_size != 0);
	   aarch64_opnd_info inst_dest = inst->operands[0];
	   /* Determine the size that should be used to compare against the
	      movprfx size.  */
	   current_elem_size
	     = opcode->constraints & C_MAX_ELEM
	       ? max_elem_size
	       : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

      /* Reached both on success and (via goto) on any non-fatal
	 movprfx diagnostic above: either close the sequence or record
	 INST as its next element.  */
    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
   6213 
   6214 
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Mask of the bits above the ESIZE-byte element.  The shift is split
     in two so that ESIZE == 8 (a total shift of 64) stays
     well-defined.  */
  uint64_t outside = (uint64_t) -1 << (esize * 4) << (esize * 4);
  int64_t narrowed = (int64_t) uvalue;

  /* The bits outside the element must be a pure zero- or one-extension
     of the element itself.  */
  bool zeros_above = (uvalue & ~outside) == uvalue;
  bool ones_above = (uvalue | outside) == uvalue;
  if (!zeros_above && !ones_above)
    return false;

  /* Narrow to the smallest element size whose replication still yields
     UVALUE; if it replicates all the way down to one byte, DUP can
     encode it and DUPM is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      narrowed = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  narrowed = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* Strip a trailing zero byte: DUP also accepts a byte immediate
     shifted left by 8.  DUPM is needed only if what remains does not
     fit in a signed byte.  */
  if ((narrowed & 0xff) == 0)
    narrowed /= 256;
  return narrowed < -128 || narrowed >= 128;
}
   6241 
   6242 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
   6243    supports the instruction described by INST.  */
   6244 
   6245 bool
   6246 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
   6247 			     aarch64_inst *inst)
   6248 {
   6249   if (!inst->opcode->avariant
   6250       || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
   6251     return false;
   6252 
   6253   if (inst->opcode->iclass == sme_fp_sd
   6254       && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
   6255       && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
   6256     return false;
   6257 
   6258   if (inst->opcode->iclass == sme_int_sd
   6259       && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
   6260       && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
   6261     return false;
   6262 
   6263   return true;
   6264 }
   6265 
   6266 /* Include the opcode description table as well as the operand description
   6267    table.  */
   6268 #define VERIFIER(x) verify_##x
   6269 #include "aarch64-tbl.h"
   6270