Home | History | Annotate | Line # | Download | only in opcodes
aarch64-opc.c revision 1.12
      1 /* aarch64-opc.c -- AArch64 opcode support.
      2    Copyright (C) 2009-2024 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <assert.h>
     23 #include <stdlib.h>
     24 #include <stdio.h>
     25 #include <stdint.h>
     26 #include <stdarg.h>
     27 #include <inttypes.h>
     28 
     29 #include "opintl.h"
     30 #include "libiberty.h"
     31 
     32 #include "aarch64-opc.h"
     33 
#ifdef DEBUG_AARCH64
/* Runtime switch for debug output; only exists in DEBUG_AARCH64 builds.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
     37 
     38 /* The enumeration strings associated with each value of a 5-bit SVE
     39    pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* Named encodings only; every unlisted index is a reserved pattern and
     remains NULL.  */
  [0]  = "pow2",
  [1]  = "vl1",
  [2]  = "vl2",
  [3]  = "vl3",
  [4]  = "vl4",
  [5]  = "vl5",
  [6]  = "vl6",
  [7]  = "vl7",
  [8]  = "vl8",
  [9]  = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
     78 
     79 /* The enumeration strings associated with each value of a 4-bit SVE
     80    prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* Loads (0-5); 6 and 7 are reserved and stay NULL.  */
  [0]  = "pldl1keep",
  [1]  = "pldl1strm",
  [2]  = "pldl2keep",
  [3]  = "pldl2strm",
  [4]  = "pldl3keep",
  [5]  = "pldl3strm",
  /* Stores (8-13); 14 and 15 are reserved and stay NULL.  */
  [8]  = "pstl1keep",
  [9]  = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm",
};
    101 
    102 /* The enumeration strings associated with each value of a 6-bit RPRFM
    103    operation.  */
const char *const aarch64_rprfmop_array[64] = {
  /* Only four of the 64 encodings are named; the rest stay NULL.  */
  [0] = "pldkeep",
  [1] = "pstkeep",
  [4] = "pldstrm",
  [5] = "pststrm"
};
    112 
    113 /* Vector length multiples for a predicate-as-counter operand.  Used in things
    114    like AARCH64_OPND_SME_VLxN_10.  */
const char *const aarch64_sme_vlxn_array[2] = {
  [0] = "vlx2",
  [1] = "vlx4"
};
    119 
    120 /* Values accepted by the brb alias.  */
const char *const aarch64_brbop_array[] = {
  [0] = "iall",
  [1] = "inj",
};
    125 
    126 /* Helper functions to determine which operand to be used to encode/decode
    127    the size:Q fields for AdvSIMD instructions.  */
    128 
    129 static inline bool
    130 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    131 {
    132   return (qualifier >= AARCH64_OPND_QLF_V_8B
    133 	  && qualifier <= AARCH64_OPND_QLF_V_1Q);
    134 }
    135 
    136 static inline bool
    137 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    138 {
    139   return (qualifier >= AARCH64_OPND_QLF_S_B
    140 	  && qualifier <= AARCH64_OPND_QLF_S_Q);
    141 }
    142 
/* Shape of an instruction's qualifier sequence; used below to decide which
   operand carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. saddlv <V><d>, <Vn>.<T>.  */
};

/* For each data_pattern value, the index of the operand that determines the
   size:Q encoding.  Keep in sync with enum data_pattern above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
    160 
    161 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
    162    the data pattern.
    163    N.B. QUALIFIERS is a possible sequence of qualifiers each of which
    164    corresponds to one of a sequence of operands.  */
    165 
static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  /* N.B. the order of the checks matters: each esize call is guarded by a
     preceding vector_qualifier_p / equality test on the same qualifier.  */
  if (vector_qualifier_p (qualifiers[0]))
    {
      /* All three operands are vectors of the same element size,
	 e.g. v.4s, v.4s, v.4s
	   or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* Destination elements are twice the width of the (second) source,
	 e.g. v.8h, v.8b, v.8b.
           or v.4s, v.4h, v.h[2].
	   or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* First source matches the destination width, second source is
	 half-width, e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]))
    {
      /* Scalar destination, single vector source,
	 e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1])
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
    208 
    209 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
    210    the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either cache the calculated result or statically generate the data;
   however, it is not obvious that the optimization will bring significant
   benefit.  */
    216 
    217 int
    218 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
    219 {
    220   return
    221     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
    222 }
    223 
/* Instruction bit-fields.
   Keep synced with 'enum aarch64_field_kind'.  */
/* Each entry is { lsb, width } of the field within the 32-bit instruction
   word; order must match 'enum aarch64_field_kind' exactly.  */
const aarch64_field fields[] =
{
    {  0,  0 }, /* NIL.  */
    {  8,  4 }, /* CRm: in the system instructions.  */
    { 10,  2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 12,  4 }, /* CRn: in the system instructions.  */
    { 10,  8 }, /* CSSC_imm8.  */
    { 11,  1 }, /* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 }, /* L: in advsimd scalar x indexed element instructions.  */
    {  0,  5 }, /* LSE128_Rt: Shared input+output operand register.  */
    { 16,  5 }, /* LSE128_Rt2: Shared input+output operand register 2.  */
    { 20,  1 }, /* M: in advsimd scalar x indexed element instructions.  */
    { 22,  1 }, /* N: in logical (immediate) instructions.  */
    { 30,  1 }, /* Q: in most AdvSIMD instructions.  */
    { 10,  5 }, /* Ra: in fp instructions.  */
    {  0,  5 }, /* Rd: in many integer instructions.  */
    { 16,  5 }, /* Rm: in ld/st reg offset and some integer inst.  */
    {  5,  5 }, /* Rn: in many integer instructions.  */
    { 16,  5 }, /* Rs: in load/store exclusive instructions.  */
    {  0,  5 }, /* Rt: in load/store instructions.  */
    { 10,  5 }, /* Rt2: in load/store pair instructions.  */
    { 12,  1 }, /* S: in load/store reg offset instructions.  */
    { 12,  2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    {  1,  3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1].  */
    { 13,  3 }, /* SME_Pm: second source scalable predicate register P0-P7.  */
    {  0,  3 }, /* SME_PNd3: PN0-PN7, bits [2:0].  */
    {  5,  3 }, /* SME_PNn3: PN0-PN7, bits [7:5].  */
    { 16,  1 }, /* SME_Q: Q class bit, bit 16.  */
    { 16,  2 }, /* SME_Rm: index base register W12-W15 [17:16].  */
    { 13,  2 }, /* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 15,  1 }, /* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 10,  1 }, /* SME_VL_10: VLx2 or VLx4, bit [10].  */
    { 13,  1 }, /* SME_VL_13: VLx2 or VLx4, bit [13].  */
    {  0,  1 }, /* SME_ZAda_1b: tile ZA0-ZA1.  */
    {  0,  2 }, /* SME_ZAda_2b: tile ZA0-ZA3.  */
    {  0,  3 }, /* SME_ZAda_3b: tile ZA0-ZA7.  */
    {  4,  1 }, /* SME_ZdnT: upper bit of Zt, bit [4].  */
    {  1,  4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1].  */
    {  0,  2 }, /* SME_Zdn2_0: lower 2 bits of Zt, bits [1:0].  */
    {  2,  3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2].  */
    { 16,  4 }, /* SME_Zm: Z0-Z15, bits [19:16].  */
    { 17,  4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17].  */
    { 18,  3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18].  */
    {  6,  4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6].  */
    {  7,  3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7].  */
    {  4,  1 }, /* SME_ZtT: upper bit of Zt, bit [4].  */
    {  0,  3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0].  */
    {  0,  2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0].  */
    { 23,  1 }, /* SME_i1: immediate field, bit 23.  */
    { 12,  2 }, /* SME_size_12: bits [13:12].  */
    { 22,  2 }, /* SME_size_22: size<1>, size<0> class field, [23:22].  */
    { 23,  1 }, /* SME_sz_23: bit [23].  */
    { 22,  1 }, /* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18,  3 }, /* SME_tszl: immediate and qualifier field, bits [20:18].  */
    { 0,   8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
    { 23,  1 }, /* SVE_i1_23: single-bit immediate.  */
    { 22,  2 }, /* SVE_i2: 2-bit index, bits [23,22].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 22,  2 }, /* SVE_i3h3: two high bits of 3bit immediate, bits [23,22].  */
    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 12,  1 }, /* SVE_i3l2: low bit of 3-bit immediate, bit 12.  */
    { 10,  2 }, /* SVE_i4l2: two low bits of 4bit immediate, bits [11,10].  */
    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
    { 16,  4 }, /* SVE_tsz: triangular size select.  */
    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 22,  1 }, /* S_imm10: in LDRAA and LDRAB instructions.  */
    { 16,  3 }, /* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 13,  3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 19,  5 }, /* b40: in the test bit and branch instructions.  */
    { 31,  1 }, /* b5: in the test bit and branch instructions.  */
    { 12,  4 }, /* cmode: in advsimd modified immediate instructions.  */
    { 12,  4 }, /* cond: condition flags as a source operand.  */
    {  0,  4 }, /* cond2: condition in truly conditional-executed inst.  */
    {  5,  5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 21,  2 }, /* hw: in move wide constant instructions.  */
    {  0,  1 }, /* imm1_0: general immediate in bits [0].  */
    {  2,  1 }, /* imm1_2: general immediate in bits [2].  */
    {  3,  1 }, /* imm1_3: general immediate in bits [3].  */
    {  8,  1 }, /* imm1_8: general immediate in bits [8].  */
    { 10,  1 }, /* imm1_10: general immediate in bits [10].  */
    { 14,  1 }, /* imm1_14: general immediate in bits [14].  */
    { 15,  1 }, /* imm1_15: general immediate in bits [15].  */
    { 16,  1 }, /* imm1_16: general immediate in bits [16].  */
    {  0,  2 }, /* imm2_0: general immediate in bits [1:0].  */
    {  1,  2 }, /* imm2_1: general immediate in bits [2:1].  */
    {  2,  2 }, /* imm2_2: general immediate in bits [3:2].  */
    {  8,  2 }, /* imm2_8: general immediate in bits [9:8].  */
    { 10,  2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
    { 12,  2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
    { 13,  2 }, /* imm2_13: 2-bit immediate, bits [14:13] */
    { 15,  2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
    { 16,  2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
    { 19,  2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
    {  0,  3 }, /* imm3_0: general immediate in bits [2:0].  */
    {  5,  3 }, /* imm3_5: general immediate in bits [7:5].  */
    { 10,  3 }, /* imm3_10: in add/sub extended reg instructions.  */
    { 12,  3 }, /* imm3_12: general immediate in bits [14:12].  */
    { 14,  3 }, /* imm3_14: general immediate in bits [16:14].  */
    { 15,  3 }, /* imm3_15: general immediate in bits [17:15].  */
    { 19,  3 }, /* imm3_19: general immediate in bits [21:19].  */
    {  0,  4 }, /* imm4_0: in rmif instructions.  */
    {  5,  4 }, /* imm4_5: in SME instructions.  */
    { 10,  4 }, /* imm4_10: in adddg/subg instructions.  */
    { 11,  4 }, /* imm4_11: in advsimd ext and advsimd ins instructions.  */
    { 14,  4 }, /* imm4_14: general immediate in bits [17:14].  */
    { 16,  5 }, /* imm5: in conditional compare (immediate) instructions.  */
    { 10,  6 }, /* imm6_10: in add/sub reg shifted instructions.  */
    { 15,  6 }, /* imm6_15: in rmif instructions.  */
    { 15,  7 }, /* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 }, /* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 }, /* imm9: in load/store pre/post index instructions.  */
    { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 }, /* imm14: in test bit and branch instructions.  */
    {  0, 16 }, /* imm16_0: in udf instruction. */
    {  5, 16 }, /* imm16_5: in exception instructions.  */
    { 17,  1 }, /* imm17_1: in 1 bit element index.  */
    { 17,  2 }, /* imm17_2: in 2 bits element index.  */
    {  5, 19 }, /* imm19: e.g. in CBZ.  */
    {  0, 26 }, /* imm26: in unconditional branch instructions.  */
    { 16,  3 }, /* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 }, /* immh: in advsimd shift by immediate instructions.  */
    {  5, 19 }, /* immhi: e.g. in ADRP.  */
    { 29,  2 }, /* immlo: e.g. in ADRP.  */
    { 16,  6 }, /* immr: in bitfield and logical immediate instructions.  */
    { 10,  6 }, /* imms: in bitfield and logical immediate instructions.  */
    { 11,  1 }, /* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 }, /* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 30,  2 }, /* ldst_size: size field in ld/st reg offset inst.  */
    { 13,  2 }, /* len: in advsimd tbl/tbx instructions.  */
    { 30,  1 }, /* lse_sz: in LSE extension atomic instructions.  */
    {  0,  4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 29,  1 }, /* op: in AdvSIMD modified immediate instructions.  */
    { 19,  2 }, /* op0: in the system instructions.  */
    { 16,  3 }, /* op1: in the system instructions.  */
    {  5,  3 }, /* op2: in the system instructions.  */
    { 22,  2 }, /* opc: in load/store reg offset instructions.  */
    { 23,  1 }, /* opc1: in load/store reg offset instructions.  */
    { 12,  4 }, /* opcode: in advsimd load/store instructions.  */
    { 13,  3 }, /* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
    { 10,  6 }, /* scale: in the fixed-point scalar to fp converting inst.  */
    { 31,  1 }, /* sf: in integer data processing instructions.  */
    { 22,  2 }, /* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 }, /* size: in most AdvSIMD and floating-point instructions.  */
    { 22,  1 }, /* sz: 1-bit element size select.  */
    { 22,  2 }, /* type: floating point type field in fp data inst.  */
    { 10,  2 }, /* vldst_size: size field in the AdvSIMD load/store inst.  */
    {  5,  3 }, /* off3: immediate offset used to calculate slice number in a
		   ZA tile.  */
    {  5,  2 }, /* off2: immediate offset used to calculate slice number in
		   a ZA tile.  */
    {  7,  1 }, /* ZAn_1: name of the 1bit encoded ZA tile.  */
    {  5,  1 }, /* ol: immediate offset used to calculate slice number in a ZA
		   tile.  */
    {  6,  2 }, /* ZAn_2: name of the 2bit encoded ZA tile.  */
    {  5,  3 }, /* ZAn_3: name of the 3bit encoded ZA tile.  */
    {  6,  1 }, /* ZAn: name of the bit encoded ZA tile.  */
    { 12,  4 }, /* opc2: in rcpc3 ld/st inst deciding the pre/post-index.  */
    { 30,  2 }, /* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width.  */
    {  5,  1 }, /* brbop: used in BRB to mean IALL or INJ.  */
    {  8,  1 }, /* ZA8_1: name of the 1 bit encoded ZA tile ZA0-ZA1.  */
    {  7,  2 }, /* ZA7_2: name of the 2 bits encoded ZA tile ZA0-ZA3.  */
    {  6,  3 }, /* ZA6_3: name of the 3 bits encoded ZA tile ZA0-ZA7.  */
    {  5,  4 }, /* ZA5_4: name of the 4 bits encoded ZA tile ZA0-ZA15.  */
};
    443 
    444 enum aarch64_operand_class
    445 aarch64_get_operand_class (enum aarch64_opnd type)
    446 {
    447   return aarch64_operands[type].op_class;
    448 }
    449 
    450 const char *
    451 aarch64_get_operand_name (enum aarch64_opnd type)
    452 {
    453   return aarch64_operands[type].name;
    454 }
    455 
    456 /* Get operand description string.
    457    This is usually for the diagnosis purpose.  */
    458 const char *
    459 aarch64_get_operand_desc (enum aarch64_opnd type)
    460 {
    461   return aarch64_operands[type].desc;
    462 }
    463 
    464 /* Table of all conditional affixes.  */
/* Entry N carries value N, so the table can be indexed directly by the 4-bit
   condition code; flipping bit 0 of the code gives the inverse condition
   (relied upon by get_inverted_cond below).  Alternative spellings after the
   first name are SVE aliases.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
    484 
    485 const aarch64_cond *
    486 get_cond_from_value (aarch64_insn value)
    487 {
    488   assert (value < 16);
    489   return &aarch64_conds[(unsigned int) value];
    490 }
    491 
    492 const aarch64_cond *
    493 get_inverted_cond (const aarch64_cond *cond)
    494 {
    495   return &aarch64_conds[cond->value ^ 0x1];
    496 }
    497 
    498 /* Table describing the operand extension/shifting operators; indexed by
    499    enum aarch64_modifier_kind.
    500 
    501    The value column provides the most common values for encoding modifiers,
    502    which enables table-driven encoding/decoding for the modifiers.  */
/* Order must match 'enum aarch64_modifier_kind': the table index is the
   modifier kind (see aarch64_get_operand_modifier), and the value column is
   the common encoding of each modifier.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    /* Shift operators (ROR..LSL).  */
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    /* Extend operators (UXTB..SXTX).  */
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
    523 
    524 enum aarch64_modifier_kind
    525 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
    526 {
    527   return desc - aarch64_operand_modifiers;
    528 }
    529 
    530 aarch64_insn
    531 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
    532 {
    533   return aarch64_operand_modifiers[kind].value;
    534 }
    535 
    536 enum aarch64_modifier_kind
    537 aarch64_get_operand_modifier_from_value (aarch64_insn value,
    538 					 bool extend_p)
    539 {
    540   if (extend_p)
    541     return AARCH64_MOD_UXTB + value;
    542   else
    543     return AARCH64_MOD_LSL - value;
    544 }
    545 
    546 bool
    547 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
    548 {
    549   return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
    550 }
    551 
    552 static inline bool
    553 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
    554 {
    555   return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
    556 }
    557 
/* Barrier option names, indexed by the 4-bit encoding value; encodings with
   no architected name are printed as plain "#0xNN" immediates.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
    577 
/* DSB nXS barrier option names; the value column is the full #imm operand
   value, of which bits CRm<3:2> are what actually gets encoded.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
    585 
    586 /* Table describing the operands supported by the aliases of the HINT
    587    instruction.
    588 
    589    The name column is the operand that is accepted for the alias.  The value
    590    column is the hint number of the alias.  The list of operands is terminated
    591    by NULL in the name column.  */
    592 
const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     The F_NOPRINT flag suppresses the operand when disassembling.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "dsync",	HINT_OPD_DSYNC },	/* GCSB DSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },	/* Terminator.  */
};
    604 
    605 /* op -> op:       load = 0 instruction = 1 store = 2
    606    l  -> level:    1-3
    607    t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
/* Build a 5-bit prfop encoding from operation, level and temporal-ness (see
   the key in the comment above).  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "pldslckeep", B(0, 4, 0) },
  { "pldslcstrm", B(0, 4, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "plislckeep", B(1, 4, 0) },
  { "plislcstrm", B(1, 4, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { "pstslckeep", B(2, 4, 0) },
  { "pstslcstrm", B(2, 4, 1) },
  /* Unnamed encodings are printed as plain immediates.  */
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
    645 
    646 /* Utilities on value constraint.  */
    648 
/* Return true if LOW <= VALUE <= HIGH.  */
static inline bool
value_in_range_p (int64_t value, int64_t low, int64_t high)
{
  if (value < low)
    return false;
  return value <= high;
}
    654 
/* Return true if VALUE is a multiple of ALIGN.  */
static inline bool
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
    661 
/* Return true if the signed VALUE is representable in a two's-complement
   field of WIDTH bits.  */
static inline bool
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  int64_t lim = (uint64_t) 1 << (width - 1);
  return -lim <= value && value < lim;
}
    675 
/* Return true if the non-negative VALUE is representable in an unsigned
   field of WIDTH bits.  */
static inline bool
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  int64_t lim = (uint64_t) 1 << width;
  return 0 <= value && value < lim;
}
    689 
    690 /* Return true if OPERAND is SP or WSP.  */
    691 bool
    692 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
    693 {
    694   return ((aarch64_get_operand_class (operand->type)
    695 	   == AARCH64_OPND_CLASS_INT_REG)
    696 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
    697 	  && operand->reg.regno == 31);
    698 }
    699 
    700 /* Return 1 if OPERAND is XZR or WZP.  */
    701 int
    702 aarch64_zero_register_p (const aarch64_opnd_info *operand)
    703 {
    704   return ((aarch64_get_operand_class (operand->type)
    705 	   == AARCH64_OPND_CLASS_INT_REG)
    706 	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
    707 	  && operand->reg.regno == 31);
    708 }
    709 
    710 /* Return true if the operand *OPERAND that has the operand code
    711    OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
    712    qualified by the qualifier TARGET.  */
    713 
    714 static inline bool
    715 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
    716 			  aarch64_opnd_qualifier_t target)
    717 {
    718   switch (operand->qualifier)
    719     {
    720     case AARCH64_OPND_QLF_W:
    721       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
    722 	return true;
    723       break;
    724     case AARCH64_OPND_QLF_X:
    725       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
    726 	return true;
    727       break;
    728     case AARCH64_OPND_QLF_WSP:
    729       if (target == AARCH64_OPND_QLF_W
    730 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    731 	return true;
    732       break;
    733     case AARCH64_OPND_QLF_SP:
    734       if (target == AARCH64_OPND_QLF_X
    735 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    736 	return true;
    737       break;
    738     default:
    739       break;
    740     }
    741 
    742   return false;
    743 }
    744 
    745 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
    746    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
    747 
    748    Return NIL if more than one expected qualifiers are found.  */
    749 
    750 aarch64_opnd_qualifier_t
    751 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
    752 				int idx,
    753 				const aarch64_opnd_qualifier_t known_qlf,
    754 				int known_idx)
    755 {
    756   int i, saved_i;
    757 
    758   /* Special case.
    759 
    760      When the known qualifier is NIL, we have to assume that there is only
    761      one qualifier sequence in the *QSEQ_LIST and return the corresponding
    762      qualifier directly.  One scenario is that for instruction
    763 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
    764      which has only one possible valid qualifier sequence
    765 	NIL, S_D
    766      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
    767      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
    768 
    769      Because the qualifier NIL has dual roles in the qualifier sequence:
    770      it can mean no qualifier for the operand, or the qualifer sequence is
    771      not in use (when all qualifiers in the sequence are NILs), we have to
    772      handle this special case here.  */
    773   if (((enum aarch64_opnd) known_qlf) == AARCH64_OPND_NIL)
    774     {
    775       assert (((enum aarch64_opnd) qseq_list[0][known_idx]) == AARCH64_OPND_NIL);
    776       return qseq_list[0][idx];
    777     }
    778 
    779   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    780     {
    781       if (qseq_list[i][known_idx] == known_qlf)
    782 	{
    783 	  if (saved_i != -1)
    784 	    /* More than one sequences are found to have KNOWN_QLF at
    785 	       KNOWN_IDX.  */
    786 	    return AARCH64_OPND_QLF_NIL;
    787 	  saved_i = i;
    788 	}
    789     }
    790 
    791   return qseq_list[saved_i][idx];
    792 }
    793 
/* Kinds of operand qualifier; the kind determines how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* Unused/no-qualifier entry.  */
  OQK_OPD_VARIANT,	/* Operand variant: size, count and encoding.  */
  OQK_VALUE_IN_RANGE,	/* Constrains a value to a [lower, upper] range.  */
  OQK_MISC,		/* Miscellaneous; data fields unused.  */
};
    801 
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT:    element size, number of elements, encoding value;
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused;
     other kinds:        all three unused.  */
  int data0;
  int data1;
  int data2;
  /* Description (printable qualifier name, e.g. "8b", "imm_0_31").  */
  const char *desc;
  /* Kind, selecting the interpretation of data0..data2 above.  */
  enum operand_qualifier_kind kind;
};
    814 
    815 /* Indexed by the operand qualifier enumerators.  */
    816 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
    817 {
    818   {0, 0, 0, "NIL", OQK_NIL},
    819 
    820   /* Operand variant qualifiers.
    821      First 3 fields:
    822      element size, number of elements and common value for encoding.  */
    823 
    824   {4, 1, 0x0, "w", OQK_OPD_VARIANT},
    825   {8, 1, 0x1, "x", OQK_OPD_VARIANT},
    826   {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
    827   {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
    828 
    829   {1, 1, 0x0, "b", OQK_OPD_VARIANT},
    830   {2, 1, 0x1, "h", OQK_OPD_VARIANT},
    831   {4, 1, 0x2, "s", OQK_OPD_VARIANT},
    832   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
    833   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
    834   {2, 1, 0x0, "2b", OQK_OPD_VARIANT},
    835   {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
    836   {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
    837 
    838   {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
    839   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
    840   {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
    841   {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
    842   {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
    843   {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
    844   {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
    845   {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
    846   {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
    847   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
    848   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
    849 
    850   {0, 0, 0, "z", OQK_OPD_VARIANT},
    851   {0, 0, 0, "m", OQK_OPD_VARIANT},
    852 
    853   /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
    854   {16, 0, 0, "tag", OQK_OPD_VARIANT},
    855 
    856   /* Qualifiers constraining the value range.
    857      First 3 fields:
    858      Lower bound, higher bound, unused.  */
    859 
    860   {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
    861   {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
    862   {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
    863   {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
    864   {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
    865   {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
    866   {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
    867 
    868   /* Qualifiers for miscellaneous purpose.
    869      First 3 fields:
    870      unused, unused and unused.  */
    871 
    872   {0, 0, 0, "lsl", 0},
    873   {0, 0, 0, "msl", 0},
    874 
    875   {0, 0, 0, "retrieving", 0},
    876 };
    877 
    878 static inline bool
    879 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
    880 {
    881   return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
    882 }
    883 
    884 static inline bool
    885 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
    886 {
    887   return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
    888 }
    889 
    890 const char*
    891 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
    892 {
    893   return aarch64_opnd_qualifiers[qualifier].desc;
    894 }
    895 
    896 /* Given an operand qualifier, return the expected data element size
    897    of a qualified operand.  */
    898 unsigned char
    899 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
    900 {
    901   assert (operand_variant_qualifier_p (qualifier));
    902   return aarch64_opnd_qualifiers[qualifier].data0;
    903 }
    904 
    905 unsigned char
    906 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
    907 {
    908   assert (operand_variant_qualifier_p (qualifier));
    909   return aarch64_opnd_qualifiers[qualifier].data1;
    910 }
    911 
    912 aarch64_insn
    913 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
    914 {
    915   assert (operand_variant_qualifier_p (qualifier));
    916   return aarch64_opnd_qualifiers[qualifier].data2;
    917 }
    918 
    919 static int
    920 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
    921 {
    922   assert (qualifier_value_in_range_constraint_p (qualifier));
    923   return aarch64_opnd_qualifiers[qualifier].data0;
    924 }
    925 
    926 static int
    927 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
    928 {
    929   assert (qualifier_value_in_range_constraint_p (qualifier));
    930   return aarch64_opnd_qualifiers[qualifier].data1;
    931 }
    932 
    933 #ifdef DEBUG_AARCH64
/* printf-style debug tracing helper: emit "#### " followed by the
   formatted message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  va_end (args);
  putchar ('\n');
}
    944 
    945 static inline void
    946 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
    947 {
    948   int i;
    949   printf ("#### \t");
    950   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    951     printf ("%s,", aarch64_get_qualifier_name (*qualifier));
    952   printf ("\n");
    953 }
    954 
    955 static void
    956 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
    957 		       const aarch64_opnd_qualifier_t *qualifier)
    958 {
    959   int i;
    960   aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
    961 
    962   aarch64_verbose ("dump_match_qualifiers:");
    963   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    964     curr[i] = opnd[i].qualifier;
    965   dump_qualifier_sequence (curr);
    966   aarch64_verbose ("against");
    967   dump_qualifier_sequence (qualifier);
    968 }
    969 #endif /* DEBUG_AARCH64 */
    970 
    971 /* This function checks if the given instruction INSN is a destructive
    972    instruction based on the usage of the registers.  It does not recognize
    973    unary destructive instructions.  */
    974 bool
    975 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
    976 {
    977   int i = 0;
    978   const enum aarch64_opnd *opnds = opcode->operands;
    979 
    980   if (opnds[0] == AARCH64_OPND_NIL)
    981     return false;
    982 
    983   while (opnds[++i] != AARCH64_OPND_NIL)
    984     if (opnds[i] == opnds[0])
    985       return true;
    986 
    987   return false;
    988 }
    989 
    990 /* TODO improve this, we can have an extra field at the runtime to
    991    store the number of operands rather than calculating it every time.  */
    992 
    993 int
    994 aarch64_num_of_operands (const aarch64_opcode *opcode)
    995 {
    996   int i = 0;
    997   const enum aarch64_opnd *opnds = opcode->operands;
    998   while (opnds[i++] != AARCH64_OPND_NIL)
    999     ;
   1000   --i;
   1001   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
   1002   return i;
   1003 }
   1004 
   1005 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   1006    If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
   1007 
   1008    Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   1009    This is always 0 if the function succeeds.
   1010 
   1011    N.B. on the entry, it is very likely that only some operands in *INST
   1012    have had their qualifiers been established.
   1013 
   1014    If STOP_AT is not -1, the function will only try to match
   1015    the qualifier sequence for operands before and including the operand
   1016    of index STOP_AT; and on success *RET will only be filled with the first
   1017    (STOP_AT+1) qualifiers.
   1018 
   1019    A couple examples of the matching algorithm:
   1020 
   1021    X,W,NIL should match
   1022    X,W,NIL
   1023 
   1024    NIL,NIL should match
   1025    X  ,NIL
   1026 
   1027    Apart from serving the main encoding routine, this can also be called
   1028    during or after the operand decoding.  */
   1029 
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Clamp STOP_AT: -1 (or anything out of range) means "match all
     operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Count the qualifiers of this sequence that fail to match the
	 operands' established qualifiers.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  The loop above broke as soon as a fully
	 matching sequence was found, so QUALIFIERS_LIST still points at
	 that sequence here.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
   1132 
   1133 /* Operand qualifier matching and resolving.
   1134 
   1135    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   1136    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
   1137 
   1138    Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   1139    This is always 0 if the function succeeds.
   1140 
   1141    if UPDATE_P, update the qualifier(s) in *INST after the matching
   1142    succeeds.  */
   1143 
static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Delegate the matching to aarch64_find_best_match over the opcode's
     full qualifier sequence list (stop_at == -1: match all operands).  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  Stop at the first NIL operand code, which
     terminates the opcode's operand list.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
   1174 
   1175 /* Return TRUE if VALUE is a wide constant that can be moved into a general
   1176    register by MOVZ.
   1177 
   1178    IS32 indicates whether value is a 32-bit immediate or not.
   1179    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   1180    amount will be returned in *SHIFT_AMOUNT.  */
   1181 
   1182 bool
   1183 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
   1184 {
   1185   int amount;
   1186 
   1187   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1188 
   1189   if (is32)
   1190     {
   1191       /* Allow all zeros or all ones in top 32-bits, so that
   1192 	 32-bit constant expressions like ~0x80000000 are
   1193 	 permitted.  */
   1194       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
   1195 	/* Immediate out of range.  */
   1196 	return false;
   1197       value &= 0xffffffff;
   1198     }
   1199 
   1200   /* first, try movz then movn */
   1201   amount = -1;
   1202   if ((value & ((uint64_t) 0xffff << 0)) == value)
   1203     amount = 0;
   1204   else if ((value & ((uint64_t) 0xffff << 16)) == value)
   1205     amount = 16;
   1206   else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
   1207     amount = 32;
   1208   else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
   1209     amount = 48;
   1210 
   1211   if (amount == -1)
   1212     {
   1213       DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1214       return false;
   1215     }
   1216 
   1217   if (shift_amount != NULL)
   1218     *shift_amount = amount;
   1219 
   1220   DEBUG_TRACE ("exit true with amount %d", amount);
   1221 
   1222   return true;
   1223 }
   1224 
   1225 /* Build the accepted values for immediate logical SIMD instructions.
   1226 
   1227    The standard encodings of the immediate value are:
   1228      N      imms     immr         SIMD size  R             S
   1229      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
   1230      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
   1231      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
   1232      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
   1233      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
   1234      0      11110s   00000r       2       UInt(r)       UInt(s)
   1235    where all-ones value of S is reserved.
   1236 
   1237    Let's call E the SIMD size.
   1238 
   1239    The immediate value is: S+1 bits '1' rotated to the right by R.
   1240 
   1241    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   1242    (remember S != E - 1).  */
   1243 
   1244 #define TOTAL_IMM_NB  5334
   1245 
/* A (replicated immediate value, standard encoding) pair for the table of
   valid logical immediates.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid logical immediates, sorted by the imm
   field for bsearch; filled in by build_immediate_table on first use.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
   1253 
   1254 static int
   1255 simd_imm_encoding_cmp(const void *i1, const void *i2)
   1256 {
   1257   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
   1258   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
   1259 
   1260   if (imm1->imm < imm2->imm)
   1261     return -1;
   1262   if (imm1->imm > imm2->imm)
   1263     return +1;
   1264   return 0;
   1265 }
   1266 
   1267 /* immediate bitfield standard encoding
   1268    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1269    1         ssssss     rrrrrr      64        rrrrrr ssssss
   1270    0         0sssss     0rrrrr      32        rrrrr  sssss
   1271    0         10ssss     00rrrr      16        rrrr   ssss
   1272    0         110sss     000rrr      8         rrr    sss
   1273    0         1110ss     0000rr      4         rr     ss
   1274    0         11110s     00000r      2         r      s  */
/* Pack N (IS64), immr (R) and imms (S) into the standard 13-bit
   N:immr:imms bitfield layout.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
   1280 
/* Populate simd_immediates[] with every valid logical immediate (the
   replicated bitmask patterns described above) paired with its standard
   encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* For each element size E = 2, 4, ..., 64 (log_e = 1..6).  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = 0..E-2 (all-ones S is reserved), rotation R = 0..E-1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value for the bsearch in
     aarch64_logical_immediate_p.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
   1346 
   1347 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   1348    be accepted by logical (immediate) instructions
   1349    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
   1350 
   1351    ESIZE is the number of bytes in the decoded immediate value.
   1352    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   1353    VALUE will be returned in *ENCODING.  */
   1354 
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Lazily build the sorted table of valid immediates on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     UPPER masks the bits above the ESIZE-byte value; the shift is split
     in two so that esize == 8 does not shift by 64 (undefined).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted table built above.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
   1398 
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      if (byte == 0xff)
	result |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come
	   from an expanded 8-bit immediate.  */
	return -1;
    }
  return result;
}
   1420 
   1421 /* Utility inline functions for operand_general_constraint_met_p.  */
   1422 
   1423 static inline void
   1424 set_error (aarch64_operand_error *mismatch_detail,
   1425 	   enum aarch64_operand_error_kind kind, int idx,
   1426 	   const char* error)
   1427 {
   1428   if (mismatch_detail == NULL)
   1429     return;
   1430   mismatch_detail->kind = kind;
   1431   mismatch_detail->index = idx;
   1432   mismatch_detail->error = error;
   1433 }
   1434 
   1435 static inline void
   1436 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
   1437 		  const char* error)
   1438 {
   1439   if (mismatch_detail == NULL)
   1440     return;
   1441   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
   1442 }
   1443 
   1444 static inline void
   1445 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
   1446 			 const char *prefix, int lower_bound, int upper_bound)
   1447 {
   1448   if (mismatch_detail == NULL)
   1449     return;
   1450   set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
   1451   mismatch_detail->data[0].s = prefix;
   1452   mismatch_detail->data[1].i = lower_bound;
   1453   mismatch_detail->data[2].i = upper_bound;
   1454 }
   1455 
   1456 static inline void
   1457 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1458 			int idx, int lower_bound, int upper_bound,
   1459 			const char* error)
   1460 {
   1461   if (mismatch_detail == NULL)
   1462     return;
   1463   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
   1464   mismatch_detail->data[0].i = lower_bound;
   1465   mismatch_detail->data[1].i = upper_bound;
   1466 }
   1467 
   1468 static inline void
   1469 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1470 			    int idx, int lower_bound, int upper_bound)
   1471 {
   1472   if (mismatch_detail == NULL)
   1473     return;
   1474   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1475 			  _("immediate value"));
   1476 }
   1477 
   1478 static inline void
   1479 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1480 			       int idx, int lower_bound, int upper_bound)
   1481 {
   1482   if (mismatch_detail == NULL)
   1483     return;
   1484   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1485 			  _("immediate offset"));
   1486 }
   1487 
   1488 static inline void
   1489 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1490 			      int idx, int lower_bound, int upper_bound)
   1491 {
   1492   if (mismatch_detail == NULL)
   1493     return;
   1494   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1495 			  _("register number"));
   1496 }
   1497 
   1498 static inline void
   1499 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1500 				 int idx, int lower_bound, int upper_bound)
   1501 {
   1502   if (mismatch_detail == NULL)
   1503     return;
   1504   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1505 			  _("register element index"));
   1506 }
   1507 
   1508 static inline void
   1509 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1510 				   int idx, int lower_bound, int upper_bound)
   1511 {
   1512   if (mismatch_detail == NULL)
   1513     return;
   1514   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1515 			  _("shift amount"));
   1516 }
   1517 
   1518 /* Report that the MUL modifier in operand IDX should be in the range
   1519    [LOWER_BOUND, UPPER_BOUND].  */
   1520 static inline void
   1521 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1522 				   int idx, int lower_bound, int upper_bound)
   1523 {
   1524   if (mismatch_detail == NULL)
   1525     return;
   1526   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1527 			  _("multiplier"));
   1528 }
   1529 
   1530 static inline void
   1531 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
   1532 		     int alignment)
   1533 {
   1534   if (mismatch_detail == NULL)
   1535     return;
   1536   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
   1537   mismatch_detail->data[0].i = alignment;
   1538 }
   1539 
   1540 static inline void
   1541 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
   1542 			   int expected_num)
   1543 {
   1544   if (mismatch_detail == NULL)
   1545     return;
   1546   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
   1547   mismatch_detail->data[0].i = 1 << expected_num;
   1548 }
   1549 
   1550 static inline void
   1551 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
   1552 			   int expected_num)
   1553 {
   1554   if (mismatch_detail == NULL)
   1555     return;
   1556   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
   1557   mismatch_detail->data[0].i = 1 << expected_num;
   1558 }
   1559 
   1560 static inline void
   1561 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
   1562 		     int idx, int expected)
   1563 {
   1564   if (mismatch_detail == NULL)
   1565     return;
   1566   set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
   1567   mismatch_detail->data[0].i = expected;
   1568 }
   1569 
   1570 static inline void
   1571 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
   1572 		 const char* error)
   1573 {
   1574   if (mismatch_detail == NULL)
   1575     return;
   1576   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
   1577 }
   1578 
   1579 /* Check that indexed register operand OPND has a register in the range
   1580    [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
   1581    PREFIX is the register prefix, such as "z" for SVE vector registers.  */
   1582 
   1583 static bool
   1584 check_reglane (const aarch64_opnd_info *opnd,
   1585 	       aarch64_operand_error *mismatch_detail, int idx,
   1586 	       const char *prefix, int min_regno, int max_regno,
   1587 	       int min_index, int max_index)
   1588 {
   1589   if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
   1590     {
   1591       set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
   1592 			       max_regno);
   1593       return false;
   1594     }
   1595   if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
   1596     {
   1597       set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
   1598 				       max_index);
   1599       return false;
   1600     }
   1601   return true;
   1602 }
   1603 
   1604 /* Check that register list operand OPND has NUM_REGS registers and a
   1605    register stride of STRIDE.  */
   1606 
   1607 static bool
   1608 check_reglist (const aarch64_opnd_info *opnd,
   1609 	       aarch64_operand_error *mismatch_detail, int idx,
   1610 	       int num_regs, int stride)
   1611 {
   1612   if (opnd->reglist.num_regs != num_regs)
   1613     {
   1614       set_reg_list_length_error (mismatch_detail, idx, num_regs);
   1615       return false;
   1616     }
   1617   if (opnd->reglist.stride != stride)
   1618     {
   1619       set_reg_list_stride_error (mismatch_detail, idx, stride);
   1620       return false;
   1621     }
   1622   return true;
   1623 }
   1624 
/* An inclusive [min, max] range for an immediate operand.  */
typedef struct
{
  int64_t min;
  int64_t max;
} imm_range_t;

/* Return the inclusive range of values representable by a SIZE-bit
   immediate field: two's-complement signed when SIGNED_RNG is true,
   otherwise unsigned starting at zero.  SIZE must be below 63 so the
   shifts below stay within int64_t.  */
static imm_range_t
imm_range_min_max (unsigned size, bool signed_rng)
{
  imm_range_t bounds;

  assert (size < 63);
  if (!signed_rng)
    {
      bounds.min = 0;
      bounds.max = (((int64_t) 1) << size) - 1;
    }
  else
    {
      bounds.max = (((int64_t) 1) << (size - 1)) - 1;
      bounds.min = -bounds.max - 1;
    }
  return bounds;
}
   1648 
   1649 /* Check that an immediate value is in the range provided by the
   1650    operand type.  */
   1651 static bool
   1652 check_immediate_out_of_range (int64_t imm,
   1653 			      enum aarch64_opnd type,
   1654 			      aarch64_operand_error *mismatch_detail,
   1655 			      int idx)
   1656 {
   1657   const aarch64_operand *operand = get_operand_from_code (type);
   1658   uint8_t size = get_operand_fields_width (operand);
   1659   bool unsigned_imm = operand_need_unsigned_offset (operand);
   1660   bool (*value_fit_field) (int64_t, unsigned)
   1661     = (unsigned_imm
   1662       ? value_fit_unsigned_field_p
   1663       : value_fit_signed_field_p);
   1664 
   1665   if (!value_fit_field (imm, size))
   1666     {
   1667       imm_range_t rng = imm_range_min_max (size, !unsigned_imm);
   1668       set_imm_out_of_range_error (mismatch_detail, idx, rng.min, rng.max);
   1669       return false;
   1670     }
   1671   return true;
   1672 }
   1673 
   1674 /* Check that indexed ZA operand OPND has:
   1675 
   1676    - a selection register in the range [MIN_WREG, MIN_WREG + 3]
   1677 
   1678    - RANGE_SIZE consecutive immediate offsets.
   1679 
   1680    - an initial immediate offset that is a multiple of RANGE_SIZE
   1681      in the range [0, MAX_VALUE * RANGE_SIZE]
   1682 
   1683    - a vector group size of GROUP_SIZE.
   1684 
   1685    - STATUS_VG for cases where VGx2 or VGx4 is mandatory.  */
   1686 static bool
   1687 check_za_access (const aarch64_opnd_info *opnd,
   1688 		 aarch64_operand_error *mismatch_detail, int idx,
   1689 		 int min_wreg, int max_value, unsigned int range_size,
   1690 		 int group_size, bool status_vg)
   1691 {
   1692   if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
   1693     {
   1694       if (min_wreg == 12)
   1695 	set_other_error (mismatch_detail, idx,
   1696 			 _("expected a selection register in the"
   1697 			   " range w12-w15"));
   1698       else if (min_wreg == 8)
   1699 	set_other_error (mismatch_detail, idx,
   1700 			 _("expected a selection register in the"
   1701 			   " range w8-w11"));
   1702       else
   1703 	abort ();
   1704       return false;
   1705     }
   1706 
   1707   int max_index = max_value * range_size;
   1708   if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
   1709     {
   1710       set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
   1711       return false;
   1712     }
   1713 
   1714   if ((opnd->indexed_za.index.imm % range_size) != 0)
   1715     {
   1716       assert (range_size == 2 || range_size == 4);
   1717       set_other_error (mismatch_detail, idx,
   1718 		       range_size == 2
   1719 		       ? _("starting offset is not a multiple of 2")
   1720 		       : _("starting offset is not a multiple of 4"));
   1721       return false;
   1722     }
   1723 
   1724   if (opnd->indexed_za.index.countm1 != range_size - 1)
   1725     {
   1726       if (range_size == 1)
   1727 	set_other_error (mismatch_detail, idx,
   1728 			 _("expected a single offset rather than"
   1729 			   " a range"));
   1730       else if (range_size == 2)
   1731 	set_other_error (mismatch_detail, idx,
   1732 			 _("expected a range of two offsets"));
   1733       else if (range_size == 4)
   1734 	set_other_error (mismatch_detail, idx,
   1735 			 _("expected a range of four offsets"));
   1736       else
   1737 	abort ();
   1738       return false;
   1739     }
   1740 
   1741   /* The vector group specifier is optional in assembly code.  */
   1742   if (opnd->indexed_za.group_size != group_size
   1743       && (status_vg || opnd->indexed_za.group_size != 0 ))
   1744     {
   1745       set_invalid_vg_size (mismatch_detail, idx, group_size);
   1746       return false;
   1747     }
   1748 
   1749   return true;
   1750 }
   1751 
   1752 /* Given a load/store operation, calculate the size of transferred data via a
   1753    cumulative sum of qualifier sizes preceding the address operand in the
   1754    OPNDS operand list argument.  */
   1755 int
   1756 calc_ldst_datasize (const aarch64_opnd_info *opnds)
   1757 {
   1758   unsigned num_bytes = 0; /* total number of bytes transferred.  */
   1759   enum aarch64_operand_class opnd_class;
   1760   enum aarch64_opnd type;
   1761 
   1762   for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
   1763     {
   1764       type = opnds[i].type;
   1765       opnd_class = aarch64_operands[type].op_class;
   1766       if (opnd_class == AARCH64_OPND_CLASS_ADDRESS)
   1767 	break;
   1768       num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier);
   1769     }
   1770   return num_bytes;
   1771 }
   1772 
   1773 
   1774 /* General constraint checking based on operand code.
   1775 
   Return true if OPNDS[IDX] meets the general constraint of operand code
   TYPE as the IDXth operand of opcode OPCODE.  Otherwise return false.
   1778 
   1779    This function has to be called after the qualifiers for all operands
   1780    have been resolved.
   1781 
   1782    Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   1783    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   1784    of error message during the disassembling where error message is not
   1785    wanted.  We avoid the dynamic construction of strings of error messages
   1786    here (i.e. in libopcodes), as it is costly and complicated; instead, we
   1787    use a combination of error code, static string and some integer data to
   1788    represent an error.  */
   1789 
   1790 static bool
   1791 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
   1792 				  enum aarch64_opnd type,
   1793 				  const aarch64_opcode *opcode,
   1794 				  aarch64_operand_error *mismatch_detail)
   1795 {
   1796   unsigned num, modifiers, shift;
   1797   unsigned char size;
   1798   int64_t imm, min_value, max_value;
   1799   uint64_t uvalue, mask;
   1800   const aarch64_opnd_info *opnd = opnds + idx;
   1801   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
   1802   int i;
   1803 
   1804   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
   1805 
   1806   switch (aarch64_operands[type].op_class)
   1807     {
   1808     case AARCH64_OPND_CLASS_INT_REG:
   1809       /* Check for pair of xzr registers.  */
   1810       if (type == AARCH64_OPND_PAIRREG_OR_XZR
   1811 	  && opnds[idx - 1].reg.regno == 0x1f)
   1812 	{
   1813 	  if (opnds[idx].reg.regno != 0x1f)
   1814 	    {
   1815 	      set_syntax_error (mismatch_detail, idx - 1,
   1816 				_("second reg in pair should be xzr if first is"
   1817 				  " xzr"));
   1818 	      return false;
   1819 	    }
   1820 	}
   1821       /* Check pair reg constraints for instructions taking a pair of
   1822 	 consecutively-numbered general-purpose registers.  */
   1823       else if (type == AARCH64_OPND_PAIRREG
   1824 	       || type == AARCH64_OPND_PAIRREG_OR_XZR)
   1825 	{
   1826 	  assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
   1827 	  if (opnds[idx - 1].reg.regno % 2 != 0)
   1828 	    {
   1829 	      set_syntax_error (mismatch_detail, idx - 1,
   1830 				_("reg pair must start from even reg"));
   1831 	      return false;
   1832 	    }
   1833 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
   1834 	    {
   1835 	      set_syntax_error (mismatch_detail, idx,
   1836 				_("reg pair must be contiguous"));
   1837 	      return false;
   1838 	    }
   1839 	  break;
   1840 	}
   1841 
   1842       /* <Xt> may be optional in some IC and TLBI instructions.  */
   1843       if (type == AARCH64_OPND_Rt_SYS)
   1844 	{
   1845 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
   1846 			       == AARCH64_OPND_CLASS_SYSTEM));
   1847 	  if (opnds[1].present
   1848 	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1849 	    {
   1850 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
   1851 	      return false;
   1852 	    }
   1853 	  if (!opnds[1].present
   1854 	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1855 	    {
   1856 	      set_other_error (mismatch_detail, idx, _("missing register"));
   1857 	      return false;
   1858 	    }
   1859 	}
   1860       switch (qualifier)
   1861 	{
   1862 	case AARCH64_OPND_QLF_WSP:
   1863 	case AARCH64_OPND_QLF_SP:
   1864 	  if (!aarch64_stack_pointer_p (opnd))
   1865 	    {
   1866 	      set_other_error (mismatch_detail, idx,
   1867 		       _("stack pointer register expected"));
   1868 	      return false;
   1869 	    }
   1870 	  break;
   1871 	default:
   1872 	  break;
   1873 	}
   1874       break;
   1875 
   1876     case AARCH64_OPND_CLASS_SVE_REG:
   1877       switch (type)
   1878 	{
   1879 	case AARCH64_OPND_SVE_Zm3_INDEX:
   1880 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
   1881 	case AARCH64_OPND_SVE_Zm3_19_INDEX:
   1882 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
   1883 	case AARCH64_OPND_SVE_Zm3_10_INDEX:
   1884 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
   1885 	case AARCH64_OPND_SVE_Zm4_INDEX:
   1886 	  size = get_operand_fields_width (get_operand_from_code (type));
   1887 	  shift = get_operand_specific_data (&aarch64_operands[type]);
   1888 	  if (!check_reglane (opnd, mismatch_detail, idx,
   1889 			      "z", 0, (1 << shift) - 1,
   1890 			      0, (1u << (size - shift)) - 1))
   1891 	    return false;
   1892 	  break;
   1893 
   1894 	case AARCH64_OPND_SVE_Zm1_23_INDEX:
   1895 	  size = get_operand_fields_width (get_operand_from_code (type));
   1896 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 1))
   1897 	    return 0;
   1898 	  break;
   1899 
   1900 	case AARCH64_OPND_SVE_Zm2_22_INDEX:
   1901 	  size = get_operand_fields_width (get_operand_from_code (type));
   1902 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 3))
   1903 	    return 0;
   1904 	  break;
   1905 
   1906 	case AARCH64_OPND_SVE_Zn_INDEX:
   1907 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1908 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1909 			      0, 64 / size - 1))
   1910 	    return false;
   1911 	  break;
   1912 
   1913 	case AARCH64_OPND_SVE_Zn_5_INDEX:
   1914 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1915 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1916 			      0, 16 / size - 1))
   1917 	    return false;
   1918 	  break;
   1919 
   1920 	case AARCH64_OPND_SME_PNn3_INDEX1:
   1921 	case AARCH64_OPND_SME_PNn3_INDEX2:
   1922 	  size = get_operand_field_width (get_operand_from_code (type), 1);
   1923 	  if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
   1924 			      0, (1 << size) - 1))
   1925 	    return false;
   1926 	  break;
   1927 
   1928 	case AARCH64_OPND_SVE_Zm3_12_INDEX:
   1929 	case AARCH64_OPND_SME_Zn_INDEX1_16:
   1930 	case AARCH64_OPND_SME_Zn_INDEX2_15:
   1931 	case AARCH64_OPND_SME_Zn_INDEX2_16:
   1932 	case AARCH64_OPND_SME_Zn_INDEX3_14:
   1933 	case AARCH64_OPND_SME_Zn_INDEX3_15:
   1934 	case AARCH64_OPND_SME_Zn_INDEX4_14:
   1935 	case AARCH64_OPND_SVE_Zn0_INDEX:
   1936 	case AARCH64_OPND_SVE_Zn1_17_INDEX:
   1937 	case AARCH64_OPND_SVE_Zn2_18_INDEX:
   1938 	case AARCH64_OPND_SVE_Zn3_22_INDEX:
   1939 	case AARCH64_OPND_SVE_Zd0_INDEX:
   1940 	case AARCH64_OPND_SVE_Zd1_17_INDEX:
   1941 	case AARCH64_OPND_SVE_Zd2_18_INDEX:
   1942 	case AARCH64_OPND_SVE_Zd3_22_INDEX:
   1943 	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
   1944 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
   1945 			      0, (1 << size) - 1))
   1946 	    return false;
   1947 	  break;
   1948 
   1949 	case AARCH64_OPND_SME_Zm_INDEX1:
   1950 	case AARCH64_OPND_SME_Zm_INDEX2:
   1951 	case AARCH64_OPND_SME_Zm_INDEX2_3:
   1952 	case AARCH64_OPND_SME_Zm_INDEX3_1:
   1953 	case AARCH64_OPND_SME_Zm_INDEX3_2:
   1954 	case AARCH64_OPND_SME_Zm_INDEX3_3:
   1955 	case AARCH64_OPND_SME_Zm_INDEX3_10:
   1956 	case AARCH64_OPND_SME_Zm_INDEX4_1:
   1957 	case AARCH64_OPND_SME_Zm_INDEX4_2:
   1958 	case AARCH64_OPND_SME_Zm_INDEX4_3:
   1959 	case AARCH64_OPND_SME_Zm_INDEX4_10:
   1960 	  size = get_operand_fields_width (get_operand_from_code (type)) - 4;
   1961 	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
   1962 			      0, (1 << size) - 1))
   1963 	    return false;
   1964 	  break;
   1965 
   1966 	case AARCH64_OPND_SME_Zm:
   1967 	  if (opnd->reg.regno > 15)
   1968 	    {
   1969 	      set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
   1970 	      return false;
   1971 	    }
   1972 	  break;
   1973 
   1974 	case AARCH64_OPND_SME_PnT_Wm_imm:
   1975 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1976 	  max_value = 16 / size - 1;
   1977 	  if (!check_za_access (opnd, mismatch_detail, idx,
   1978 				12, max_value, 1, 0, get_opcode_dependent_value (opcode)))
   1979 	    return false;
   1980 	  break;
   1981 
   1982 	default:
   1983 	  break;
   1984 	}
   1985       break;
   1986 
   1987     case AARCH64_OPND_CLASS_SVE_REGLIST:
   1988       switch (type)
   1989 	{
   1990 	case AARCH64_OPND_SME_Pdx2:
   1991 	case AARCH64_OPND_SME_Zdnx2:
   1992 	case AARCH64_OPND_SME_Zdnx4:
   1993 	case AARCH64_OPND_SME_Zmx2:
   1994 	case AARCH64_OPND_SME_Zmx4:
   1995 	case AARCH64_OPND_SME_Znx2:
   1996 	case AARCH64_OPND_SME_Znx2_BIT_INDEX:
   1997 	case AARCH64_OPND_SME_Znx4:
   1998 	  num = get_operand_specific_data (&aarch64_operands[type]);
   1999 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2000 	    return false;
   2001 	  if ((opnd->reglist.first_regno % num) != 0)
   2002 	    {
   2003 	      set_other_error (mismatch_detail, idx,
   2004 			       _("start register out of range"));
   2005 	      return false;
   2006 	    }
   2007 	  break;
   2008 
   2009 	case AARCH64_OPND_SME_Zdnx4_STRIDED:
   2010 	case AARCH64_OPND_SME_Ztx2_STRIDED:
   2011 	case AARCH64_OPND_SME_Ztx4_STRIDED:
   2012 	  /* 2-register lists have a stride of 8 and 4-register lists
   2013 	     have a stride of 4.  */
   2014 	  num = get_operand_specific_data (&aarch64_operands[type]);
   2015 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
   2016 	    return false;
   2017 	  num = 16 | (opnd->reglist.stride - 1);
   2018 	  if ((opnd->reglist.first_regno & ~num) != 0)
   2019 	    {
   2020 	      set_other_error (mismatch_detail, idx,
   2021 			       _("start register out of range"));
   2022 	      return false;
   2023 	    }
   2024 	  break;
   2025 
   2026 	case AARCH64_OPND_SME_PdxN:
   2027 	case AARCH64_OPND_SVE_ZnxN:
   2028 	case AARCH64_OPND_SVE_ZtxN:
   2029 	  num = get_opcode_dependent_value (opcode);
   2030 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2031 	    return false;
   2032 	  break;
   2033 
   2034 	default:
   2035 	  abort ();
   2036 	}
   2037       break;
   2038 
   2039     case AARCH64_OPND_CLASS_ZA_ACCESS:
   2040       switch (type)
   2041 	{
   2042 	case AARCH64_OPND_SME_ZA_HV_idx_src:
   2043 	case AARCH64_OPND_SME_ZA_HV_idx_dest:
   2044 	case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
   2045 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2046 	  max_value = 16 / size - 1;
   2047 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
   2048 				get_opcode_dependent_value (opcode),
   2049 				get_opcode_dependent_vg_status (opcode)))
   2050 	    return false;
   2051 	  break;
   2052 
   2053 	case AARCH64_OPND_SME_ZA_array_off4:
   2054 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
   2055 				get_opcode_dependent_value (opcode),
   2056 				get_opcode_dependent_vg_status (opcode)))
   2057 	    return false;
   2058 	  break;
   2059 
   2060 	case AARCH64_OPND_SME_ZA_array_off3_0:
   2061 	case AARCH64_OPND_SME_ZA_array_off3_5:
   2062 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
   2063 				get_opcode_dependent_value (opcode),
   2064 				get_opcode_dependent_vg_status (opcode)))
   2065 	    return false;
   2066 	  break;
   2067 
   2068 	case AARCH64_OPND_SME_ZA_array_off1x4:
   2069 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
   2070 				get_opcode_dependent_value (opcode),
   2071 				get_opcode_dependent_vg_status (opcode)))
   2072 	    return false;
   2073 	  break;
   2074 
   2075 	case AARCH64_OPND_SME_ZA_array_off2x2:
   2076 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
   2077 				get_opcode_dependent_value (opcode),
   2078 				get_opcode_dependent_vg_status (opcode)))
   2079 	    return false;
   2080 	  break;
   2081 
   2082 	case AARCH64_OPND_SME_ZA_array_off2x4:
   2083 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
   2084 				get_opcode_dependent_value (opcode),
   2085 				get_opcode_dependent_vg_status (opcode)))
   2086 	    return false;
   2087 	  break;
   2088 
   2089 	case AARCH64_OPND_SME_ZA_array_off3x2:
   2090 	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
   2091 				get_opcode_dependent_value (opcode),
   2092 				get_opcode_dependent_vg_status (opcode)))
   2093 	    return false;
   2094 	  break;
   2095 
   2096 	case AARCH64_OPND_SME_ZA_array_vrsb_1:
   2097 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
   2098 				get_opcode_dependent_value (opcode),
   2099 				get_opcode_dependent_vg_status (opcode)))
   2100 	    return false;
   2101 	  break;
   2102 
   2103 	case AARCH64_OPND_SME_ZA_array_vrsh_1:
   2104 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
   2105 				get_opcode_dependent_value (opcode),
   2106 				get_opcode_dependent_vg_status (opcode)))
   2107 	    return false;
   2108 	  break;
   2109 
   2110 	case AARCH64_OPND_SME_ZA_array_vrss_1:
   2111 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
   2112 				get_opcode_dependent_value (opcode),
   2113 				get_opcode_dependent_vg_status (opcode)))
   2114 	    return false;
   2115 	  break;
   2116 
   2117 	case AARCH64_OPND_SME_ZA_array_vrsd_1:
   2118 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
   2119 				get_opcode_dependent_value (opcode),
   2120 				get_opcode_dependent_vg_status (opcode)))
   2121 	    return false;
   2122 	  break;
   2123 
   2124 	case AARCH64_OPND_SME_ZA_array_vrsb_2:
   2125 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
   2126 				get_opcode_dependent_value (opcode),
   2127 				get_opcode_dependent_vg_status (opcode)))
   2128 	    return false;
   2129 	  break;
   2130 
   2131 	case AARCH64_OPND_SME_ZA_array_vrsh_2:
   2132 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
   2133 				get_opcode_dependent_value (opcode),
   2134 				get_opcode_dependent_vg_status (opcode)))
   2135 	    return false;
   2136 	  break;
   2137 
   2138 	case AARCH64_OPND_SME_ZA_ARRAY4:
   2139 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
   2140 				get_opcode_dependent_value (opcode),
   2141 				get_opcode_dependent_vg_status (opcode)))
   2142 	    return false;
   2143 	  break;
   2144 
   2145 	case AARCH64_OPND_SME_ZA_array_vrss_2:
   2146 	case AARCH64_OPND_SME_ZA_array_vrsd_2:
   2147 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
   2148 				get_opcode_dependent_value (opcode),
   2149 				get_opcode_dependent_vg_status (opcode)))
   2150 	    return false;
   2151 	  break;
   2152 
   2153 	case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
   2154 	case AARCH64_OPND_SME_ZA_HV_idx_destxN:
   2155 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2156 	  num = get_opcode_dependent_value (opcode);
   2157 	  max_value = 16 / num / size;
   2158 	  if (max_value > 0)
   2159 	    max_value -= 1;
   2160 	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, num,
   2161 				0, get_opcode_dependent_value (opcode)))
   2162 	    return false;
   2163 	  break;
   2164 
   2165 	default:
   2166 	  abort ();
   2167 	}
   2168       break;
   2169 
   2170     case AARCH64_OPND_CLASS_PRED_REG:
   2171       switch (type)
   2172 	{
   2173 	case AARCH64_OPND_SME_PNd3:
   2174 	case AARCH64_OPND_SME_PNg3:
   2175 	  if (opnd->reg.regno < 8)
   2176 	    {
   2177 	      set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
   2178 	      return false;
   2179 	    }
   2180 	  break;
   2181 
   2182 	default:
   2183 	  if (opnd->reg.regno >= 8
   2184 	      && get_operand_fields_width (get_operand_from_code (type)) == 3)
   2185 	    {
   2186 	      set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
   2187 	      return false;
   2188 	    }
   2189 	  break;
   2190 	}
   2191       break;
   2192 
   2193     case AARCH64_OPND_CLASS_COND:
   2194       if (type == AARCH64_OPND_COND1
   2195 	  && (opnds[idx].cond->value & 0xe) == 0xe)
   2196 	{
   2197 	  /* Not allow AL or NV.  */
   2198 	  set_syntax_error (mismatch_detail, idx, NULL);
   2199 	}
   2200       break;
   2201 
   2202     case AARCH64_OPND_CLASS_ADDRESS:
   2203       /* Check writeback.  */
   2204       switch (opcode->iclass)
   2205 	{
   2206 	case ldst_pos:
   2207 	case ldst_unscaled:
   2208 	case ldstnapair_offs:
   2209 	case ldstpair_off:
   2210 	case ldst_unpriv:
   2211 	  if (opnd->addr.writeback == 1)
   2212 	    {
   2213 	      set_syntax_error (mismatch_detail, idx,
   2214 				_("unexpected address writeback"));
   2215 	      return false;
   2216 	    }
   2217 	  break;
   2218 	case ldst_imm10:
   2219 	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
   2220 	    {
   2221 	      set_syntax_error (mismatch_detail, idx,
   2222 				_("unexpected address writeback"));
   2223 	      return false;
   2224 	    }
   2225 	  break;
   2226 	case ldst_imm9:
   2227 	case ldstpair_indexed:
   2228 	case asisdlsep:
   2229 	case asisdlsop:
   2230 	  if (opnd->addr.writeback == 0)
   2231 	    {
   2232 	      set_syntax_error (mismatch_detail, idx,
   2233 				_("address writeback expected"));
   2234 	      return false;
   2235 	    }
   2236 	  break;
   2237 	case rcpc3:
   2238 	  if (opnd->addr.writeback)
   2239 	    if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB
   2240 		 && !opnd->addr.preind)
   2241 		|| (type == AARCH64_OPND_RCPC3_ADDR_POSTIND
   2242 		    && !opnd->addr.postind))
   2243 	      {
   2244 		set_syntax_error (mismatch_detail, idx,
   2245 				  _("unexpected address writeback"));
   2246 		return false;
   2247 	      }
   2248 
   2249 	  break;
   2250 	default:
   2251 	  assert (opnd->addr.writeback == 0);
   2252 	  break;
   2253 	}
   2254       switch (type)
   2255 	{
   2256 	case AARCH64_OPND_ADDR_SIMM7:
   2257 	  /* Scaled signed 7 bits immediate offset.  */
   2258 	  /* Get the size of the data element that is accessed, which may be
   2259 	     different from that of the source register size,
   2260 	     e.g. in strb/ldrb.  */
   2261 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2262 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
   2263 	    {
   2264 	      set_offset_out_of_range_error (mismatch_detail, idx,
   2265 					     -64 * size, 63 * size);
   2266 	      return false;
   2267 	    }
   2268 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   2269 	    {
   2270 	      set_unaligned_error (mismatch_detail, idx, size);
   2271 	      return false;
   2272 	    }
   2273 	  break;
   2274 	case AARCH64_OPND_ADDR_OFFSET:
   2275 	case AARCH64_OPND_ADDR_SIMM9:
   2276 	  /* Unscaled signed 9 bits immediate offset.  */
   2277 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   2278 	    {
   2279 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
   2280 	      return false;
   2281 	    }
   2282 	  break;
   2283 
   2284 	case AARCH64_OPND_ADDR_SIMM9_2:
   2285 	  /* Unscaled signed 9 bits immediate offset, which has to be negative
   2286 	     or unaligned.  */
   2287 	  size = aarch64_get_qualifier_esize (qualifier);
   2288 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
   2289 	       && !value_aligned_p (opnd->addr.offset.imm, size))
   2290 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
   2291 	    return true;
   2292 	  set_other_error (mismatch_detail, idx,
   2293 			   _("negative or unaligned offset expected"));
   2294 	  return false;
   2295 
   2296 	case AARCH64_OPND_ADDR_SIMM10:
   2297 	  /* Scaled signed 10 bits immediate offset.  */
   2298 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
   2299 	    {
   2300 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
   2301 	      return false;
   2302 	    }
   2303 	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
   2304 	    {
   2305 	      set_unaligned_error (mismatch_detail, idx, 8);
   2306 	      return false;
   2307 	    }
   2308 	  break;
   2309 
   2310 	case AARCH64_OPND_ADDR_SIMM11:
   2311 	  /* Signed 11 bits immediate offset (multiple of 16).  */
   2312 	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
   2313 	    {
   2314 	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
   2315 	      return false;
   2316 	    }
   2317 
   2318 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   2319 	    {
   2320 	      set_unaligned_error (mismatch_detail, idx, 16);
   2321 	      return false;
   2322 	    }
   2323 	  break;
   2324 
   2325 	case AARCH64_OPND_ADDR_SIMM13:
   2326 	  /* Signed 13 bits immediate offset (multiple of 16).  */
   2327 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
   2328 	    {
   2329 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
   2330 	      return false;
   2331 	    }
   2332 
   2333 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   2334 	    {
   2335 	      set_unaligned_error (mismatch_detail, idx, 16);
   2336 	      return false;
   2337 	    }
   2338 	  break;
   2339 
   2340 	case AARCH64_OPND_SIMD_ADDR_POST:
   2341 	  /* AdvSIMD load/store multiple structures, post-index.  */
   2342 	  assert (idx == 1);
   2343 	  if (opnd->addr.offset.is_reg)
   2344 	    {
   2345 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
   2346 		return true;
   2347 	      else
   2348 		{
   2349 		  set_other_error (mismatch_detail, idx,
   2350 				   _("invalid register offset"));
   2351 		  return false;
   2352 		}
   2353 	    }
   2354 	  else
   2355 	    {
   2356 	      const aarch64_opnd_info *prev = &opnds[idx-1];
   2357 	      unsigned num_bytes; /* total number of bytes transferred.  */
   2358 	      /* The opcode dependent area stores the number of elements in
   2359 		 each structure to be loaded/stored.  */
   2360 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
   2361 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
   2362 		/* Special handling of loading single structure to all lane.  */
   2363 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
   2364 		  * aarch64_get_qualifier_esize (prev->qualifier);
   2365 	      else
   2366 		num_bytes = prev->reglist.num_regs
   2367 		  * aarch64_get_qualifier_esize (prev->qualifier)
   2368 		  * aarch64_get_qualifier_nelem (prev->qualifier);
   2369 	      if ((int) num_bytes != opnd->addr.offset.imm)
   2370 		{
   2371 		  set_other_error (mismatch_detail, idx,
   2372 				   _("invalid post-increment amount"));
   2373 		  return false;
   2374 		}
   2375 	    }
   2376 	  break;
   2377 
   2378 	case AARCH64_OPND_ADDR_REGOFF:
   2379 	  /* Get the size of the data element that is accessed, which may be
   2380 	     different from that of the source register size,
   2381 	     e.g. in strb/ldrb.  */
   2382 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   2383 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
   2384 	  if (opnd->shifter.amount != 0
   2385 	      && opnd->shifter.amount != (int)get_logsz (size))
   2386 	    {
   2387 	      set_other_error (mismatch_detail, idx,
   2388 			       _("invalid shift amount"));
   2389 	      return false;
   2390 	    }
   2391 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
   2392 	     operators.  */
	  /* Only UXTW, LSL, SXTW or SXTX may appear as the extend/shift
	     operator for this register-offset addressing form.  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_UXTW:
	    case AARCH64_MOD_LSL:
	    case AARCH64_MOD_SXTW:
	    case AARCH64_MOD_SXTX: break;
	    default:
	      set_other_error (mismatch_detail, idx,
			       _("invalid extend/shift operator"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  /* Unsigned 12-bit offset, scaled by the access size: must lie in
	     [0, 4095 * size] and be size-aligned.  */
	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     0, 4095 * size);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  {
	    imm = opnd->imm.value;
	    if (operand_need_shift_by_two (get_operand_from_code (type)))
	      {
		/* The offset value in a PC-relative branch instruction is always
		   4-byte aligned and is encoded without the lowest 2 bits.  */
		if (!value_aligned_p (imm, 4))
		  {
		    set_unaligned_error (mismatch_detail, idx, 4);
		    return false;
		  }
		/* Right shift by 2 so that we can carry out the following check
		   canonically.  */
		imm >>= 2;
	      }

	    if (!check_immediate_out_of_range (imm, type, mismatch_detail, idx))
	      return false;
	  }
	  break;

	case AARCH64_OPND_SME_ADDR_RI_U4xVL:
	  /* Unsigned 4-bit immediate offset (0-15).  */
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
	      return false;
	    }
	  break;
   2457 
	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
	  /* Signed 4-bit immediate, later scaled by the register count.  */
	  min_value = -8;
	  max_value = 7;
	sve_imm_offset_vl:
	  /* Shared check for [<base>, #<imm>, MUL VL] addressing.  The
	     operand-specific data holds the register-count multiplier minus
	     one (num = 1 + data); the immediate must be a multiple of num,
	     and any explicit shifter must be MUL VL.  */
	  assert (!opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
	  min_value *= num;
	  max_value *= num;
	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
	      || (opnd->shifter.operator_present
		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     min_value, max_value);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, num))
	    {
	      set_unaligned_error (mismatch_detail, idx, num);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
	  min_value = -32;
	  max_value = 31;
	  goto sve_imm_offset_vl;

	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
	  min_value = -256;
	  max_value = 255;
	  goto sve_imm_offset_vl;

	case AARCH64_OPND_SVE_ADDR_RI_U6:
	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
	  /* Unsigned 6-bit immediate, later scaled by the access size.  */
	  min_value = 0;
	  max_value = 63;
	sve_imm_offset:
	  /* Shared check for plain scaled immediate offsets.  The
	     operand-specific data holds log2 of the scale factor
	     (num = 1 << data); no shifter of any kind is allowed.  */
	  assert (!opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
	  min_value *= num;
	  max_value *= num;
	  if (opnd->shifter.operator_present
	      || opnd->shifter.amount_present)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     min_value, max_value);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, num))
	    {
	      set_unaligned_error (mismatch_detail, idx, num);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
	  min_value = -8;
	  max_value = 7;
	  goto sve_imm_offset;

	case AARCH64_OPND_SVE_ADDR_ZX:
	  /* Everything is already ensured by parse_operands or
	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
	     argument type).  */
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
	  assert (opnd->shifter.operator_present == 0);
	  break;
   2549 
	case AARCH64_OPND_SVE_ADDR_R:
	case AARCH64_OPND_SVE_ADDR_RR:
	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
	case AARCH64_OPND_SVE_ADDR_RR_LSL4:
	case AARCH64_OPND_SVE_ADDR_RX:
	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
	case AARCH64_OPND_SVE_ADDR_RX_LSL4:
	case AARCH64_OPND_SVE_ADDR_RZ:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
	  /* Register-plus-register forms whose only legal modifier is LSL.  */
	  modifiers = 1 << AARCH64_MOD_LSL;
	sve_rr_operand:
	  /* Shared check: the offset must be a register, the modifier must
	     be one of MODIFIERS, and the shift amount must equal the fixed
	     value stored in the operand-specific data.  */
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
	      && opnd->addr.offset.regno == 31)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("index register xzr is not allowed"));
	      return false;
	    }
	  if (((1 << opnd->shifter.kind) & modifiers) == 0
	      || (opnd->shifter.amount
		  != get_operand_specific_data (&aarch64_operands[type])))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
	  /* As above, but the modifier must be SXTW or UXTW.  */
	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
	  goto sve_rr_operand;

	case AARCH64_OPND_SVE_ADDR_ZI_U5:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
	  /* Unsigned 5-bit immediate offset, scaled by the access size.  */
	  min_value = 0;
	  max_value = 31;
	  goto sve_imm_offset;

	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
	  modifiers = 1 << AARCH64_MOD_LSL;
	sve_zz_operand:
	  /* Vector-plus-vector forms: the modifier must be in MODIFIERS and
	     the shift amount in the range 0-3.  */
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  if (((1 << opnd->shifter.kind) & modifiers) == 0
	      || opnd->shifter.amount < 0
	      || opnd->shifter.amount > 3)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
	  modifiers = (1 << AARCH64_MOD_SXTW);
	  goto sve_zz_operand;

	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
	  modifiers = 1 << AARCH64_MOD_UXTW;
	  goto sve_zz_operand;
   2627 
	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
	  /* Writeback forms: the increment, when non-zero, must equal the
	     number of bytes transferred (negated for the pre-index forms).  */
	  {
	    int num_bytes = calc_ldst_datasize (opnds);
	    int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
			      || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB)
	      ? opnd->addr.offset.imm * -1
	      : opnd->addr.offset.imm;
	    if ((int) num_bytes != abs_offset
		&& opnd->addr.offset.imm != 0)
	      {
		set_other_error (mismatch_detail, idx,
				 _("invalid increment amount"));
		return false;
	      }
	  }
	  break;
   2647 
   2648 	case AARCH64_OPND_RCPC3_ADDR_OFFSET:
   2649 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   2650 	    {
   2651 	      set_imm_out_of_range_error (mismatch_detail, idx, -256, 255);
   2652 	      return false;
   2653 	    }
   2654 
   2655 	default:
   2656 	  break;
   2657 	}
   2658       break;
   2659 
   2660     case AARCH64_OPND_CLASS_SIMD_REGLIST:
   2661       if (type == AARCH64_OPND_LEt)
   2662 	{
   2663 	  /* Get the upper bound for the element index.  */
   2664 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
   2665 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
   2666 	    {
   2667 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   2668 	      return false;
   2669 	    }
   2670 	}
   2671       /* The opcode dependent area stores the number of elements in
   2672 	 each structure to be loaded/stored.  */
   2673       num = get_opcode_dependent_value (opcode);
   2674       switch (type)
   2675 	{
   2676 	case AARCH64_OPND_LVn_LUT:
   2677 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2678 	    return 0;
   2679 	  break;
   2680 	case AARCH64_OPND_LVt:
   2681 	  assert (num >= 1 && num <= 4);
   2682 	  /* Unless LD1/ST1, the number of registers should be equal to that
   2683 	     of the structure elements.  */
   2684 	  if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
   2685 	    return false;
   2686 	  break;
   2687 	case AARCH64_OPND_LVt_AL:
   2688 	case AARCH64_OPND_LEt:
   2689 	  assert (num >= 1 && num <= 4);
   2690 	  /* The number of registers should be equal to that of the structure
   2691 	     elements.  */
   2692 	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
   2693 	    return false;
   2694 	  break;
   2695 	default:
   2696 	  break;
   2697 	}
   2698       if (opnd->reglist.stride != 1)
   2699 	{
   2700 	  set_reg_list_stride_error (mismatch_detail, idx, 1);
   2701 	  return false;
   2702 	}
   2703       break;
   2704 
    case AARCH64_OPND_CLASS_IMMEDIATE:
      /* Constraint check on immediate operand.  */
      imm = opnd->imm.value;
      /* E.g. imm_0_31 constrains value to be 0..31.  */
      if (qualifier_value_in_range_constraint_p (qualifier)
	  && !value_in_range_p (imm, get_lower_bound (qualifier),
				get_upper_bound (qualifier)))
	{
	  set_imm_out_of_range_error (mismatch_detail, idx,
				      get_lower_bound (qualifier),
				      get_upper_bound (qualifier));
	  return false;
	}

      /* Operand-type specific immediate checks.  */
      switch (type)
	{
	case AARCH64_OPND_AIMM:
	  /* Arithmetic immediate: LSL by 0 or 12, value fits in 12 bits.  */
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return false;
	    }
	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount must be 0 or 12"));
	      return false;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  /* Non-negative 16-bit immediate with an LSL that must be a
	     multiple of 16 and less than the destination register width.  */
	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return false;
	    }
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_aligned_p (opnd->shifter.amount, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount must be a multiple of 16"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
						 0, size * 8 - 16);
	      return false;
	    }
	  if (opnd->imm.value < 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("negative immediate value not allowed"));
	      return false;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return false;
	    }
	  break;
   2776 
	case AARCH64_OPND_IMM_MOV:
	    {
	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      imm = opnd->imm.value;
	      assert (idx == 1);
	      /* Validate according to which MOV alias is being used.  */
	      switch (opcode->op)
		{
		case OP_MOV_IMM_WIDEN:
		  /* MOV implemented as MOVN: check the inverted value.  */
		  imm = ~imm;
		  /* Fall through.  */
		case OP_MOV_IMM_WIDE:
		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return false;
		    }
		  break;
		case OP_MOV_IMM_LOG:
		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return false;
		    }
		  break;
		default:
		  assert (0);
		  return false;
		}
	    }
	  break;

	case AARCH64_OPND_NZCV:
	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_EXCEPTION:
	case AARCH64_OPND_UNDEFINED:
	case AARCH64_OPND_TME_UIMM16:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM4_ADDG:
	case AARCH64_OPND_UIMM7:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	case AARCH64_OPND_SVE_UIMM3:
	case AARCH64_OPND_SVE_UIMM7:
	case AARCH64_OPND_SVE_UIMM8:
	case AARCH64_OPND_SVE_UIMM4:
	case AARCH64_OPND_SVE_UIMM8_53:
	case AARCH64_OPND_CSSC_UIMM8:
	  /* Generic unsigned immediate checked against the width of the
	     operand's encoding field.  */
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  (1u << size) - 1);
	      return false;
	    }
	  break;
   2835 
	case AARCH64_OPND_UIMM10:
	  /* Scaled unsigned 10 bits immediate offset.  */
	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
	      return false;
	    }

	  if (!value_aligned_p (opnd->imm.value, 16))
	    {
	      set_unaligned_error (mismatch_detail, idx, 16);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SIMM5:
	case AARCH64_OPND_SVE_SIMM5:
	case AARCH64_OPND_SVE_SIMM5B:
	case AARCH64_OPND_SVE_SIMM6:
	case AARCH64_OPND_SVE_SIMM8:
	case AARCH64_OPND_CSSC_SIMM8:
	  /* Generic signed immediate checked against the width of the
	     operand's encoding field.  */
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_signed_field_p (opnd->imm.value, size))
	    {
	      imm_range_t rng = imm_range_min_max (size, true);
	      set_imm_out_of_range_error (mismatch_detail, idx, rng.min,
					  rng.max);
	      return false;
	    }
	  break;

	case AARCH64_OPND_WIDTH:
	  /* Bitfield width operand: preceded by the lsb immediate.  */
	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
		  && opnds[0].type == AARCH64_OPND_Rd);
	  size = get_upper_bound (qualifier);
	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
	    /* lsb+width <= reg.size  */
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
					  size - opnds[idx-1].imm.value);
	      return false;
	    }
	  break;

	case AARCH64_OPND_LIMM:
	case AARCH64_OPND_SVE_LIMM:
	  /* Logical (bitmask) immediate; BIC validates the inverse.  */
	  {
	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	    uint64_t uimm = opnd->imm.value;
	    if (opcode->op == OP_BIC)
	      uimm = ~uimm;
	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
	      {
		set_other_error (mismatch_detail, idx,
				 _("immediate out of range"));
		return false;
	      }
	  }
	  break;

	case AARCH64_OPND_IMM0:
	case AARCH64_OPND_FPIMM0:
	  /* Operand must be literally zero.  */
	  if (opnd->imm.value != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate zero expected"));
	      return false;
	    }
	  break;
   2906 
	case AARCH64_OPND_IMM_ROT1:
	case AARCH64_OPND_IMM_ROT2:
	case AARCH64_OPND_SVE_IMM_ROT2:
	  /* Complex-arithmetic rotation: one of 0, 90, 180 or 270.  */
	  if (opnd->imm.value != 0
	      && opnd->imm.value != 90
	      && opnd->imm.value != 180
	      && opnd->imm.value != 270)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("rotate expected to be 0, 90, 180 or 270"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_ROT3:
	case AARCH64_OPND_SVE_IMM_ROT1:
	case AARCH64_OPND_SVE_IMM_ROT3:
	  /* Restricted rotation: only 90 or 270 are encodable.  */
	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("rotate expected to be 90 or 270"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SHLL_IMM:
	  /* Shift must equal the element size (in bits) of the source.  */
	  assert (idx == 2);
	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (opnd->imm.value != size)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSL:
	  /* Left shift: 0 to esize*8 - 1.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  size * 8 - 1);
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSR:
	  /* Right shift: 1 to esize*8.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
	      return false;
	    }
	  break;
   2961 
   2962 	case AARCH64_OPND_SIMD_IMM:
   2963 	case AARCH64_OPND_SIMD_IMM_SFT:
   2964 	  /* Qualifier check.  */
   2965 	  switch (qualifier)
   2966 	    {
   2967 	    case AARCH64_OPND_QLF_LSL:
   2968 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2969 		{
   2970 		  set_other_error (mismatch_detail, idx,
   2971 				   _("invalid shift operator"));
   2972 		  return false;
   2973 		}
   2974 	      break;
   2975 	    case AARCH64_OPND_QLF_MSL:
   2976 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
   2977 		{
   2978 		  set_other_error (mismatch_detail, idx,
   2979 				   _("invalid shift operator"));
   2980 		  return false;
   2981 		}
   2982 	      break;
   2983 	    case AARCH64_OPND_QLF_NIL:
   2984 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   2985 		{
   2986 		  set_other_error (mismatch_detail, idx,
   2987 				   _("shift is not permitted"));
   2988 		  return false;
   2989 		}
   2990 	      break;
   2991 	    default:
   2992 	      assert (0);
   2993 	      return false;
   2994 	    }
   2995 	  /* Is the immediate valid?  */
   2996 	  assert (idx == 1);
   2997 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
   2998 	    {
   2999 	      /* uimm8 or simm8 */
   3000 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
   3001 		{
   3002 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
   3003 		  return false;
   3004 		}
   3005 	    }
   3006 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
   3007 	    {
   3008 	      /* uimm64 is not
   3009 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
   3010 		 ffffffffgggggggghhhhhhhh'.  */
   3011 	      set_other_error (mismatch_detail, idx,
   3012 			       _("invalid value for immediate"));
   3013 	      return false;
   3014 	    }
   3015 	  /* Is the shift amount valid?  */
   3016 	  switch (opnd->shifter.kind)
   3017 	    {
   3018 	    case AARCH64_MOD_LSL:
   3019 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3020 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
   3021 		{
   3022 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
   3023 						     (size - 1) * 8);
   3024 		  return false;
   3025 		}
   3026 	      if (!value_aligned_p (opnd->shifter.amount, 8))
   3027 		{
   3028 		  set_unaligned_error (mismatch_detail, idx, 8);
   3029 		  return false;
   3030 		}
   3031 	      break;
   3032 	    case AARCH64_MOD_MSL:
   3033 	      /* Only 8 and 16 are valid shift amount.  */
   3034 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
   3035 		{
   3036 		  set_other_error (mismatch_detail, idx,
   3037 				   _("shift amount must be 0 or 16"));
   3038 		  return false;
   3039 		}
   3040 	      break;
   3041 	    default:
   3042 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   3043 		{
   3044 		  set_other_error (mismatch_detail, idx,
   3045 				   _("invalid shift operator"));
   3046 		  return false;
   3047 		}
   3048 	      break;
   3049 	    }
   3050 	  break;
   3051 
	case AARCH64_OPND_FPIMM:
	case AARCH64_OPND_SIMD_FPIMM:
	case AARCH64_OPND_SVE_FPIMM8:
	  if (opnd->imm.is_fp == 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("floating-point immediate expected"));
	      return false;
	    }
	  /* The value is expected to be an 8-bit floating-point constant with
	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
	     instruction).  */
	  if (!value_in_range_p (opnd->imm.value, 0, 255))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return false;
	    }
	  /* No shift of any kind is permitted on an FP immediate.  */
	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return false;
	    }
	  break;
   3078 
	case AARCH64_OPND_SVE_AIMM:
	  min_value = 0;
	sve_aimm:
	  /* SVE arithmetic immediate: an 8-bit value with an optional LSL #8
	     for element sizes wider than a byte.  MIN_VALUE is 0 for the
	     unsigned form and -128 for the signed (ASIMM) form.  */
	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  /* Two half-width shifts build an esize-bit mask without ever
	     shifting a 64-bit value by 64, which would be undefined.  */
	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
	  uvalue = opnd->imm.value;
	  shift = opnd->shifter.amount;
	  if (size == 1)
	    {
	      if (shift != 0)
		{
		  set_other_error (mismatch_detail, idx,
				   _("no shift amount allowed for"
				     " 8-bit constants"));
		  return false;
		}
	    }
	  else
	    {
	      if (shift != 0 && shift != 8)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift amount must be 0 or 8"));
		  return false;
		}
	      /* Canonicalize a shiftable multiple of 256 to LSL #8 form.  */
	      if (shift == 0 && (uvalue & 0xff) == 0)
		{
		  shift = 8;
		  uvalue = (int64_t) uvalue / 256;
		}
	    }
	  mask >>= shift;
	  /* Reject values whose significant bits exceed the element size.  */
	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate too big for element size"));
	      return false;
	    }
	  uvalue = (uvalue - min_value) & mask;
	  if (uvalue > 0xff)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid arithmetic immediate"));
	      return false;
	    }
	  break;
   3130 
   3131 	case AARCH64_OPND_SVE_I1_HALF_ONE:
   3132 	  assert (opnd->imm.is_fp);
   3133 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
   3134 	    {
   3135 	      set_other_error (mismatch_detail, idx,
   3136 			       _("floating-point value must be 0.5 or 1.0"));
   3137 	      return false;
   3138 	    }
   3139 	  break;
   3140 
   3141 	case AARCH64_OPND_SVE_I1_HALF_TWO:
   3142 	  assert (opnd->imm.is_fp);
   3143 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
   3144 	    {
   3145 	      set_other_error (mismatch_detail, idx,
   3146 			       _("floating-point value must be 0.5 or 2.0"));
   3147 	      return false;
   3148 	    }
   3149 	  break;
   3150 
   3151 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
   3152 	  assert (opnd->imm.is_fp);
   3153 	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
   3154 	    {
   3155 	      set_other_error (mismatch_detail, idx,
   3156 			       _("floating-point value must be 0.0 or 1.0"));
   3157 	      return false;
   3158 	    }
   3159 	  break;
   3160 
   3161 	case AARCH64_OPND_SVE_INV_LIMM:
   3162 	  {
   3163 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3164 	    uint64_t uimm = ~opnd->imm.value;
   3165 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   3166 	      {
   3167 		set_other_error (mismatch_detail, idx,
   3168 				 _("immediate out of range"));
   3169 		return false;
   3170 	      }
   3171 	  }
   3172 	  break;
   3173 
   3174 	case AARCH64_OPND_SVE_LIMM_MOV:
   3175 	  {
   3176 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   3177 	    uint64_t uimm = opnd->imm.value;
   3178 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   3179 	      {
   3180 		set_other_error (mismatch_detail, idx,
   3181 				 _("immediate out of range"));
   3182 		return false;
   3183 	      }
   3184 	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
   3185 	      {
   3186 		set_other_error (mismatch_detail, idx,
   3187 				 _("invalid replicated MOV immediate"));
   3188 		return false;
   3189 	      }
   3190 	  }
   3191 	  break;
   3192 
	case AARCH64_OPND_SVE_PATTERN_SCALED:
	  /* The MUL multiplier must lie in the range 1-16.  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
	    {
	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_SHLIMM_PRED:
	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
	  /* Left-shift amount: 0 to esize*8 - 1 of the preceding operand.  */
	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx,
					  0, 8 * size - 1);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SME_SHRIMM4:
	  /* Right-shift amount bounded by the encoding field width.  */
	  size = 1 << get_operand_fields_width (get_operand_from_code (type));
	  if (!value_in_range_p (opnd->imm.value, 1, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SME_SHRIMM5:
	case AARCH64_OPND_SVE_SHRIMM_PRED:
	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
	  /* Right-shift amount: 1 to esize*8.  NUM selects which earlier
	     operand supplies the element size.  */
	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
	      return false;
	    }
	  break;
   3235 
   3236 	case AARCH64_OPND_SME_ZT0_INDEX:
   3237 	  if (!value_in_range_p (opnd->imm.value, 0, 56))
   3238 	    {
   3239 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
   3240 	      return false;
   3241 	    }
   3242 	  if (opnd->imm.value % 8 != 0)
   3243 	    {
   3244 	      set_other_error (mismatch_detail, idx,
   3245 			       _("byte index must be a multiple of 8"));
   3246 	      return false;
   3247 	    }
   3248 	  break;
   3249 
   3250 	case AARCH64_OPND_SME_ZT0_INDEX2_12:
   3251 	  if (!value_in_range_p (opnd->imm.value, 0, 3))
   3252 	    {
   3253 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 3);
   3254 	      return 0;
   3255 	    }
   3256 	  break;
   3257 
   3258 	default:
   3259 	  break;
   3260 	}
   3261       break;
   3262 
    case AARCH64_OPND_CLASS_SYSTEM:
      switch (type)
	{
	case AARCH64_OPND_PSTATEFIELD:
	  /* Look the field up to obtain the maximum immediate legal for the
	     paired UIMM4 operand, which is validated here on its behalf.  */
	  for (i = 0; aarch64_pstatefields[i].name; ++i)
	    if (aarch64_pstatefields[i].value == opnd->pstatefield)
	      break;
	  assert (aarch64_pstatefields[i].name);
	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
	  max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
	  if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
	    {
	      set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
	      return false;
	    }
	  break;
	case AARCH64_OPND_PRFOP:
	  if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("the register-index form of PRFM does"
				 " not accept opcodes in the range 24-31"));
	      return false;
	    }
	  break;
	default:
	  break;
	}
      break;
   3292 
   3293     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
   3294       /* Get the upper bound for the element index.  */
   3295       if (opcode->op == OP_FCMLA_ELEM)
   3296 	/* FCMLA index range depends on the vector size of other operands
   3297 	   and is halfed because complex numbers take two elements.  */
   3298 	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
   3299 	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
   3300       else if (opcode->iclass == lut)
   3301 	{
   3302 	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
   3303 	  if (!check_reglane (opnd, mismatch_detail, idx, "v", 0, 31,
   3304 			      0, (1 << size) - 1))
   3305 	    return 0;
   3306 	  break;
   3307 	}
   3308       else
   3309 	num = 16;
   3310       num = num / aarch64_get_qualifier_esize (qualifier) - 1;
   3311       assert (aarch64_get_qualifier_nelem (qualifier) == 1);
   3312 
   3313       /* Index out-of-range.  */
   3314       if (!value_in_range_p (opnd->reglane.index, 0, num))
   3315 	{
   3316 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   3317 	  return false;
   3318 	}
   3319       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
   3320 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
   3321 	 number is encoded in "size:M:Rm":
   3322 	 size	<Vm>
   3323 	 00		RESERVED
   3324 	 01		0:Rm
   3325 	 10		M:Rm
   3326 	 11		RESERVED  */
   3327       if (type == AARCH64_OPND_Em16
   3328 	  && (qualifier == AARCH64_OPND_QLF_S_H
   3329 	      || qualifier == AARCH64_OPND_QLF_S_2B)
   3330 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
   3331 	{
   3332 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   3333 	  return false;
   3334 	}
   3335       if (type == AARCH64_OPND_Em8
   3336 	  && !value_in_range_p (opnd->reglane.regno, 0, 7))
   3337 	{
   3338 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 7);
   3339 	  return 0;
   3340 	}
   3341       break;
   3342 
   3343     case AARCH64_OPND_CLASS_MODIFIED_REG:
   3344       assert (idx == 1 || idx == 2);
   3345       switch (type)
   3346 	{
   3347 	case AARCH64_OPND_Rm_EXT:
   3348 	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
   3349 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
   3350 	    {
   3351 	      set_other_error (mismatch_detail, idx,
   3352 			       _("extend operator expected"));
   3353 	      return false;
   3354 	    }
   3355 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
   3356 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
   3357 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
   3358 	     case.  */
   3359 	  if (!aarch64_stack_pointer_p (opnds + 0)
   3360 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
   3361 	    {
   3362 	      if (!opnd->shifter.operator_present)
   3363 		{
   3364 		  set_other_error (mismatch_detail, idx,
   3365 				   _("missing extend operator"));
   3366 		  return false;
   3367 		}
   3368 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
   3369 		{
   3370 		  set_other_error (mismatch_detail, idx,
   3371 				   _("'LSL' operator not allowed"));
   3372 		  return false;
   3373 		}
   3374 	    }
   3375 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
   3376 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
   3377 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
   3378 	    {
   3379 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
   3380 	      return false;
   3381 	    }
   3382 	  /* In the 64-bit form, the final register operand is written as Wm
   3383 	     for all but the (possibly omitted) UXTX/LSL and SXTX
   3384 	     operators.
   3385 	     N.B. GAS allows X register to be used with any operator as a
   3386 	     programming convenience.  */
   3387 	  if (qualifier == AARCH64_OPND_QLF_X
   3388 	      && opnd->shifter.kind != AARCH64_MOD_LSL
   3389 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
   3390 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
   3391 	    {
   3392 	      set_other_error (mismatch_detail, idx, _("W register expected"));
   3393 	      return false;
   3394 	    }
   3395 	  break;
   3396 
   3397 	case AARCH64_OPND_Rm_SFT:
   3398 	  /* ROR is not available to the shifted register operand in
   3399 	     arithmetic instructions.  */
   3400 	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
   3401 	    {
   3402 	      set_other_error (mismatch_detail, idx,
   3403 			       _("shift operator expected"));
   3404 	      return false;
   3405 	    }
   3406 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
   3407 	      && opcode->iclass != log_shift)
   3408 	    {
   3409 	      set_other_error (mismatch_detail, idx,
   3410 			       _("'ROR' operator not allowed"));
   3411 	      return false;
   3412 	    }
   3413 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
   3414 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
   3415 	    {
   3416 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
   3417 	      return false;
   3418 	    }
   3419 	  break;
   3420 
   3421 	case AARCH64_OPND_Rm_LSL:
   3422 	  /* We expect here that opnd->shifter.kind != AARCH64_MOD_LSL
   3423 	     because the parser already restricts the type of shift to LSL only,
   3424 	     so another check of shift kind would be redundant.  */
   3425 	  if (!value_in_range_p (opnd->shifter.amount, 0, 7))
   3426 	    {
   3427 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 7);
   3428 	      return false;
   3429 	    }
   3430 	  break;
   3431 
   3432 	default:
   3433 	  break;
   3434 	}
   3435       break;
   3436 
   3437     default:
   3438       break;
   3439     }
   3440 
   3441   return true;
   3442 }
   3443 
   3444 /* Main entrypoint for the operand constraint checking.
   3445 
   Return true if the operands of *INST meet the constraints applied by
   the operand codes and operand qualifiers; otherwise return false and,
   if MISMATCH_DETAIL is not NULL, return the detail of the error in
   *MISMATCH_DETAIL.  N.B. when
   3449    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   3450    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   3451    error kind when it is notified that an instruction does not pass the check).
   3452 
   3453    Un-determined operand qualifiers may get established during the process.  */
   3454 
bool
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* A positive TIED_OPERAND value I means operand I must be tied to
     (i.e. agree with) operand 0.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
        {
        /* For SME LDR and STR instructions #imm must have the same numerical
           value for both operands.
        */
        case sme_ldr:
        case sme_str:
          assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
          assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
          if (inst->operands[0].indexed_za.index.imm
              != inst->operands[1].addr.offset.imm)
            {
              if (mismatch_detail)
                {
                  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
                  mismatch_detail->index = i;
                }
              return false;
            }
          break;

        default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class
	       = aarch64_get_operand_class (inst->operands[0].type);
	    assert (aarch64_get_operand_class (inst->operands[i].type)
		    == op_class);
	    /* Register lists must agree on first register, length and
	       stride; single registers must simply have the same number.  */
	    if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return false;
	      }
	    break;
	  }
        }
    }

  /* Match operands' qualifier.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by operand_general_constraint_met_p,
     which has to be called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return false;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (!operand_general_constraint_met_p (inst->operands, i, type,
					     inst->opcode, mismatch_detail))
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return false;
	}
    }

  DEBUG_TRACE ("PASS");

  return true;
}
   3575 
   3576 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   3577    Also updates the TYPE of each INST->OPERANDS with the corresponding
   3578    value of OPCODE->OPERANDS.
   3579 
   3580    Note that some operand qualifiers may need to be manually cleared by
   3581    the caller before it further calls the aarch64_opcode_encode; by
   3582    doing this, it helps the qualifier matching facilities work
   3583    properly.  */
   3584 
   3585 const aarch64_opcode*
   3586 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
   3587 {
   3588   int i;
   3589   const aarch64_opcode *old = inst->opcode;
   3590 
   3591   inst->opcode = opcode;
   3592 
   3593   /* Update the operand types.  */
   3594   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   3595     {
   3596       inst->operands[i].type = opcode->operands[i];
   3597       if (opcode->operands[i] == AARCH64_OPND_NIL)
   3598 	break;
   3599     }
   3600 
   3601   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
   3602 
   3603   return old;
   3604 }
   3605 
   3606 int
   3607 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
   3608 {
   3609   int i;
   3610   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   3611     if (operands[i] == operand)
   3612       return i;
   3613     else if (operands[i] == AARCH64_OPND_NIL)
   3614       break;
   3615   return -1;
   3616 }
   3617 
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
/* General-purpose register names, indexed as int_reg[has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed as sve_reg[is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
   3649 
/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */
   3652 
   3653 static inline const char *
   3654 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
   3655 {
   3656   const int has_zr = sp_reg_p ? 0 : 1;
   3657   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
   3658   return int_reg[has_zr][is_64][regno];
   3659 }
   3660 
   3661 /* Like get_int_reg_name, but IS_64 is always 1.  */
   3662 
   3663 static inline const char *
   3664 get_64bit_int_reg_name (int regno, int sp_reg_p)
   3665 {
   3666   const int has_zr = sp_reg_p ? 0 : 1;
   3667   return int_reg[has_zr][1][regno];
   3668 }
   3669 
   3670 /* Get the name of the integer offset register in OPND, using the shift type
   3671    to decide whether it's a word or doubleword.  */
   3672 
   3673 static inline const char *
   3674 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
   3675 {
   3676   switch (opnd->shifter.kind)
   3677     {
   3678     case AARCH64_MOD_UXTW:
   3679     case AARCH64_MOD_SXTW:
   3680       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
   3681 
   3682     case AARCH64_MOD_LSL:
   3683     case AARCH64_MOD_SXTX:
   3684       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
   3685 
   3686     default:
   3687       abort ();
   3688     }
   3689 }
   3690 
   3691 /* Get the name of the SVE vector offset register in OPND, using the operand
   3692    qualifier to decide whether the suffix should be .S or .D.  */
   3693 
   3694 static inline const char *
   3695 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
   3696 {
   3697   assert (qualifier == AARCH64_OPND_QLF_S_S
   3698 	  || qualifier == AARCH64_OPND_QLF_S_D);
   3699   return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
   3700 }
   3701 
   3702 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
   3703 
/* Reinterpret a 64-bit pattern as a double.  */
typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

/* Reinterpret a 32-bit pattern as a single-precision float.  */
typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* Half-precision values are expanded to single precision before use,
   hence the 32-bit integer and float members here.  */
typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;
   3721 
   3722 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   3723    normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   3724    (depending on the type of the instruction).  IMM8 will be expanded to a
   3725    single-precision floating-point value (SIZE == 4) or a double-precision
   3726    floating-point value (SIZE == 8).  A half-precision floating-point value
   3727    (SIZE == 2) is expanded to a single-precision floating-point value.  The
   3728    expanded value is returned.  */
   3729 
static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 1;	/* imm8<7>   */
  const uint32_t b6 = (imm8 >> 6) & 1;		/* imm8<6>   */
  const uint32_t frac = imm8 & 0x3f;		/* imm8<5:0> */
  uint64_t result = 0;

  if (size == 8)
    {
      /* Double precision: sign : NOT(b6) : Replicate(b6,8) : frac,
	 followed by 48 zero bits.  Build the most-significant 32 bits
	 first, then shift them into place.  */
      result = ((uint64_t) sign << 31)
	| ((uint64_t) (b6 ^ 1) << 30)
	| (b6 ? 0x3fc00000u : 0u)	/* Replicate(imm8<6>, 8).  */
	| (frac << 16);			/* imm8<5:0> */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single precision (half-precision inputs are expanded to single):
	 sign : NOT(b6) : Replicate(b6,5) : frac, followed by 19 zeros.  */
      result = ((uint64_t) sign << 31)
	| ((b6 ^ 1) << 30)
	| (b6 ? 0x3e000000u : 0u)	/* Replicate(imm8<6>, 5).  */
	| (frac << 19);			/* imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
   3765 
   3766 /* Return a string based on FMT with the register style applied.  */
   3767 
   3768 static const char *
   3769 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
   3770 {
   3771   const char *txt;
   3772   va_list ap;
   3773 
   3774   va_start (ap, fmt);
   3775   txt = styler->apply_style (styler, dis_style_register, fmt, ap);
   3776   va_end (ap);
   3777 
   3778   return txt;
   3779 }
   3780 
   3781 /* Return a string based on FMT with the immediate style applied.  */
   3782 
   3783 static const char *
   3784 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
   3785 {
   3786   const char *txt;
   3787   va_list ap;
   3788 
   3789   va_start (ap, fmt);
   3790   txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
   3791   va_end (ap);
   3792 
   3793   return txt;
   3794 }
   3795 
   3796 /* Return a string based on FMT with the sub-mnemonic style applied.  */
   3797 
   3798 static const char *
   3799 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
   3800 {
   3801   const char *txt;
   3802   va_list ap;
   3803 
   3804   va_start (ap, fmt);
   3805   txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
   3806   va_end (ap);
   3807 
   3808   return txt;
   3809 }
   3810 
   3811 /* Return a string based on FMT with the address style applied.  */
   3812 
   3813 static const char *
   3814 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
   3815 {
   3816   const char *txt;
   3817   va_list ap;
   3818 
   3819   va_start (ap, fmt);
   3820   txt = styler->apply_style (styler, dis_style_address, fmt, ap);
   3821   va_end (ap);
   3822 
   3823   return txt;
   3824 }
   3825 
   3826 /* Produce the string representation of the register list operand *OPND
   3827    in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   3828    the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers (prefix "p") number 0-15; other banks 0-31.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around within the bank.  */
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer for the optional "[index]" suffix.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there is
     more than one register in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
      /* No qualifier: print bare register names.  */
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d", prefix, first_reg),
		style_reg (styler, "%s%d", prefix, last_reg), tb);
    else
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
		style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Otherwise list the registers individually, each number wrapped
	 into the bank with MASK.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
   3902 
   3903 /* Print the register+immediate address in OPND to BUF, which has SIZE
   3904    characters.  BASE is the name of the base register.  */
   3905 
   3906 static void
   3907 print_immediate_offset_address (char *buf, size_t size,
   3908 				const aarch64_opnd_info *opnd,
   3909 				const char *base,
   3910 				struct aarch64_styler *styler)
   3911 {
   3912   if (opnd->addr.writeback)
   3913     {
   3914       if (opnd->addr.preind)
   3915         {
   3916 	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
   3917 	    snprintf (buf, size, "[%s]!", style_reg (styler, base));
   3918           else
   3919 	    snprintf (buf, size, "[%s, %s]!",
   3920 		      style_reg (styler, base),
   3921 		      style_imm (styler, "#%d", opnd->addr.offset.imm));
   3922         }
   3923       else
   3924 	snprintf (buf, size, "[%s], %s",
   3925 		  style_reg (styler, base),
   3926 		  style_imm (styler, "#%d", opnd->addr.offset.imm));
   3927     }
   3928   else
   3929     {
   3930       if (opnd->shifter.operator_present)
   3931 	{
   3932 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
   3933 	  snprintf (buf, size, "[%s, %s, %s]",
   3934 		    style_reg (styler, base),
   3935 		    style_imm (styler, "#%d", opnd->addr.offset.imm),
   3936 		    style_sub_mnem (styler, "mul vl"));
   3937 	}
   3938       else if (opnd->addr.offset.imm)
   3939 	snprintf (buf, size, "[%s, %s]",
   3940 		  style_reg (styler, base),
   3941 		  style_imm (styler, "#%d", opnd->addr.offset.imm));
   3942       else
   3943 	snprintf (buf, size, "[%s]", style_reg (styler, base));
   3944     }
   3945 }
   3946 
   3947 /* Produce the string representation of the register offset address operand
   3948    *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   3949    the names of the base and offset registers.  */
   3950 static void
   3951 print_register_offset_address (char *buf, size_t size,
   3952 			       const aarch64_opnd_info *opnd,
   3953 			       const char *base, const char *offset,
   3954 			       struct aarch64_styler *styler)
   3955 {
   3956   char tb[32];			/* Temporary buffer.  */
   3957   bool print_extend_p = true;
   3958   bool print_amount_p = true;
   3959   const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
   3960 
   3961   /* This is the case where offset is the optional argument and the optional
   3962      argument is ignored in the disassembly.  */
   3963   if (opnd->type == AARCH64_OPND_SVE_ADDR_ZX && offset != NULL
   3964       && strcmp (offset,"xzr") == 0)
   3965     {
   3966       /* Example: [<Zn>.S{, <Xm>}].
   3967 	 When the assembly is [Z0.S, XZR] or [Z0.S], Xm is XZR in both the cases
   3968 	 and the preferred disassembly is [Z0.S], ignoring the optional	Xm.  */
   3969       snprintf (buf, size, "[%s]", style_reg (styler, base));
   3970     }
   3971   else
   3972     {
   3973       if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
   3974 				    || !opnd->shifter.amount_present))
   3975 	{
   3976 	  /* Not print the shift/extend amount when the amount is zero and
   3977 	     when it is not the special case of 8-bit load/store
   3978 	     instruction.  */
   3979 	 print_amount_p = false;
   3980 	 /* Likewise, no need to print the shift operator LSL in such a
   3981 	    situation.  */
   3982 	 if (opnd->shifter.kind == AARCH64_MOD_LSL)
   3983 	   print_extend_p = false;
   3984 	}
   3985 
   3986       /* Prepare for the extend/shift.  */
   3987       if (print_extend_p)
   3988 	{
   3989 	  if (print_amount_p)
   3990 	    snprintf (tb, sizeof (tb), ", %s %s",
   3991 		      style_sub_mnem (styler, shift_name),
   3992 		      style_imm (styler, "#%" PRIi64,
   3993 	  /* PR 21096: The %100 is to silence a warning about possible
   3994 	     truncation.  */
   3995 				 (opnd->shifter.amount % 100)));
   3996 	  else
   3997 	    snprintf (tb, sizeof (tb), ", %s",
   3998 		      style_sub_mnem (styler, shift_name));
   3999 	}
   4000       else
   4001 	tb[0] = '\0';
   4002 
   4003       snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
   4004 		style_reg (styler, offset), tb);
   4005     }
   4006 }
   4007 
   4008 /* Print ZA tiles from imm8 in ZERO instruction.
   4009 
   4010    The preferred disassembly of this instruction uses the shortest list of tile
   4011    names that represent the encoded immediate mask.
   4012 
   4013    For example:
   4014     * An all-ones immediate is disassembled as {ZA}.
   4015     * An all-zeros immediate is disassembled as an empty list { }.
   4016 */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Candidate tile names, widest coverage first, paired with the mask
     bits each one accounts for.  The final " " entry (mask 0) produces
     the empty list "{ }" for an all-zeros immediate.  */
  static const char *const names[] =
    { "za",    "za0.h", "za1.h", "za0.s",
      "za1.s", "za2.s", "za3.s", "za0.d",
      "za1.d", "za2.d", "za3.d", "za4.d",
      "za5.d", "za6.d", "za7.d", " " };
  static const int bits[] =
    { 0xff, 0x55, 0xaa, 0x11,
      0x22, 0x44, 0x88, 0x01,
      0x02, 0x04, 0x08, 0x10,
      0x20, 0x40, 0x80, 0x00 };
  size_t i;
  int pos = snprintf (buf, size, "{");

  for (i = 0; i < sizeof (names) / sizeof (names[0]); i++)
    {
      if ((mask & bits[i]) == bits[i])
	{
	  mask &= ~bits[i];
	  /* Separate entries after the first with ", ".  */
	  if (pos > 1)
	    pos += snprintf (buf + pos, size - pos, ", ");

	  pos += snprintf (buf + pos, size - pos, "%s",
			   style_reg (styler, names[i]));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + pos, size - pos, "}");
}
   4048 
   4049 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   4050    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   4051    PC, PCREL_P and ADDRESS are used to pass in and return information about
   4052    the PC-relative address calculation, where the PC value is passed in
   4053    PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   4054    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   4055    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
   4056 
   4057    The function serves both the disassembler and the assembler diagnostics
   4058    issuer, which is the reason why it lives in this file.  */
   4059 
   4060 void
   4061 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
   4062 		       const aarch64_opcode *opcode,
   4063 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
   4064 		       bfd_vma *address, char** notes,
   4065 		       char *comment, size_t comment_size,
   4066 		       aarch64_feature_set features,
   4067 		       struct aarch64_styler *styler)
   4068 {
   4069   unsigned int i, num_conds;
   4070   const char *name = NULL;
   4071   const aarch64_opnd_info *opnd = opnds + idx;
   4072   enum aarch64_modifier_kind kind;
   4073   uint64_t addr, enum_value;
   4074 
   4075   if (comment != NULL)
   4076     {
   4077       assert (comment_size > 0);
   4078       comment[0] = '\0';
   4079     }
   4080   else
   4081     assert (comment_size == 0);
   4082 
   4083   buf[0] = '\0';
   4084   if (pcrel_p)
   4085     *pcrel_p = 0;
   4086 
   4087   switch (opnd->type)
   4088     {
   4089     case AARCH64_OPND_Rd:
   4090     case AARCH64_OPND_Rn:
   4091     case AARCH64_OPND_Rm:
   4092     case AARCH64_OPND_Rt:
   4093     case AARCH64_OPND_Rt2:
   4094     case AARCH64_OPND_Rs:
   4095     case AARCH64_OPND_Ra:
   4096     case AARCH64_OPND_Rt_IN_SYS_ALIASES:
   4097     case AARCH64_OPND_Rt_LS64:
   4098     case AARCH64_OPND_Rt_SYS:
   4099     case AARCH64_OPND_PAIRREG:
   4100     case AARCH64_OPND_PAIRREG_OR_XZR:
   4101     case AARCH64_OPND_SVE_Rm:
   4102     case AARCH64_OPND_LSE128_Rt:
   4103     case AARCH64_OPND_LSE128_Rt2:
   4104       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
   4105 	 the <ic_op>, therefore we use opnd->present to override the
   4106 	 generic optional-ness information.  */
   4107       if (opnd->type == AARCH64_OPND_Rt_SYS)
   4108 	{
   4109 	  if (!opnd->present)
   4110 	    break;
   4111 	}
   4112       else if ((opnd->type == AARCH64_OPND_Rt_IN_SYS_ALIASES)
   4113 	       && (opnd->reg.regno
   4114 		   != get_optional_operand_default_value (opcode)))
   4115 	{
   4116 	  /* Avoid printing an invalid additional value for Rt in SYS aliases such as
   4117 	     BRB, provide a helpful comment instead */
   4118 	  snprintf (comment, comment_size, "unpredictable encoding (Rt!=31): #%u", opnd->reg.regno);
   4119 	  break;
   4120 	}
   4121       /* Omit the operand, e.g. RET.  */
   4122       else if (optional_operand_p (opcode, idx)
   4123 	       && (opnd->reg.regno
   4124 		   == get_optional_operand_default_value (opcode)))
   4125 	break;
   4126       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4127 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   4128       snprintf (buf, size, "%s",
   4129 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4130 						     opnd->qualifier, 0)));
   4131       break;
   4132 
   4133     case AARCH64_OPND_Rd_SP:
   4134     case AARCH64_OPND_Rn_SP:
   4135     case AARCH64_OPND_Rt_SP:
   4136     case AARCH64_OPND_SVE_Rn_SP:
   4137     case AARCH64_OPND_Rm_SP:
   4138       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4139 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
   4140 	      || opnd->qualifier == AARCH64_OPND_QLF_X
   4141 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
   4142       snprintf (buf, size, "%s",
   4143 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4144 						     opnd->qualifier, 1)));
   4145       break;
   4146 
   4147     case AARCH64_OPND_Rm_EXT:
   4148       kind = opnd->shifter.kind;
   4149       assert (idx == 1 || idx == 2);
   4150       if ((aarch64_stack_pointer_p (opnds)
   4151 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
   4152 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
   4153 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
   4154 	       && kind == AARCH64_MOD_UXTW)
   4155 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
   4156 		  && kind == AARCH64_MOD_UXTX)))
   4157 	{
   4158 	  /* 'LSL' is the preferred form in this case.  */
   4159 	  kind = AARCH64_MOD_LSL;
   4160 	  if (opnd->shifter.amount == 0)
   4161 	    {
   4162 	      /* Shifter omitted.  */
   4163 	      snprintf (buf, size, "%s",
   4164 			style_reg (styler,
   4165 				   get_int_reg_name (opnd->reg.regno,
   4166 						     opnd->qualifier, 0)));
   4167 	      break;
   4168 	    }
   4169 	}
   4170       if (opnd->shifter.amount)
   4171 	snprintf (buf, size, "%s, %s %s",
   4172 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4173 		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
   4174 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4175       else
   4176 	snprintf (buf, size, "%s, %s",
   4177 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4178 		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
   4179       break;
   4180 
   4181     case AARCH64_OPND_Rm_SFT:
   4182       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   4183 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   4184       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
   4185 	snprintf (buf, size, "%s",
   4186 		  style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4187 						       opnd->qualifier, 0)));
   4188       else
   4189 	snprintf (buf, size, "%s, %s %s",
   4190 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4191 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4192 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4193       break;
   4194 
   4195     case AARCH64_OPND_Rm_LSL:
   4196       assert (opnd->qualifier == AARCH64_OPND_QLF_X);
   4197       assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   4198       if (opnd->shifter.amount == 0)
   4199 	snprintf (buf, size, "%s",
   4200 		  style_reg (styler, get_int_reg_name (opnd->reg.regno,
   4201 						       opnd->qualifier, 0)));
   4202       else
   4203 	snprintf (buf, size, "%s, %s %s",
   4204 		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
   4205 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4206 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4207       break;
   4208 
   4209     case AARCH64_OPND_Fd:
   4210     case AARCH64_OPND_Fn:
   4211     case AARCH64_OPND_Fm:
   4212     case AARCH64_OPND_Fa:
   4213     case AARCH64_OPND_Ft:
   4214     case AARCH64_OPND_Ft2:
   4215     case AARCH64_OPND_Sd:
   4216     case AARCH64_OPND_Sn:
   4217     case AARCH64_OPND_Sm:
   4218     case AARCH64_OPND_SVE_VZn:
   4219     case AARCH64_OPND_SVE_Vd:
   4220     case AARCH64_OPND_SVE_Vm:
   4221     case AARCH64_OPND_SVE_Vn:
   4222       snprintf (buf, size, "%s",
   4223 		style_reg (styler, "%s%d",
   4224 			   aarch64_get_qualifier_name (opnd->qualifier),
   4225 			   opnd->reg.regno));
   4226       break;
   4227 
   4228     case AARCH64_OPND_Va:
   4229     case AARCH64_OPND_Vd:
   4230     case AARCH64_OPND_Vn:
   4231     case AARCH64_OPND_Vm:
   4232       snprintf (buf, size, "%s",
   4233 		style_reg (styler, "v%d.%s", opnd->reg.regno,
   4234 			   aarch64_get_qualifier_name (opnd->qualifier)));
   4235       break;
   4236 
   4237     case AARCH64_OPND_Ed:
   4238     case AARCH64_OPND_En:
   4239     case AARCH64_OPND_Em:
   4240     case AARCH64_OPND_Em16:
   4241     case AARCH64_OPND_Em8:
   4242     case AARCH64_OPND_SM3_IMM2:
   4243       snprintf (buf, size, "%s[%s]",
   4244 		style_reg (styler, "v%d.%s", opnd->reglane.regno,
   4245 			   aarch64_get_qualifier_name (opnd->qualifier)),
   4246 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4247       break;
   4248 
   4249     case AARCH64_OPND_Em_INDEX1_14:
   4250     case AARCH64_OPND_Em_INDEX2_13:
   4251     case AARCH64_OPND_Em_INDEX3_12:
   4252       snprintf (buf, size, "%s[%s]",
   4253 		style_reg (styler, "v%d", opnd->reglane.regno),
   4254 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4255       break;
   4256 
   4257     case AARCH64_OPND_VdD1:
   4258     case AARCH64_OPND_VnD1:
   4259       snprintf (buf, size, "%s[%s]",
   4260 		style_reg (styler, "v%d.d", opnd->reg.regno),
   4261 		style_imm (styler, "1"));
   4262       break;
   4263 
   4264     case AARCH64_OPND_LVn:
   4265     case AARCH64_OPND_LVn_LUT:
   4266     case AARCH64_OPND_LVt:
   4267     case AARCH64_OPND_LVt_AL:
   4268     case AARCH64_OPND_LEt:
   4269       print_register_list (buf, size, opnd, "v", styler);
   4270       break;
   4271 
   4272     case AARCH64_OPND_SVE_Pd:
   4273     case AARCH64_OPND_SVE_Pg3:
   4274     case AARCH64_OPND_SVE_Pg4_5:
   4275     case AARCH64_OPND_SVE_Pg4_10:
   4276     case AARCH64_OPND_SVE_Pg4_16:
   4277     case AARCH64_OPND_SVE_Pm:
   4278     case AARCH64_OPND_SVE_Pn:
   4279     case AARCH64_OPND_SVE_Pt:
   4280     case AARCH64_OPND_SME_Pm:
   4281       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4282 	snprintf (buf, size, "%s",
   4283 		  style_reg (styler, "p%d", opnd->reg.regno));
   4284       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
   4285 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
   4286 	snprintf (buf, size, "%s",
   4287 		  style_reg (styler, "p%d/%s", opnd->reg.regno,
   4288 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4289       else
   4290 	snprintf (buf, size, "%s",
   4291 		  style_reg (styler, "p%d.%s", opnd->reg.regno,
   4292 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4293       break;
   4294 
   4295     case AARCH64_OPND_SVE_PNd:
   4296     case AARCH64_OPND_SVE_PNg4_10:
   4297     case AARCH64_OPND_SVE_PNn:
   4298     case AARCH64_OPND_SVE_PNt:
   4299     case AARCH64_OPND_SME_PNd3:
   4300     case AARCH64_OPND_SME_PNg3:
   4301     case AARCH64_OPND_SME_PNn:
   4302       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4303 	snprintf (buf, size, "%s",
   4304 		  style_reg (styler, "pn%d", opnd->reg.regno));
   4305       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
   4306 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
   4307 	snprintf (buf, size, "%s",
   4308 		  style_reg (styler, "pn%d/%s", opnd->reg.regno,
   4309 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4310       else
   4311 	snprintf (buf, size, "%s",
   4312 		  style_reg (styler, "pn%d.%s", opnd->reg.regno,
   4313 			     aarch64_get_qualifier_name (opnd->qualifier)));
   4314       break;
   4315 
   4316     case AARCH64_OPND_SME_Pdx2:
   4317     case AARCH64_OPND_SME_PdxN:
   4318       print_register_list (buf, size, opnd, "p", styler);
   4319       break;
   4320 
   4321     case AARCH64_OPND_SME_PNn3_INDEX1:
   4322     case AARCH64_OPND_SME_PNn3_INDEX2:
   4323       snprintf (buf, size, "%s[%s]",
   4324 		style_reg (styler, "pn%d", opnd->reglane.regno),
   4325 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4326       break;
   4327 
   4328     case AARCH64_OPND_SVE_Za_5:
   4329     case AARCH64_OPND_SVE_Za_16:
   4330     case AARCH64_OPND_SVE_Zd:
   4331     case AARCH64_OPND_SVE_Zm_5:
   4332     case AARCH64_OPND_SVE_Zm_16:
   4333     case AARCH64_OPND_SVE_Zn:
   4334     case AARCH64_OPND_SVE_Zt:
   4335     case AARCH64_OPND_SME_Zm:
   4336       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   4337        snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
   4338       else
   4339        snprintf (buf, size, "%s",
   4340 		 style_reg (styler, "z%d.%s", opnd->reg.regno,
   4341 			    aarch64_get_qualifier_name (opnd->qualifier)));
   4342       break;
   4343 
   4344     case AARCH64_OPND_SVE_ZnxN:
   4345     case AARCH64_OPND_SVE_ZtxN:
   4346     case AARCH64_OPND_SME_Zdnx2:
   4347     case AARCH64_OPND_SME_Zdnx4:
   4348     case AARCH64_OPND_SME_Zdnx4_STRIDED:
   4349     case AARCH64_OPND_SME_Zmx2:
   4350     case AARCH64_OPND_SME_Zmx4:
   4351     case AARCH64_OPND_SME_Znx2:
   4352     case AARCH64_OPND_SME_Znx2_BIT_INDEX:
   4353     case AARCH64_OPND_SME_Znx4:
   4354     case AARCH64_OPND_SME_Ztx2_STRIDED:
   4355     case AARCH64_OPND_SME_Ztx4_STRIDED:
   4356       print_register_list (buf, size, opnd, "z", styler);
   4357       break;
   4358 
   4359     case AARCH64_OPND_SVE_Zm1_23_INDEX:
   4360     case AARCH64_OPND_SVE_Zm2_22_INDEX:
   4361     case AARCH64_OPND_SVE_Zm3_INDEX:
   4362     case AARCH64_OPND_SVE_Zm3_22_INDEX:
   4363     case AARCH64_OPND_SVE_Zm3_19_INDEX:
   4364     case AARCH64_OPND_SVE_Zm3_12_INDEX:
   4365     case AARCH64_OPND_SVE_Zm3_11_INDEX:
   4366     case AARCH64_OPND_SVE_Zm3_10_INDEX:
   4367     case AARCH64_OPND_SVE_Zm4_11_INDEX:
   4368     case AARCH64_OPND_SVE_Zm4_INDEX:
   4369     case AARCH64_OPND_SVE_Zn_INDEX:
   4370     case AARCH64_OPND_SME_Zm_INDEX1:
   4371     case AARCH64_OPND_SME_Zm_INDEX2:
   4372     case AARCH64_OPND_SME_Zm_INDEX2_3:
   4373     case AARCH64_OPND_SME_Zm_INDEX3_1:
   4374     case AARCH64_OPND_SME_Zm_INDEX3_2:
   4375     case AARCH64_OPND_SME_Zm_INDEX3_3:
   4376     case AARCH64_OPND_SME_Zm_INDEX3_10:
   4377     case AARCH64_OPND_SVE_Zn_5_INDEX:
   4378     case AARCH64_OPND_SME_Zm_INDEX4_1:
   4379     case AARCH64_OPND_SME_Zm_INDEX4_2:
   4380     case AARCH64_OPND_SME_Zm_INDEX4_3:
   4381     case AARCH64_OPND_SME_Zm_INDEX4_10:
   4382     case AARCH64_OPND_SME_Zn_INDEX1_16:
   4383     case AARCH64_OPND_SME_Zn_INDEX2_15:
   4384     case AARCH64_OPND_SME_Zn_INDEX2_16:
   4385     case AARCH64_OPND_SME_Zn_INDEX3_14:
   4386     case AARCH64_OPND_SME_Zn_INDEX3_15:
   4387     case AARCH64_OPND_SME_Zn_INDEX4_14:
   4388       snprintf (buf, size, "%s[%s]",
   4389 		(opnd->qualifier == AARCH64_OPND_QLF_NIL
   4390 		 ? style_reg (styler, "z%d", opnd->reglane.regno)
   4391 		 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
   4392 			      aarch64_get_qualifier_name (opnd->qualifier))),
   4393 		style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4394       break;
   4395 
   4396     case AARCH64_OPND_SVE_Zn0_INDEX:
   4397     case AARCH64_OPND_SVE_Zn1_17_INDEX:
   4398     case AARCH64_OPND_SVE_Zn2_18_INDEX:
   4399     case AARCH64_OPND_SVE_Zn3_22_INDEX:
   4400     case AARCH64_OPND_SVE_Zd0_INDEX:
   4401     case AARCH64_OPND_SVE_Zd1_17_INDEX:
   4402     case AARCH64_OPND_SVE_Zd2_18_INDEX:
   4403     case AARCH64_OPND_SVE_Zd3_22_INDEX:
   4404       if (opnd->reglane.index == 0)
   4405 	snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
   4406       else
   4407 	snprintf (buf, size, "%s[%s]",
   4408 		  style_reg (styler, "z%d", opnd->reglane.regno),
   4409 		  style_imm (styler, "%" PRIi64, opnd->reglane.index));
   4410       break;
   4411 
   4412     case AARCH64_OPND_SME_ZAda_1b:
   4413     case AARCH64_OPND_SME_ZAda_2b:
   4414     case AARCH64_OPND_SME_ZAda_3b:
   4415       snprintf (buf, size, "%s",
   4416 		style_reg (styler, "za%d.%s", opnd->reg.regno,
   4417 			   aarch64_get_qualifier_name (opnd->qualifier)));
   4418       break;
   4419 
   4420     case AARCH64_OPND_SME_ZA_HV_idx_src:
   4421     case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
   4422     case AARCH64_OPND_SME_ZA_HV_idx_dest:
   4423     case AARCH64_OPND_SME_ZA_HV_idx_destxN:
   4424     case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
   4425       snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
   4426 		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
   4427 		style_reg (styler, "za%d%c.%s",
   4428 			   opnd->indexed_za.regno,
   4429 			   opnd->indexed_za.v == 1 ? 'v' : 'h',
   4430 			   aarch64_get_qualifier_name (opnd->qualifier)),
   4431 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4432 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
   4433 		opnd->indexed_za.index.countm1 ? ":" : "",
   4434 		(opnd->indexed_za.index.countm1
   4435 		 ? style_imm (styler, "%d",
   4436 			      opnd->indexed_za.index.imm
   4437 			      + opnd->indexed_za.index.countm1)
   4438 		 : ""),
   4439 		opnd->indexed_za.group_size ? ", " : "",
   4440 		opnd->indexed_za.group_size == 2
   4441 		? style_sub_mnem (styler, "vgx2")
   4442 		: opnd->indexed_za.group_size == 4
   4443 		? style_sub_mnem (styler, "vgx4") : "",
   4444 		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
   4445       break;
   4446 
   4447     case AARCH64_OPND_SME_list_of_64bit_tiles:
   4448       print_sme_za_list (buf, size, opnd->imm.value, styler);
   4449       break;
   4450 
   4451     case AARCH64_OPND_SME_ZA_array_off1x4:
   4452     case AARCH64_OPND_SME_ZA_array_off2x2:
   4453     case AARCH64_OPND_SME_ZA_array_off2x4:
   4454     case AARCH64_OPND_SME_ZA_array_off3_0:
   4455     case AARCH64_OPND_SME_ZA_array_off3_5:
   4456     case AARCH64_OPND_SME_ZA_array_off3x2:
   4457     case AARCH64_OPND_SME_ZA_array_off4:
   4458       snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
   4459 		style_reg (styler, "za%s%s",
   4460 			   opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
   4461 			   (opnd->qualifier == AARCH64_OPND_QLF_NIL
   4462 			    ? ""
   4463 			    : aarch64_get_qualifier_name (opnd->qualifier))),
   4464 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4465 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
   4466 		opnd->indexed_za.index.countm1 ? ":" : "",
   4467 		(opnd->indexed_za.index.countm1
   4468 		 ? style_imm (styler, "%d",
   4469 			      opnd->indexed_za.index.imm
   4470 			      + opnd->indexed_za.index.countm1)
   4471 		 : ""),
   4472 		opnd->indexed_za.group_size ? ", " : "",
   4473 		opnd->indexed_za.group_size == 2
   4474 		? style_sub_mnem (styler, "vgx2")
   4475 		: opnd->indexed_za.group_size == 4
   4476 		? style_sub_mnem (styler, "vgx4") : "");
   4477       break;
   4478 
   4479     case AARCH64_OPND_SME_ZA_array_vrsb_1:
   4480     case AARCH64_OPND_SME_ZA_array_vrsh_1:
   4481     case AARCH64_OPND_SME_ZA_array_vrss_1:
   4482     case AARCH64_OPND_SME_ZA_array_vrsd_1:
   4483     case AARCH64_OPND_SME_ZA_array_vrsb_2:
   4484     case AARCH64_OPND_SME_ZA_array_vrsh_2:
   4485     case AARCH64_OPND_SME_ZA_array_vrss_2:
   4486     case AARCH64_OPND_SME_ZA_array_vrsd_2:
   4487     case AARCH64_OPND_SME_ZA_ARRAY4:
   4488       snprintf (buf, size, "%s [%s, %s%s%s]",
   4489 		style_reg (styler, "za%d%c%s%s",
   4490 			   opnd->indexed_za.regno,
   4491 			   opnd->indexed_za.v ? 'v': 'h',
   4492 			   opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
   4493 			   (opnd->qualifier == AARCH64_OPND_QLF_NIL
   4494 			    ? ""
   4495 			    : aarch64_get_qualifier_name (opnd->qualifier))),
   4496 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4497 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
   4498 		opnd->indexed_za.index.countm1 ? ":" : "",
   4499 		opnd->indexed_za.index.countm1  ? style_imm (styler, "%d",
   4500 		opnd->indexed_za.index.imm
   4501 		+ opnd->indexed_za.index.countm1):"");
   4502       break;
   4503 
   4504     case AARCH64_OPND_SME_SM_ZA:
   4505       snprintf (buf, size, "%s",
   4506 		style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
   4507       break;
   4508 
   4509     case AARCH64_OPND_SME_PnT_Wm_imm:
   4510       snprintf (buf, size, "%s[%s, %s]",
   4511 		style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
   4512 			   aarch64_get_qualifier_name (opnd->qualifier)),
   4513 		style_reg (styler, "w%d", opnd->indexed_za.index.regno),
   4514 		style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
   4515       break;
   4516 
   4517     case AARCH64_OPND_SME_VLxN_10:
   4518     case AARCH64_OPND_SME_VLxN_13:
   4519       enum_value = opnd->imm.value;
   4520       assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
   4521       snprintf (buf, size, "%s",
   4522 		style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
   4523       break;
   4524 
   4525     case AARCH64_OPND_BRBOP:
   4526       enum_value = opnd->imm.value;
   4527       assert (enum_value < ARRAY_SIZE (aarch64_brbop_array));
   4528       snprintf (buf, size, "%s",
   4529 		style_sub_mnem (styler, aarch64_brbop_array[enum_value]));
   4530       break;
   4531 
   4532     case AARCH64_OPND_CRn:
   4533     case AARCH64_OPND_CRm:
   4534       snprintf (buf, size, "%s",
   4535 		style_reg (styler, "C%" PRIi64, opnd->imm.value));
   4536       break;
   4537 
   4538     case AARCH64_OPND_IDX:
   4539     case AARCH64_OPND_MASK:
   4540     case AARCH64_OPND_IMM:
   4541     case AARCH64_OPND_IMM_2:
   4542     case AARCH64_OPND_WIDTH:
   4543     case AARCH64_OPND_UIMM3_OP1:
   4544     case AARCH64_OPND_UIMM3_OP2:
   4545     case AARCH64_OPND_BIT_NUM:
   4546     case AARCH64_OPND_IMM_VLSL:
   4547     case AARCH64_OPND_IMM_VLSR:
   4548     case AARCH64_OPND_SHLL_IMM:
   4549     case AARCH64_OPND_IMM0:
   4550     case AARCH64_OPND_IMMR:
   4551     case AARCH64_OPND_IMMS:
   4552     case AARCH64_OPND_UNDEFINED:
   4553     case AARCH64_OPND_FBITS:
   4554     case AARCH64_OPND_TME_UIMM16:
   4555     case AARCH64_OPND_SIMM5:
   4556     case AARCH64_OPND_SME_SHRIMM4:
   4557     case AARCH64_OPND_SME_SHRIMM5:
   4558     case AARCH64_OPND_SVE_SHLIMM_PRED:
   4559     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
   4560     case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
   4561     case AARCH64_OPND_SVE_SHRIMM_PRED:
   4562     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
   4563     case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
   4564     case AARCH64_OPND_SVE_SIMM5:
   4565     case AARCH64_OPND_SVE_SIMM5B:
   4566     case AARCH64_OPND_SVE_SIMM6:
   4567     case AARCH64_OPND_SVE_SIMM8:
   4568     case AARCH64_OPND_SVE_UIMM3:
   4569     case AARCH64_OPND_SVE_UIMM7:
   4570     case AARCH64_OPND_SVE_UIMM8:
   4571     case AARCH64_OPND_SVE_UIMM4:
   4572     case AARCH64_OPND_SVE_UIMM8_53:
   4573     case AARCH64_OPND_IMM_ROT1:
   4574     case AARCH64_OPND_IMM_ROT2:
   4575     case AARCH64_OPND_IMM_ROT3:
   4576     case AARCH64_OPND_SVE_IMM_ROT1:
   4577     case AARCH64_OPND_SVE_IMM_ROT2:
   4578     case AARCH64_OPND_SVE_IMM_ROT3:
   4579     case AARCH64_OPND_CSSC_SIMM8:
   4580     case AARCH64_OPND_CSSC_UIMM8:
   4581       snprintf (buf, size, "%s",
   4582 		style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4583       break;
   4584 
   4585     case AARCH64_OPND_SVE_I1_HALF_ONE:
   4586     case AARCH64_OPND_SVE_I1_HALF_TWO:
   4587     case AARCH64_OPND_SVE_I1_ZERO_ONE:
   4588       {
   4589 	single_conv_t c;
   4590 	c.i = opnd->imm.value;
   4591 	snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
   4592 	break;
   4593       }
   4594 
   4595     case AARCH64_OPND_SVE_PATTERN:
   4596       if (optional_operand_p (opcode, idx)
   4597 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   4598 	break;
   4599       enum_value = opnd->imm.value;
   4600       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   4601       if (aarch64_sve_pattern_array[enum_value])
   4602 	snprintf (buf, size, "%s",
   4603 		  style_reg (styler, aarch64_sve_pattern_array[enum_value]));
   4604       else
   4605 	snprintf (buf, size, "%s",
   4606 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4607       break;
   4608 
   4609     case AARCH64_OPND_SVE_PATTERN_SCALED:
   4610       if (optional_operand_p (opcode, idx)
   4611 	  && !opnd->shifter.operator_present
   4612 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   4613 	break;
   4614       enum_value = opnd->imm.value;
   4615       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   4616       if (aarch64_sve_pattern_array[opnd->imm.value])
   4617 	snprintf (buf, size, "%s",
   4618 		  style_reg (styler,
   4619 			     aarch64_sve_pattern_array[opnd->imm.value]));
   4620       else
   4621 	snprintf (buf, size, "%s",
   4622 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4623       if (opnd->shifter.operator_present)
   4624 	{
   4625 	  size_t len = strlen (buf);
   4626 	  const char *shift_name
   4627 	    = aarch64_operand_modifiers[opnd->shifter.kind].name;
   4628 	  snprintf (buf + len, size - len, ", %s %s",
   4629 		    style_sub_mnem (styler, shift_name),
   4630 		    style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4631 	}
   4632       break;
   4633 
   4634     case AARCH64_OPND_SVE_PRFOP:
   4635       enum_value = opnd->imm.value;
   4636       assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
   4637       if (aarch64_sve_prfop_array[enum_value])
   4638 	snprintf (buf, size, "%s",
   4639 		  style_reg (styler, aarch64_sve_prfop_array[enum_value]));
   4640       else
   4641 	snprintf (buf, size, "%s",
   4642 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4643       break;
   4644 
   4645     case AARCH64_OPND_IMM_MOV:
   4646       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   4647 	{
   4648 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
   4649 	    {
   4650 	      int imm32 = opnd->imm.value;
   4651 	      snprintf (buf, size, "%s",
   4652 			style_imm (styler, "#0x%-20x", imm32));
   4653 	      snprintf (comment, comment_size, "#%d", imm32);
   4654 	    }
   4655 	  break;
   4656 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
   4657 	  snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
   4658 						opnd->imm.value));
   4659 	  snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
   4660 	  break;
   4661 	default:
   4662 	  snprintf (buf, size, "<invalid>");
   4663 	  break;
   4664 	}
   4665       break;
   4666 
   4667     case AARCH64_OPND_FPIMM0:
   4668       snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
   4669       break;
   4670 
   4671     case AARCH64_OPND_LIMM:
   4672     case AARCH64_OPND_AIMM:
   4673     case AARCH64_OPND_HALF:
   4674     case AARCH64_OPND_SVE_INV_LIMM:
   4675     case AARCH64_OPND_SVE_LIMM:
   4676     case AARCH64_OPND_SVE_LIMM_MOV:
   4677       if (opnd->shifter.amount)
   4678 	snprintf (buf, size, "%s, %s %s",
   4679 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
   4680 		  style_sub_mnem (styler, "lsl"),
   4681 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4682       else
   4683 	snprintf (buf, size, "%s",
   4684 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
   4685       break;
   4686 
   4687     case AARCH64_OPND_SIMD_IMM:
   4688     case AARCH64_OPND_SIMD_IMM_SFT:
   4689       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
   4690 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
   4691 	snprintf (buf, size, "%s",
   4692 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
   4693       else
   4694 	snprintf (buf, size, "%s, %s %s",
   4695 		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
   4696 		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
   4697 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4698       break;
   4699 
   4700     case AARCH64_OPND_SVE_AIMM:
   4701     case AARCH64_OPND_SVE_ASIMM:
   4702       if (opnd->shifter.amount)
   4703 	snprintf (buf, size, "%s, %s %s",
   4704 		  style_imm (styler, "#%" PRIi64, opnd->imm.value),
   4705 		  style_sub_mnem (styler, "lsl"),
   4706 		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
   4707       else
   4708 	snprintf (buf, size, "%s",
   4709 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   4710       break;
   4711 
   4712     case AARCH64_OPND_FPIMM:
   4713     case AARCH64_OPND_SIMD_FPIMM:
   4714     case AARCH64_OPND_SVE_FPIMM8:
   4715       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   4716 	{
   4717 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
   4718 	    {
   4719 	      half_conv_t c;
   4720 	      c.i = expand_fp_imm (2, opnd->imm.value);
   4721 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
   4722 	    }
   4723 	  break;
   4724 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
   4725 	    {
   4726 	      single_conv_t c;
   4727 	      c.i = expand_fp_imm (4, opnd->imm.value);
   4728 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
   4729 	    }
   4730 	  break;
   4731 	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
   4732 	    {
   4733 	      double_conv_t c;
   4734 	      c.i = expand_fp_imm (8, opnd->imm.value);
   4735 	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
   4736 	    }
   4737 	  break;
   4738 	default:
   4739 	  snprintf (buf, size, "<invalid>");
   4740 	  break;
   4741 	}
   4742       break;
   4743 
   4744     case AARCH64_OPND_CCMP_IMM:
   4745     case AARCH64_OPND_NZCV:
   4746     case AARCH64_OPND_EXCEPTION:
   4747     case AARCH64_OPND_UIMM4:
   4748     case AARCH64_OPND_UIMM4_ADDG:
   4749     case AARCH64_OPND_UIMM7:
   4750     case AARCH64_OPND_UIMM10:
   4751       if (optional_operand_p (opcode, idx)
   4752 	  && (opnd->imm.value ==
   4753 	      (int64_t) get_optional_operand_default_value (opcode)))
   4754 	/* Omit the operand, e.g. DCPS1.  */
   4755 	break;
   4756       snprintf (buf, size, "%s",
   4757 		style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
   4758       break;
   4759 
   4760     case AARCH64_OPND_COND:
   4761     case AARCH64_OPND_COND1:
   4762       snprintf (buf, size, "%s",
   4763 		style_sub_mnem (styler, opnd->cond->names[0]));
   4764       num_conds = ARRAY_SIZE (opnd->cond->names);
   4765       for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
   4766 	{
   4767 	  size_t len = comment != NULL ? strlen (comment) : 0;
   4768 	  if (i == 1)
   4769 	    snprintf (comment + len, comment_size - len, "%s = %s",
   4770 		      opnd->cond->names[0], opnd->cond->names[i]);
   4771 	  else
   4772 	    snprintf (comment + len, comment_size - len, ", %s",
   4773 		      opnd->cond->names[i]);
   4774 	}
   4775       break;
   4776 
   4777     case AARCH64_OPND_ADDR_ADRP:
   4778       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
   4779 	+ opnd->imm.value;
   4780       if (pcrel_p)
   4781 	*pcrel_p = 1;
   4782       if (address)
   4783 	*address = addr;
   4784       /* This is not necessary during the disassembling, as print_address_func
   4785 	 in the disassemble_info will take care of the printing.  But some
   4786 	 other callers may be still interested in getting the string in *STR,
   4787 	 so here we do snprintf regardless.  */
   4788       snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
   4789       break;
   4790 
   4791     case AARCH64_OPND_ADDR_PCREL14:
   4792     case AARCH64_OPND_ADDR_PCREL19:
   4793     case AARCH64_OPND_ADDR_PCREL21:
   4794     case AARCH64_OPND_ADDR_PCREL26:
   4795       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
   4796       if (pcrel_p)
   4797 	*pcrel_p = 1;
   4798       if (address)
   4799 	*address = addr;
   4800       /* This is not necessary during the disassembling, as print_address_func
   4801 	 in the disassemble_info will take care of the printing.  But some
   4802 	 other callers may be still interested in getting the string in *STR,
   4803 	 so here we do snprintf regardless.  */
   4804       snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
   4805       break;
   4806 
   4807     case AARCH64_OPND_ADDR_SIMPLE:
   4808     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
   4809     case AARCH64_OPND_SIMD_ADDR_POST:
   4810       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   4811       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
   4812 	{
   4813 	  if (opnd->addr.offset.is_reg)
   4814 	    snprintf (buf, size, "[%s], %s",
   4815 		      style_reg (styler, name),
   4816 		      style_reg (styler, "x%d", opnd->addr.offset.regno));
   4817 	  else
   4818 	    snprintf (buf, size, "[%s], %s",
   4819 		      style_reg (styler, name),
   4820 		      style_imm (styler, "#%d", opnd->addr.offset.imm));
   4821 	}
   4822       else
   4823 	snprintf (buf, size, "[%s]", style_reg (styler, name));
   4824       break;
   4825 
   4826     case AARCH64_OPND_ADDR_REGOFF:
   4827     case AARCH64_OPND_SVE_ADDR_R:
   4828     case AARCH64_OPND_SVE_ADDR_RR:
   4829     case AARCH64_OPND_SVE_ADDR_RR_LSL1:
   4830     case AARCH64_OPND_SVE_ADDR_RR_LSL2:
   4831     case AARCH64_OPND_SVE_ADDR_RR_LSL3:
   4832     case AARCH64_OPND_SVE_ADDR_RR_LSL4:
   4833     case AARCH64_OPND_SVE_ADDR_RX:
   4834     case AARCH64_OPND_SVE_ADDR_RX_LSL1:
   4835     case AARCH64_OPND_SVE_ADDR_RX_LSL2:
   4836     case AARCH64_OPND_SVE_ADDR_RX_LSL3:
   4837     case AARCH64_OPND_SVE_ADDR_RX_LSL4:
   4838       print_register_offset_address
   4839 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4840 	 get_offset_int_reg_name (opnd), styler);
   4841       break;
   4842 
   4843     case AARCH64_OPND_SVE_ADDR_ZX:
   4844       print_register_offset_address
   4845 	(buf, size, opnd,
   4846 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   4847 	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
   4848       break;
   4849 
   4850     case AARCH64_OPND_SVE_ADDR_RZ:
   4851     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
   4852     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
   4853     case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
   4854     case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
   4855     case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
   4856     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
   4857     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
   4858     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
   4859     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
   4860     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
   4861     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
   4862       print_register_offset_address
   4863 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4864 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
   4865 	 styler);
   4866       break;
   4867 
   4868     case AARCH64_OPND_ADDR_SIMM7:
   4869     case AARCH64_OPND_ADDR_SIMM9:
   4870     case AARCH64_OPND_ADDR_SIMM9_2:
   4871     case AARCH64_OPND_ADDR_SIMM10:
   4872     case AARCH64_OPND_ADDR_SIMM11:
   4873     case AARCH64_OPND_ADDR_SIMM13:
   4874     case AARCH64_OPND_RCPC3_ADDR_OFFSET:
   4875     case AARCH64_OPND_ADDR_OFFSET:
   4876     case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
   4877     case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
   4878     case AARCH64_OPND_RCPC3_ADDR_POSTIND:
   4879     case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
   4880     case AARCH64_OPND_SME_ADDR_RI_U4xVL:
   4881     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
   4882     case AARCH64_OPND_SVE_ADDR_RI_S4x32:
   4883     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
   4884     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
   4885     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
   4886     case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
   4887     case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
   4888     case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
   4889     case AARCH64_OPND_SVE_ADDR_RI_U6:
   4890     case AARCH64_OPND_SVE_ADDR_RI_U6x2:
   4891     case AARCH64_OPND_SVE_ADDR_RI_U6x4:
   4892     case AARCH64_OPND_SVE_ADDR_RI_U6x8:
   4893       print_immediate_offset_address
   4894 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   4895 	 styler);
   4896       break;
   4897 
   4898     case AARCH64_OPND_SVE_ADDR_ZI_U5:
   4899     case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
   4900     case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
   4901     case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
   4902       print_immediate_offset_address
   4903 	(buf, size, opnd,
   4904 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   4905 	 styler);
   4906       break;
   4907 
   4908     case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
   4909     case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
   4910     case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
   4911       print_register_offset_address
   4912 	(buf, size, opnd,
   4913 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   4914 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
   4915 	 styler);
   4916       break;
   4917 
   4918     case AARCH64_OPND_ADDR_UIMM12:
   4919       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   4920       if (opnd->addr.offset.imm)
   4921 	snprintf (buf, size, "[%s, %s]",
   4922 		  style_reg (styler, name),
   4923 		  style_imm (styler, "#%d", opnd->addr.offset.imm));
   4924       else
   4925 	snprintf (buf, size, "[%s]", style_reg (styler, name));
   4926       break;
   4927 
   4928     case AARCH64_OPND_SYSREG:
   4929     case AARCH64_OPND_SYSREG128:
   4930       for (i = 0; aarch64_sys_regs[i].name; ++i)
   4931 	{
   4932 	  const aarch64_sys_reg *sr = aarch64_sys_regs + i;
   4933 
   4934 	  bool exact_match
   4935 	    = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
   4936 	    || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
   4937 	    && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
   4938 
   4939 	  /* Try and find an exact match, But if that fails, return the first
   4940 	     partial match that was found.  */
   4941 	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
   4942 	      && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
   4943 	      && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
   4944 	      && (name == NULL || exact_match))
   4945 	    {
   4946 	      name = aarch64_sys_regs[i].name;
   4947 	      if (exact_match)
   4948 		{
   4949 		  if (notes)
   4950 		    *notes = NULL;
   4951 		  break;
   4952 		}
   4953 
   4954 	      /* If we didn't match exactly, that means the presense of a flag
   4955 		 indicates what we didn't want for this instruction.  e.g. If
   4956 		 F_REG_READ is there, that means we were looking for a write
   4957 		 register.  See aarch64_ext_sysreg.  */
   4958 	      if (aarch64_sys_regs[i].flags & F_REG_WRITE)
   4959 		*notes = _("reading from a write-only register");
   4960 	      else if (aarch64_sys_regs[i].flags & F_REG_READ)
   4961 		*notes = _("writing to a read-only register");
   4962 	    }
   4963 	}
   4964 
   4965       if (name)
   4966 	snprintf (buf, size, "%s", style_reg (styler, name));
   4967       else
   4968 	{
   4969 	  /* Implementation defined system register.  */
   4970 	  unsigned int value = opnd->sysreg.value;
   4971 	  snprintf (buf, size, "%s",
   4972 		    style_reg (styler, "s%u_%u_c%u_c%u_%u",
   4973 			       (value >> 14) & 0x3, (value >> 11) & 0x7,
   4974 			       (value >> 7) & 0xf, (value >> 3) & 0xf,
   4975 			       value & 0x7));
   4976 	}
   4977       break;
   4978 
   4979     case AARCH64_OPND_PSTATEFIELD:
   4980       for (i = 0; aarch64_pstatefields[i].name; ++i)
   4981         if (aarch64_pstatefields[i].value == opnd->pstatefield)
   4982           {
   4983             /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
   4984                SVCRZA and SVCRSMZA.  */
   4985             uint32_t flags = aarch64_pstatefields[i].flags;
   4986             if (flags & F_REG_IN_CRM
   4987                 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
   4988                     != PSTATE_DECODE_CRM (flags)))
   4989               continue;
   4990             break;
   4991           }
   4992       assert (aarch64_pstatefields[i].name);
   4993       snprintf (buf, size, "%s",
   4994 		style_reg (styler, aarch64_pstatefields[i].name));
   4995       break;
   4996 
   4997     case AARCH64_OPND_SYSREG_AT:
   4998     case AARCH64_OPND_SYSREG_DC:
   4999     case AARCH64_OPND_SYSREG_IC:
   5000     case AARCH64_OPND_SYSREG_TLBI:
   5001     case AARCH64_OPND_SYSREG_TLBIP:
   5002     case AARCH64_OPND_SYSREG_SR:
   5003       snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
   5004       break;
   5005 
   5006     case AARCH64_OPND_BARRIER:
   5007     case AARCH64_OPND_BARRIER_DSB_NXS:
   5008       {
   5009 	if (opnd->barrier->name[0] == '#')
   5010 	  snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
   5011 	else
   5012 	  snprintf (buf, size, "%s",
   5013 		    style_sub_mnem (styler, opnd->barrier->name));
   5014       }
   5015       break;
   5016 
   5017     case AARCH64_OPND_BARRIER_ISB:
   5018       /* Operand can be omitted, e.g. in DCPS1.  */
   5019       if (! optional_operand_p (opcode, idx)
   5020 	  || (opnd->barrier->value
   5021 	      != get_optional_operand_default_value (opcode)))
   5022 	snprintf (buf, size, "%s",
   5023 		  style_imm (styler, "#0x%x", opnd->barrier->value));
   5024       break;
   5025 
   5026     case AARCH64_OPND_PRFOP:
   5027       if (opnd->prfop->name != NULL)
   5028 	snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
   5029       else
   5030 	snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
   5031 					      opnd->prfop->value));
   5032       break;
   5033 
   5034     case AARCH64_OPND_RPRFMOP:
   5035       enum_value = opnd->imm.value;
   5036       if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
   5037 	  && aarch64_rprfmop_array[enum_value])
   5038 	snprintf (buf, size, "%s",
   5039 		  style_reg (styler, aarch64_rprfmop_array[enum_value]));
   5040       else
   5041 	snprintf (buf, size, "%s",
   5042 		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
   5043       break;
   5044 
   5045     case AARCH64_OPND_BARRIER_PSB:
   5046       snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
   5047       break;
   5048 
   5049     case AARCH64_OPND_X16:
   5050       snprintf (buf, size, "%s", style_reg (styler, "x16"));
   5051       break;
   5052 
   5053     case AARCH64_OPND_SME_ZT0:
   5054       snprintf (buf, size, "%s", style_reg (styler, "zt0"));
   5055       break;
   5056 
   5057     case AARCH64_OPND_SME_ZT0_INDEX:
   5058       snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
   5059 		style_imm (styler, "%d", (int) opnd->imm.value));
   5060       break;
   5061     case AARCH64_OPND_SME_ZT0_INDEX2_12:
   5062       snprintf (buf, size, "%s[%s, %s]", style_reg (styler, "zt0"),
   5063 		style_imm (styler, "%d", (int) opnd->imm.value),
   5064 		style_sub_mnem (styler, "mul vl"));
   5065       break;
   5066 
   5067     case AARCH64_OPND_SME_ZT0_LIST:
   5068       snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
   5069       break;
   5070 
   5071     case AARCH64_OPND_BARRIER_GCSB:
   5072       snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
   5073       break;
   5074 
   5075     case AARCH64_OPND_BTI_TARGET:
   5076       if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
   5077 	snprintf (buf, size, "%s",
   5078 		  style_sub_mnem (styler, opnd->hint_option->name));
   5079       break;
   5080 
   5081     case AARCH64_OPND_MOPS_ADDR_Rd:
   5082     case AARCH64_OPND_MOPS_ADDR_Rs:
   5083       snprintf (buf, size, "[%s]!",
   5084 		style_reg (styler,
   5085 			   get_int_reg_name (opnd->reg.regno,
   5086 					     AARCH64_OPND_QLF_X, 0)));
   5087       break;
   5088 
   5089     case AARCH64_OPND_MOPS_WB_Rn:
   5090       snprintf (buf, size, "%s!",
   5091 		style_reg (styler, get_int_reg_name (opnd->reg.regno,
   5092 						     AARCH64_OPND_QLF_X, 0)));
   5093       break;
   5094 
   5095     default:
   5096       snprintf (buf, size, "<invalid>");
   5097       break;
   5098     }
   5099 }
   5100 
/* Pack the op0:op1:CRn:CRm:op2 fields of a system register or system
   instruction encoding into a single integer value.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm field values, matching the C<n>
   notation used in the architecture documentation.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
   5125 
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Table of named system registers, generated from aarch64-sys-regs.def
   and terminated by a null-name sentinel entry.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  #define SYSREG(name, encoding, flags, features) \
    { name, encoding, flags, features },
  #include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
  #undef SYSREG
};
   5139 
   5140 bool
   5141 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
   5142 {
   5143   return (reg_flags & F_DEPRECATED) != 0;
   5144 }
   5145 
   5146 bool
   5147 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
   5148 {
   5149   return (reg_flags & F_REG_128) != 0;
   5150 }
   5151 
   5152 bool
   5153 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
   5154 {
   5155   return (reg_flags & F_REG_ALIAS) != 0;
   5156 }
   5157 
/* Table of PSTATE fields accepted by MSR (immediate).

   The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example, op2 = 3, op1 = 2.  CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",	0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset",	0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr",	0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan",	0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao",	0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs",	0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit",	0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco",	0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  /* The SME SVCR* fields share value 0x1b and are disambiguated by the
     CRm bits recorded via PSTATE_ENCODE_CRM_AND_IMM.  */
  { "svcrsm",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint",	0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  { 0,	CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
   5184 
   5185 bool
   5186 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
   5187 				 const aarch64_sys_reg *reg)
   5188 {
   5189   if (!(reg->flags & F_ARCHEXT))
   5190     return true;
   5191 
   5192   return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
   5193 }
   5194 
/* Operands accepted by the IC (instruction cache maintenance) system
   instruction; F_HASXT marks operations that take an Xt register.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES },
    { "iallu",   CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES },
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5202 
/* Operands accepted by the DC (data cache maintenance) system
   instruction.  F_HASXT marks operations taking an Xt register;
   F_ARCHEXT entries additionally require the listed feature.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT, AARCH64_NO_FEATURES },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (CVADP) },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES },
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5237 
/* Operands accepted by the AT (address translation) system instruction;
   all take an Xt register (F_HASXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1a",      CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e2a",      CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e3a",      CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5259 
/* Operands accepted by the TLBI (TLB invalidate) system instruction.
   Each TLBI_XS_OP expansion emits a base entry plus the corresponding
   "nxs" variant guarded by the XS feature.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "paallos",    CPENS (6, C8, C1, 4), 0, AARCH64_NO_FEATURES },
    { "paall",      CPENS (6, C8, C7, 4), 0, AARCH64_NO_FEATURES },

/* Base operation plus its "nxs" variant (XS feature).  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1",   CPENS (0, C8, C7, 0), 0)
    TLBI_XS_OP ( "vae1",      CPENS (0, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1",    CPENS (0, C8, C7, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1",     CPENS (0, C8, C7, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), 0)
    TLBI_XS_OP ( "vae1is",    CPENS (0, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1is",  CPENS (0, C8, C3, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2",      CPENS (4, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2is",    CPENS (4, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0)
    TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), 0)
    TLBI_XS_OP ( "vae3",      CPENS (6, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae3is",    CPENS (6, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "alle2",     CPENS (4, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle2is",   CPENS (4, C8, C3, 0), 0)
    TLBI_XS_OP ( "alle1",     CPENS (4, C8, C7, 4), 0)
    TLBI_XS_OP ( "alle1is",   CPENS (4, C8, C3, 4), 0)
    TLBI_XS_OP ( "alle3",     CPENS (6, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle3is",   CPENS (6, C8, C3, 0), 0)
    TLBI_XS_OP ( "vale1is",   CPENS (0, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2is",   CPENS (4, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3is",   CPENS (6, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1",     CPENS (0, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2",     CPENS (4, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3",     CPENS (6, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1",    CPENS (0, C8, C7, 7), F_HASXT | F_REG_128)

#undef TLBI_XS_OP
/* As above, but the base operation itself requires Armv8.4-A.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1os",    CPENS (0, C8, C1, 0), 0 )
    TLBI_XS_OP ( "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "aside1os",     CPENS (0, C8, C1, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), 0 )
    TLBI_XS_OP ( "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "alle2os",      CPENS (4, C8, C1, 0), 0 )
    TLBI_XS_OP ( "alle1os",      CPENS (4, C8, C1, 4), 0 )
    TLBI_XS_OP ( "alle3os",      CPENS (6, C8, C1, 0), 0 )

    TLBI_XS_OP ( "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 )

#undef TLBI_XS_OP

    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5361 
/* Operands accepted by the speculation restriction (CFP/DVP/CPP)
   system instructions.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
   5371 
   5372 bool
   5373 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
   5374 {
   5375   return (sys_ins_reg->flags & F_HASXT) != 0;
   5376 }
   5377 
   5378 extern bool
   5379 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
   5380 				 const char *reg_name,
   5381 				 uint32_t reg_flags,
   5382 				 const aarch64_feature_set *reg_features)
   5383 {
   5384   /* Armv8-R has no EL3.  */
   5385   if (AARCH64_CPU_HAS_FEATURE (features, V8R))
   5386     {
   5387       const char *suffix = strrchr (reg_name, '_');
   5388       if (suffix && !strcmp (suffix, "_el3"))
   5389 	return false;
   5390     }
   5391 
   5392   if (!(reg_flags & F_ARCHEXT))
   5393     return true;
   5394 
   5395   return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features);
   5396 }
   5397 
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* BIT extracts bit BT of INSN; BITS extracts the inclusive bit field
   [HI:LO] of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
   5417 
   5418 static enum err_type
   5419 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
   5420 	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
   5421 	      bool encoding ATTRIBUTE_UNUSED,
   5422 	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   5423 	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   5424 {
   5425   int t  = BITS (insn, 4, 0);
   5426   int n  = BITS (insn, 9, 5);
   5427   int t2 = BITS (insn, 14, 10);
   5428 
   5429   if (BIT (insn, 23))
   5430     {
   5431       /* Write back enabled.  */
   5432       if ((t == n || t2 == n) && n != 31)
   5433 	return ERR_UND;
   5434     }
   5435 
   5436   if (BIT (insn, 22))
   5437     {
   5438       /* Load */
   5439       if (t == t2)
   5440 	return ERR_UND;
   5441     }
   5442 
   5443   return ERR_OK;
   5444 }
   5445 
   5446 /* Verifier for vector by element 3 operands functions where the
   5447    conditions `if sz:L == 11 then UNDEFINED` holds.  */
   5448 
   5449 static enum err_type
   5450 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
   5451 		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
   5452 		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   5453 		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   5454 {
   5455   const aarch64_insn undef_pattern = 0x3;
   5456   aarch64_insn value;
   5457 
   5458   assert (inst->opcode);
   5459   assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
   5460   value = encoding ? inst->value : insn;
   5461   assert (value);
   5462 
   5463   if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
   5464     return ERR_UND;
   5465 
   5466   return ERR_OK;
   5467 }
   5468 
   5469 /* Check an instruction that takes three register operands and that
   5470    requires the register numbers to be distinct from one another.  */
   5471 
   5472 static enum err_type
   5473 verify_three_different_regs (const struct aarch64_inst *inst,
   5474 			     const aarch64_insn insn ATTRIBUTE_UNUSED,
   5475 			     bfd_vma pc ATTRIBUTE_UNUSED,
   5476 			     bool encoding ATTRIBUTE_UNUSED,
   5477 			     aarch64_operand_error *mismatch_detail
   5478 			       ATTRIBUTE_UNUSED,
   5479 			     aarch64_instr_sequence *insn_sequence
   5480 			       ATTRIBUTE_UNUSED)
   5481 {
   5482   int rd, rs, rn;
   5483 
   5484   rd = inst->operands[0].reg.regno;
   5485   rs = inst->operands[1].reg.regno;
   5486   rn = inst->operands[2].reg.regno;
   5487   if (rd == rs || rd == rn || rs == rn)
   5488     {
   5489       mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
   5490       mismatch_detail->error
   5491 	= _("the three register operands must be distinct from one another");
   5492       mismatch_detail->index = -1;
   5493       return ERR_UND;
   5494     }
   5495 
   5496   return ERR_OK;
   5497 }
   5498 
   5499 /* Add INST to the end of INSN_SEQUENCE.  */
   5500 
   5501 static void
   5502 add_insn_to_sequence (const struct aarch64_inst *inst,
   5503 		      aarch64_instr_sequence *insn_sequence)
   5504 {
   5505   insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
   5506 }
   5507 
   5508 /* Initialize an instruction sequence insn_sequence with the instruction INST.
   5509    If INST is NULL the given insn_sequence is cleared and the sequence is left
   5510    uninitialized.  */
   5511 
   5512 void
   5513 init_insn_sequence (const struct aarch64_inst *inst,
   5514 		    aarch64_instr_sequence *insn_sequence)
   5515 {
   5516   int num_req_entries = 0;
   5517 
   5518   if (insn_sequence->instr)
   5519     {
   5520       XDELETE (insn_sequence->instr);
   5521       insn_sequence->instr = NULL;
   5522     }
   5523 
   5524   /* Handle all the cases here.  May need to think of something smarter than
   5525      a giant if/else chain if this grows.  At that time, a lookup table may be
   5526      best.  */
   5527   if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
   5528     num_req_entries = 1;
   5529   if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
   5530     num_req_entries = 2;
   5531 
   5532   insn_sequence->num_added_insns = 0;
   5533   insn_sequence->num_allocated_insns = num_req_entries;
   5534 
   5535   if (num_req_entries != 0)
   5536     {
   5537       insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
   5538       add_insn_to_sequence (inst, insn_sequence);
   5539     }
   5540 }
   5541 
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.

   NOTE(review): the OPCODE - 1 / OPCODE[1] arithmetic below assumes the
   P/M/E variants of each MOPS instruction occupy consecutive entries in
   the opcode table — confirm against aarch64-tbl.h if that layout ever
   changes.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* PREV_INSN is the last instruction recorded in the sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* If the previous instruction opened a MOPS sequence, INST must be
     the next opcode-table entry (the expected follow-on variant).  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-leading member of a sequence must directly follow the
	 previous variant, within the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The address and size registers must match across the sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
   5618 
/*  This function verifies that the instruction INST adheres to its specified
    constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
    returned and MISMATCH_DETAIL contains the reason why verification failed.

    The function is called both during assembly and disassembly.  If assembling
    then ENCODING will be TRUE, else FALSE.  If disassembling PC will be set
    and will contain the PC of the current instruction w.r.t. the section.

    If ENCODING and PC=0 then you are at a start of a section.  The constraints
    are verified against the given state insn_sequence which is updated as it
    transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  /* Fast path: INST imposes no constraints and no sequence is open.  */
  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new dependency sequence while another one is still
	 pending is itself a (non-fatal) violation.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      /* Start tracking a fresh sequence headed by INST.  */
      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* When disassembling, PC == 0 marks the first instruction of a new
     section.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      /* Reset the tracked sequence, except when INST is the middle (M)
	 part of a MOPS sequence, which may still be followed by a
	 valid E instruction.  */
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  FIXME: Move this to a lookup
	 table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2p1)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Scan INST's operands: count uses of the movprfx destination
	     register, record the widest element size seen, and note the
	     governing predicate operand, if any.  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	   assert (max_elem_size != 0);
	   aarch64_opnd_info inst_dest = inst->operands[0];
	   /* Determine the size that should be used to compare against the
	      movprfx size.  */
	   current_elem_size
	     = opcode->constraints & C_MAX_ELEM
	       ? max_elem_size
	       : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
   5910 
   5911 
/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Mask covering the bits above the ESIZE-byte element.  The shift is
     split in two so that ESIZE == 8 does not shift by the full width of
     the type (which would be undefined behaviour).  */
  uint64_t high_bits = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must be a zero- or sign-extension of its low ESIZE bytes,
     otherwise neither form applies here.  */
  if ((uvalue & ~high_bits) != uvalue && (uvalue | high_bits) != uvalue)
    return false;

  /* Fold the value down to the narrowest element size at which it could
     equally be replicated; a value replicable at byte granularity is
     always representable by DUP.  */
  int64_t folded = (int64_t) uvalue;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      folded = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  folded = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP accepts a signed 8-bit immediate, optionally shifted left by
     eight; only values outside that range need DUPM.  */
  if ((folded & 0xff) == 0)
    folded /= 256;
  return folded < -128 || folded >= 128;
}
   5938 
   5939 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
   5940    supports the instruction described by INST.  */
   5941 
   5942 bool
   5943 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
   5944 			     aarch64_inst *inst)
   5945 {
   5946   if (!inst->opcode->avariant
   5947       || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
   5948     return false;
   5949 
   5950   if (inst->opcode->iclass == sme_fp_sd
   5951       && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
   5952       && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
   5953     return false;
   5954 
   5955   if (inst->opcode->iclass == sme_int_sd
   5956       && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
   5957       && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
   5958     return false;
   5959 
   5960   return true;
   5961 }
   5962 
   5963 /* Include the opcode description table as well as the operand description
   5964    table.  */
   5965 #define VERIFIER(x) verify_##x
   5966 #include "aarch64-tbl.h"
   5967