Home | History | Annotate | Line # | Download | only in opcodes
aarch64-opc.c revision 1.9
      1 /* aarch64-opc.c -- AArch64 opcode support.
      2    Copyright (C) 2009-2020 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <assert.h>
     23 #include <stdlib.h>
     24 #include <stdio.h>
     25 #include "bfd_stdint.h"
     26 #include <stdarg.h>
     27 #include <inttypes.h>
     28 
     29 #include "opintl.h"
     30 #include "libiberty.h"
     31 
     32 #include "aarch64-opc.h"
     33 
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose debug tracing done via aarch64_verbose and
   the dump_* helpers below; presumably toggled by the assembler or
   disassembler drivers -- confirm at the call sites.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
     37 
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Indexed directly by the encoded pattern value (0-31).  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
     78 
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Indexed directly by the encoded prefetch-operation value (0-15).  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
    101 
    102 /* Helper functions to determine which operand to be used to encode/decode
    103    the size:Q fields for AdvSIMD instructions.  */
    104 
    105 static inline bfd_boolean
    106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    107 {
    108   return ((qualifier >= AARCH64_OPND_QLF_V_8B
    109 	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
    110 	  : FALSE);
    111 }
    112 
    113 static inline bfd_boolean
    114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
    115 {
    116   return ((qualifier >= AARCH64_OPND_QLF_S_B
    117 	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
    118 	  : FALSE);
    119 }
    120 
/* Classification of an instruction's qualifier sequence; used below to
   pick the operand that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
    129 
/* Indexed by enum data_pattern; gives the index of the operand whose
   qualifier determines the size:Q encoding for that pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
    138 
    139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
    140    the data pattern.
    141    N.B. QUALIFIERS is a possible sequence of qualifiers each of which
    142    corresponds to one of a sequence of operands.  */
    143 
    144 static enum data_pattern
    145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
    146 {
    147   if (vector_qualifier_p (qualifiers[0]) == TRUE)
    148     {
    149       /* e.g. v.4s, v.4s, v.4s
    150 	   or v.4h, v.4h, v.h[3].  */
    151       if (qualifiers[0] == qualifiers[1]
    152 	  && vector_qualifier_p (qualifiers[2]) == TRUE
    153 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    154 	      == aarch64_get_qualifier_esize (qualifiers[1]))
    155 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    156 	      == aarch64_get_qualifier_esize (qualifiers[2])))
    157 	return DP_VECTOR_3SAME;
    158       /* e.g. v.8h, v.8b, v.8b.
    159            or v.4s, v.4h, v.h[2].
    160 	   or v.8h, v.16b.  */
    161       if (vector_qualifier_p (qualifiers[1]) == TRUE
    162 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
    163 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    164 	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
    165 	return DP_VECTOR_LONG;
    166       /* e.g. v.8h, v.8h, v.8b.  */
    167       if (qualifiers[0] == qualifiers[1]
    168 	  && vector_qualifier_p (qualifiers[2]) == TRUE
    169 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
    170 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    171 	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
    172 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    173 	      == aarch64_get_qualifier_esize (qualifiers[1])))
    174 	return DP_VECTOR_WIDE;
    175     }
    176   else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    177     {
    178       /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
    179       if (vector_qualifier_p (qualifiers[1]) == TRUE
    180 	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
    181 	return DP_VECTOR_ACROSS_LANES;
    182     }
    183 
    184   return DP_UNKNOWN;
    185 }
    186 
    187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
    188    the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either cache the calculated result or statically generate the data;
   however, it is not obvious that the optimization will bring significant
   benefit.  */
    194 
    195 int
    196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
    197 {
    198   return
    199     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
    200 }
    201 
/* Description of every instruction bit-field referenced by the encoder and
   decoder, indexed by the corresponding field enumerator.
   NOTE(review): each entry appears to be { lsb, width } -- confirm against
   the definition of aarch64_field in the header.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 16 },	/* imm16_2: in udf instruction. */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
    { 16,  4 }, /* SVE_tsz: triangular size select.  */
    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22,  1 }, /* sz: 1-bit element size select.  */
};
    333 
    334 enum aarch64_operand_class
    335 aarch64_get_operand_class (enum aarch64_opnd type)
    336 {
    337   return aarch64_operands[type].op_class;
    338 }
    339 
    340 const char *
    341 aarch64_get_operand_name (enum aarch64_opnd type)
    342 {
    343   return aarch64_operands[type].name;
    344 }
    345 
    346 /* Get operand description string.
    347    This is usually for the diagnosis purpose.  */
    348 const char *
    349 aarch64_get_operand_desc (enum aarch64_opnd type)
    350 {
    351   return aarch64_operands[type].desc;
    352 }
    353 
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings followed by the encoding value.
   NOTE(review): the extra names (e.g. "none", "nlast", "tcont") are
   presumably the SVE condition aliases -- confirm.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
    374 
    375 const aarch64_cond *
    376 get_cond_from_value (aarch64_insn value)
    377 {
    378   assert (value < 16);
    379   return &aarch64_conds[(unsigned int) value];
    380 }
    381 
    382 const aarch64_cond *
    383 get_inverted_cond (const aarch64_cond *cond)
    384 {
    385   return &aarch64_conds[cond->value ^ 0x1];
    386 }
    387 
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   NOTE(review): the row order must stay in step with
   enum aarch64_modifier_kind: aarch64_get_operand_modifier recovers the
   kind from a row's index, and aarch64_get_operand_modifier_from_value
   indexes relative to AARCH64_MOD_UXTB / AARCH64_MOD_LSL.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
    413 
    414 enum aarch64_modifier_kind
    415 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
    416 {
    417   return desc - aarch64_operand_modifiers;
    418 }
    419 
    420 aarch64_insn
    421 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
    422 {
    423   return aarch64_operand_modifiers[kind].value;
    424 }
    425 
    426 enum aarch64_modifier_kind
    427 aarch64_get_operand_modifier_from_value (aarch64_insn value,
    428 					 bfd_boolean extend_p)
    429 {
    430   if (extend_p == TRUE)
    431     return AARCH64_MOD_UXTB + value;
    432   else
    433     return AARCH64_MOD_LSL - value;
    434 }
    435 
    436 bfd_boolean
    437 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
    438 {
    439   return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
    440     ? TRUE : FALSE;
    441 }
    442 
    443 static inline bfd_boolean
    444 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
    445 {
    446   return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
    447     ? TRUE : FALSE;
    448 }
    449 
/* Memory barrier option names, indexed by their 4-bit encoding; reserved
   encodings are represented by their plain "#imm" form.
   NOTE(review): presumably the CRm field of the DSB/DMB instructions --
   confirm against the Arm ARM.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
    469 
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     NOTE(review): HINT_OPD_F_NOPRINT presumably suppresses printing of the
     (blank) operand name -- confirm against the HINT_ENCODE definition.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
    487 
/* PRFM prefetch operation names, indexed by their 5-bit encoding.  NULL
   names mark encodings with no mnemonic (NOTE(review): presumably printed
   and parsed via the raw immediate form -- confirm at the users of this
   table).

   op -> op:       load = 0 instruction = 1 store = 2
   l  -> level:    1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
    528 
    529 /* Utilities on value constraint.  */
    531 
/* Return non-zero if VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high;
}
    537 
/* Return non-zero if VALUE is a multiple of ALIGN (ALIGN must be
   non-zero).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
    544 
/* Return non-zero if the signed VALUE is representable in a two's
   complement field of WIDTH bits; WIDTH must be below 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (uint64_t) 1 << (width - 1);
    return value >= -lim && value < lim;
  }
}
    558 
/* Return non-zero if the non-negative VALUE is representable in an
   unsigned field of WIDTH bits; WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (uint64_t) 1 << width;
    return value >= 0 && value < lim;
  }
}
    572 
    573 /* Return 1 if OPERAND is SP or WSP.  */
    574 int
    575 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
    576 {
    577   return ((aarch64_get_operand_class (operand->type)
    578 	   == AARCH64_OPND_CLASS_INT_REG)
    579 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
    580 	  && operand->reg.regno == 31);
    581 }
    582 
    583 /* Return 1 if OPERAND is XZR or WZP.  */
    584 int
    585 aarch64_zero_register_p (const aarch64_opnd_info *operand)
    586 {
    587   return ((aarch64_get_operand_class (operand->type)
    588 	   == AARCH64_OPND_CLASS_INT_REG)
    589 	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
    590 	  && operand->reg.regno == 31);
    591 }
    592 
    593 /* Return true if the operand *OPERAND that has the operand code
    594    OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
    595    qualified by the qualifier TARGET.  */
    596 
    597 static inline int
    598 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
    599 			  aarch64_opnd_qualifier_t target)
    600 {
    601   switch (operand->qualifier)
    602     {
    603     case AARCH64_OPND_QLF_W:
    604       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
    605 	return 1;
    606       break;
    607     case AARCH64_OPND_QLF_X:
    608       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
    609 	return 1;
    610       break;
    611     case AARCH64_OPND_QLF_WSP:
    612       if (target == AARCH64_OPND_QLF_W
    613 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    614 	return 1;
    615       break;
    616     case AARCH64_OPND_QLF_SP:
    617       if (target == AARCH64_OPND_QLF_X
    618 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    619 	return 1;
    620       break;
    621     default:
    622       break;
    623     }
    624 
    625   return 0;
    626 }
    627 
    628 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
    629    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
    630 
    631    Return NIL if more than one expected qualifiers are found.  */
    632 
    633 aarch64_opnd_qualifier_t
    634 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
    635 				int idx,
    636 				const aarch64_opnd_qualifier_t known_qlf,
    637 				int known_idx)
    638 {
    639   int i, saved_i;
    640 
    641   /* Special case.
    642 
    643      When the known qualifier is NIL, we have to assume that there is only
    644      one qualifier sequence in the *QSEQ_LIST and return the corresponding
    645      qualifier directly.  One scenario is that for instruction
    646 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
    647      which has only one possible valid qualifier sequence
    648 	NIL, S_D
    649      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
    650      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
    651 
    652      Because the qualifier NIL has dual roles in the qualifier sequence:
    653      it can mean no qualifier for the operand, or the qualifer sequence is
    654      not in use (when all qualifiers in the sequence are NILs), we have to
    655      handle this special case here.  */
    656   if (known_qlf == AARCH64_OPND_NIL)
    657     {
    658       assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
    659       return qseq_list[0][idx];
    660     }
    661 
    662   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    663     {
    664       if (qseq_list[i][known_idx] == known_qlf)
    665 	{
    666 	  if (saved_i != -1)
    667 	    /* More than one sequences are found to have KNOWN_QLF at
    668 	       KNOWN_IDX.  */
    669 	    return AARCH64_OPND_NIL;
    670 	  saved_i = i;
    671 	}
    672     }
    673 
    674   return qseq_list[saved_i][idx];
    675 }
    676 
/* Kinds of operand qualifier; selects how the data fields of
   struct operand_qualifier_data are interpreted (see the comments inside
   the aarch64_opnd_qualifiers table below).  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* data0/1/2 = element size, nelem, encoding value.  */
  OQK_VALUE_IN_RANGE,	/* data0/1 = lower bound, upper bound.  */
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
    697 
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* NOTE(review): "4b"/"2h" appear twice: here as scalar (nelem 1) and
     below as vector arrangements -- presumably distinct qualifier
     enumerators share the printed name; confirm against the enum.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.

     NOTE(review): the kind field below is 0 (i.e. OQK_NIL) rather than
     OQK_MISC.  The predicates visible in this file only test for
     OQK_OPD_VARIANT / OQK_VALUE_IN_RANGE, so behaviour is the same either
     way, but confirm OQK_MISC was not intended.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
    759 
    760 static inline bfd_boolean
    761 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
    762 {
    763   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    764     ? TRUE : FALSE;
    765 }
    766 
    767 static inline bfd_boolean
    768 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
    769 {
    770   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    771     ? TRUE : FALSE;
    772 }
    773 
    774 const char*
    775 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
    776 {
    777   return aarch64_opnd_qualifiers[qualifier].desc;
    778 }
    779 
    780 /* Given an operand qualifier, return the expected data element size
    781    of a qualified operand.  */
    782 unsigned char
    783 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
    784 {
    785   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    786   return aarch64_opnd_qualifiers[qualifier].data0;
    787 }
    788 
    789 unsigned char
    790 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
    791 {
    792   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    793   return aarch64_opnd_qualifiers[qualifier].data1;
    794 }
    795 
    796 aarch64_insn
    797 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
    798 {
    799   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    800   return aarch64_opnd_qualifiers[qualifier].data2;
    801 }
    802 
    803 static int
    804 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
    805 {
    806   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    807   return aarch64_opnd_qualifiers[qualifier].data0;
    808 }
    809 
    810 static int
    811 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
    812 {
    813   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    814   return aarch64_opnd_qualifiers[qualifier].data1;
    815 }
    816 
    817 #ifdef DEBUG_AARCH64
void
aarch64_verbose (const char *str, ...)
{
  /* printf-style debug output, prefixed with "#### " and terminated
     with a newline.  */
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
    828 
    829 static inline void
    830 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
    831 {
    832   int i;
    833   printf ("#### \t");
    834   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    835     printf ("%s,", aarch64_get_qualifier_name (*qualifier));
    836   printf ("\n");
    837 }
    838 
    839 static void
    840 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
    841 		       const aarch64_opnd_qualifier_t *qualifier)
    842 {
    843   int i;
    844   aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
    845 
    846   aarch64_verbose ("dump_match_qualifiers:");
    847   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    848     curr[i] = opnd[i].qualifier;
    849   dump_qualifier_sequence (curr);
    850   aarch64_verbose ("against");
    851   dump_qualifier_sequence (qualifier);
    852 }
    853 #endif /* DEBUG_AARCH64 */
    854 
    855 /* This function checks if the given instruction INSN is a destructive
    856    instruction based on the usage of the registers.  It does not recognize
    857    unary destructive instructions.  */
    858 bfd_boolean
    859 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
    860 {
    861   int i = 0;
    862   const enum aarch64_opnd *opnds = opcode->operands;
    863 
    864   if (opnds[0] == AARCH64_OPND_NIL)
    865     return FALSE;
    866 
    867   while (opnds[++i] != AARCH64_OPND_NIL)
    868     if (opnds[i] == opnds[0])
    869       return TRUE;
    870 
    871   return FALSE;
    872 }
    873 
    874 /* TODO improve this, we can have an extra field at the runtime to
    875    store the number of operands rather than calculating it every time.  */
    876 
    877 int
    878 aarch64_num_of_operands (const aarch64_opcode *opcode)
    879 {
    880   int i = 0;
    881   const enum aarch64_opnd *opnds = opcode->operands;
    882   while (opnds[i++] != AARCH64_OPND_NIL)
    883     ;
    884   --i;
    885   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
    886   return i;
    887 }
    888 
    889 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
    890    If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
    891 
    892    N.B. on the entry, it is very likely that only some operands in *INST
    893    have had their qualifiers been established.
    894 
    895    If STOP_AT is not -1, the function will only try to match
    896    the qualifier sequence for operands before and including the operand
    897    of index STOP_AT; and on success *RET will only be filled with the first
    898    (STOP_AT+1) qualifiers.
    899 
    900    A couple examples of the matching algorithm:
    901 
    902    X,W,NIL should match
    903    X,W,NIL
    904 
    905    NIL,NIL should match
    906    X  ,NIL
    907 
    908    Apart from serving the main encoding routine, this can also be called
    909    during or after the operand decoding.  */
    910 
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* An opcode with no operands trivially matches.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT so the loops below never walk past the actual
     number of operands.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.   */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL sequence only matches when it is the first (and
	     therefore the only) candidate in the list.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to and including STOP_AT; pad
	 the remainder of *RET with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
   1013 
   1014 /* Operand qualifier matching and resolving.
   1015 
   1016    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   1017    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
   1018 
   1019    if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   1020    succeeds.  */
   1021 
   1022 static int
   1023 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
   1024 {
   1025   int i, nops;
   1026   aarch64_opnd_qualifier_seq_t qualifiers;
   1027 
   1028   if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
   1029 			       qualifiers))
   1030     {
   1031       DEBUG_TRACE ("matching FAIL");
   1032       return 0;
   1033     }
   1034 
   1035   if (inst->opcode->flags & F_STRICT)
   1036     {
   1037       /* Require an exact qualifier match, even for NIL qualifiers.  */
   1038       nops = aarch64_num_of_operands (inst->opcode);
   1039       for (i = 0; i < nops; ++i)
   1040 	if (inst->operands[i].qualifier != qualifiers[i])
   1041 	  return FALSE;
   1042     }
   1043 
   1044   /* Update the qualifiers.  */
   1045   if (update_p == TRUE)
   1046     for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   1047       {
   1048 	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
   1049 	  break;
   1050 	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
   1051 			"update %s with %s for operand %d",
   1052 			aarch64_get_qualifier_name (inst->operands[i].qualifier),
   1053 			aarch64_get_qualifier_name (qualifiers[i]), i);
   1054 	inst->operands[i].qualifier = qualifiers[i];
   1055       }
   1056 
   1057   DEBUG_TRACE ("matching SUCCESS");
   1058   return 1;
   1059 }
   1060 
   1061 /* Return TRUE if VALUE is a wide constant that can be moved into a general
   1062    register by MOVZ.
   1063 
   1064    IS32 indicates whether value is a 32-bit immediate or not.
   1065    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   1066    amount will be returned in *SHIFT_AMOUNT.  */
   1067 
   1068 bfd_boolean
   1069 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
   1070 {
   1071   int amount;
   1072 
   1073   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1074 
   1075   if (is32)
   1076     {
   1077       /* Allow all zeros or all ones in top 32-bits, so that
   1078 	 32-bit constant expressions like ~0x80000000 are
   1079 	 permitted.  */
   1080       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
   1081 	/* Immediate out of range.  */
   1082 	return FALSE;
   1083       value &= 0xffffffff;
   1084     }
   1085 
   1086   /* first, try movz then movn */
   1087   amount = -1;
   1088   if ((value & ((uint64_t) 0xffff << 0)) == value)
   1089     amount = 0;
   1090   else if ((value & ((uint64_t) 0xffff << 16)) == value)
   1091     amount = 16;
   1092   else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
   1093     amount = 32;
   1094   else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
   1095     amount = 48;
   1096 
   1097   if (amount == -1)
   1098     {
   1099       DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
   1100       return FALSE;
   1101     }
   1102 
   1103   if (shift_amount != NULL)
   1104     *shift_amount = amount;
   1105 
   1106   DEBUG_TRACE ("exit TRUE with amount %d", amount);
   1107 
   1108   return TRUE;
   1109 }
   1110 
   1111 /* Build the accepted values for immediate logical SIMD instructions.
   1112 
   1113    The standard encodings of the immediate value are:
   1114      N      imms     immr         SIMD size  R             S
   1115      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
   1116      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
   1117      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
   1118      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
   1119      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
   1120      0      11110s   00000r       2       UInt(r)       UInt(s)
   1121    where all-ones value of S is reserved.
   1122 
   1123    Let's call E the SIMD size.
   1124 
   1125    The immediate value is: S+1 bits '1' rotated to the right by R.
   1126 
   1127    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   1128    (remember S != E - 1).  */
   1129 
   1130 #define TOTAL_IMM_NB  5334
   1131 
typedef struct
{
  uint64_t imm;			/* Immediate replicated to 64 bits.  */
  aarch64_insn encoding;	/* Corresponding imm13 standard encoding.  */
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid (immediate, encoding) pairs.  It is
   filled by build_immediate_table and sorted by immediate value so it
   can be binary-searched from aarch64_logical_immediate_p.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
   1139 
   1140 static int
   1141 simd_imm_encoding_cmp(const void *i1, const void *i2)
   1142 {
   1143   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
   1144   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
   1145 
   1146   if (imm1->imm < imm2->imm)
   1147     return -1;
   1148   if (imm1->imm > imm2->imm)
   1149     return +1;
   1150   return 0;
   1151 }
   1152 
   1153 /* immediate bitfield standard encoding
   1154    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1155    1         ssssss     rrrrrr      64        rrrrrr ssssss
   1156    0         0sssss     0rrrrr      32        rrrrr  sssss
   1157    0         10ssss     00rrrr      16        rrrr   ssss
   1158    0         110sss     000rrr      8         rrr    sss
   1159    0         1110ss     0000rr      4         rr     ss
   1160    0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the fields into the 13-bit standard encoding:
     imm13<12> = N (is64), imm13<11:6> = immr (r), imm13<5:0> = imms (s).  */
  int encoded = s;
  encoded |= r << 6;
  encoded |= is64 << 12;
  return encoded;
}
   1166 
/* Fill simd_immediates[] with all TOTAL_IMM_NB valid logical-immediate
   values and their standard encodings, then sort the table by
   immediate value for later binary search.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over element sizes 2, 4, 8, 16, 32, 64.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* s_mask provides the leading bits of the imms field that
	     identify the element size in the standard encoding:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* All-ones S (s == e - 1) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* The deliberate case fallthrough doubles the replicated
	       width at each step until the value fills 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so aarch64_logical_immediate_p can use
     bsearch with simd_imm_encoding_cmp.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
   1232 
   1233 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   1234    be accepted by logical (immediate) instructions
   1235    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
   1236 
   1237    ESIZE is the number of bytes in the decoded immediate value.
   1238    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   1239    VALUE will be returned in *ENCODING.  */
   1240 
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the sorted immediate table lazily, on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     N.B. the shift is split in two so that when esize == 8 the total
     shift of 64 does not invoke undefined behavior.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the replicated value in the sorted table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
   1284 
   1285 /* If 64-bit immediate IMM is in the format of
   1286    "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   1287    where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   1288    of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  /* Collapse a byte-replicated 64-bit mask into its 8-bit form: each
     byte of IMM that is 0xff contributes a 1 bit, each 0x00 byte a
     0 bit.  Any other byte value means IMM is not in the expected
     format, in which case -1 is returned.  */
  int result = 0;
  int byte_idx;

  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      uint32_t byte = (imm >> (8 * byte_idx)) & 0xff;

      if (byte == 0xff)
	result |= 1 << byte_idx;
      else if (byte != 0x00)
	return -1;	/* Mixed byte: not representable.  */
    }

  return result;
}
   1306 
   1307 /* Utility inline functions for operand_general_constraint_met_p.  */
   1308 
   1309 static inline void
   1310 set_error (aarch64_operand_error *mismatch_detail,
   1311 	   enum aarch64_operand_error_kind kind, int idx,
   1312 	   const char* error)
   1313 {
   1314   if (mismatch_detail == NULL)
   1315     return;
   1316   mismatch_detail->kind = kind;
   1317   mismatch_detail->index = idx;
   1318   mismatch_detail->error = error;
   1319 }
   1320 
   1321 static inline void
   1322 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
   1323 		  const char* error)
   1324 {
   1325   if (mismatch_detail == NULL)
   1326     return;
   1327   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
   1328 }
   1329 
   1330 static inline void
   1331 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1332 			int idx, int lower_bound, int upper_bound,
   1333 			const char* error)
   1334 {
   1335   if (mismatch_detail == NULL)
   1336     return;
   1337   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
   1338   mismatch_detail->data[0] = lower_bound;
   1339   mismatch_detail->data[1] = upper_bound;
   1340 }
   1341 
   1342 static inline void
   1343 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1344 			    int idx, int lower_bound, int upper_bound)
   1345 {
   1346   if (mismatch_detail == NULL)
   1347     return;
   1348   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1349 			  _("immediate value"));
   1350 }
   1351 
   1352 static inline void
   1353 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1354 			       int idx, int lower_bound, int upper_bound)
   1355 {
   1356   if (mismatch_detail == NULL)
   1357     return;
   1358   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1359 			  _("immediate offset"));
   1360 }
   1361 
   1362 static inline void
   1363 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1364 			      int idx, int lower_bound, int upper_bound)
   1365 {
   1366   if (mismatch_detail == NULL)
   1367     return;
   1368   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1369 			  _("register number"));
   1370 }
   1371 
   1372 static inline void
   1373 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1374 				 int idx, int lower_bound, int upper_bound)
   1375 {
   1376   if (mismatch_detail == NULL)
   1377     return;
   1378   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1379 			  _("register element index"));
   1380 }
   1381 
   1382 static inline void
   1383 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1384 				   int idx, int lower_bound, int upper_bound)
   1385 {
   1386   if (mismatch_detail == NULL)
   1387     return;
   1388   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1389 			  _("shift amount"));
   1390 }
   1391 
   1392 /* Report that the MUL modifier in operand IDX should be in the range
   1393    [LOWER_BOUND, UPPER_BOUND].  */
   1394 static inline void
   1395 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1396 				   int idx, int lower_bound, int upper_bound)
   1397 {
   1398   if (mismatch_detail == NULL)
   1399     return;
   1400   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1401 			  _("multiplier"));
   1402 }
   1403 
   1404 static inline void
   1405 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
   1406 		     int alignment)
   1407 {
   1408   if (mismatch_detail == NULL)
   1409     return;
   1410   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
   1411   mismatch_detail->data[0] = alignment;
   1412 }
   1413 
   1414 static inline void
   1415 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
   1416 		    int expected_num)
   1417 {
   1418   if (mismatch_detail == NULL)
   1419     return;
   1420   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
   1421   mismatch_detail->data[0] = expected_num;
   1422 }
   1423 
   1424 static inline void
   1425 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
   1426 		 const char* error)
   1427 {
   1428   if (mismatch_detail == NULL)
   1429     return;
   1430   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
   1431 }
   1432 
   1433 /* General constraint checking based on operand code.
   1434 
   1435    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   1436    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
   1437 
   1438    This function has to be called after the qualifiers for all operands
   1439    have been resolved.
   1440 
   1441    Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   1442    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   1443    of error message during the disassembling where error message is not
   1444    wanted.  We avoid the dynamic construction of strings of error messages
   1445    here (i.e. in libopcodes), as it is costly and complicated; instead, we
   1446    use a combination of error code, static string and some integer data to
   1447    represent an error.  */
   1448 
   1449 static int
   1450 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
   1451 				  enum aarch64_opnd type,
   1452 				  const aarch64_opcode *opcode,
   1453 				  aarch64_operand_error *mismatch_detail)
   1454 {
   1455   unsigned num, modifiers, shift;
   1456   unsigned char size;
   1457   int64_t imm, min_value, max_value;
   1458   uint64_t uvalue, mask;
   1459   const aarch64_opnd_info *opnd = opnds + idx;
   1460   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
   1461 
   1462   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
   1463 
   1464   switch (aarch64_operands[type].op_class)
   1465     {
   1466     case AARCH64_OPND_CLASS_INT_REG:
   1467       /* Check pair reg constraints for cas* instructions.  */
   1468       if (type == AARCH64_OPND_PAIRREG)
   1469 	{
   1470 	  assert (idx == 1 || idx == 3);
   1471 	  if (opnds[idx - 1].reg.regno % 2 != 0)
   1472 	    {
   1473 	      set_syntax_error (mismatch_detail, idx - 1,
   1474 				_("reg pair must start from even reg"));
   1475 	      return 0;
   1476 	    }
   1477 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
   1478 	    {
   1479 	      set_syntax_error (mismatch_detail, idx,
   1480 				_("reg pair must be contiguous"));
   1481 	      return 0;
   1482 	    }
   1483 	  break;
   1484 	}
   1485 
   1486       /* <Xt> may be optional in some IC and TLBI instructions.  */
   1487       if (type == AARCH64_OPND_Rt_SYS)
   1488 	{
   1489 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
   1490 			       == AARCH64_OPND_CLASS_SYSTEM));
   1491 	  if (opnds[1].present
   1492 	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1493 	    {
   1494 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
   1495 	      return 0;
   1496 	    }
   1497 	  if (!opnds[1].present
   1498 	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1499 	    {
   1500 	      set_other_error (mismatch_detail, idx, _("missing register"));
   1501 	      return 0;
   1502 	    }
   1503 	}
   1504       switch (qualifier)
   1505 	{
   1506 	case AARCH64_OPND_QLF_WSP:
   1507 	case AARCH64_OPND_QLF_SP:
   1508 	  if (!aarch64_stack_pointer_p (opnd))
   1509 	    {
   1510 	      set_other_error (mismatch_detail, idx,
   1511 			       _("stack pointer register expected"));
   1512 	      return 0;
   1513 	    }
   1514 	  break;
   1515 	default:
   1516 	  break;
   1517 	}
   1518       break;
   1519 
   1520     case AARCH64_OPND_CLASS_SVE_REG:
   1521       switch (type)
   1522 	{
   1523 	case AARCH64_OPND_SVE_Zm3_INDEX:
   1524 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
   1525 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
   1526 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
   1527 	case AARCH64_OPND_SVE_Zm4_INDEX:
   1528 	  size = get_operand_fields_width (get_operand_from_code (type));
   1529 	  shift = get_operand_specific_data (&aarch64_operands[type]);
   1530 	  mask = (1 << shift) - 1;
   1531 	  if (opnd->reg.regno > mask)
   1532 	    {
   1533 	      assert (mask == 7 || mask == 15);
   1534 	      set_other_error (mismatch_detail, idx,
   1535 			       mask == 15
   1536 			       ? _("z0-z15 expected")
   1537 			       : _("z0-z7 expected"));
   1538 	      return 0;
   1539 	    }
   1540 	  mask = (1u << (size - shift)) - 1;
   1541 	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
   1542 	    {
   1543 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
   1544 	      return 0;
   1545 	    }
   1546 	  break;
   1547 
   1548 	case AARCH64_OPND_SVE_Zn_INDEX:
   1549 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1550 	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
   1551 	    {
   1552 	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
   1553 					       0, 64 / size - 1);
   1554 	      return 0;
   1555 	    }
   1556 	  break;
   1557 
   1558 	case AARCH64_OPND_SVE_ZnxN:
   1559 	case AARCH64_OPND_SVE_ZtxN:
   1560 	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
   1561 	    {
   1562 	      set_other_error (mismatch_detail, idx,
   1563 			       _("invalid register list"));
   1564 	      return 0;
   1565 	    }
   1566 	  break;
   1567 
   1568 	default:
   1569 	  break;
   1570 	}
   1571       break;
   1572 
   1573     case AARCH64_OPND_CLASS_PRED_REG:
   1574       if (opnd->reg.regno >= 8
   1575 	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
   1576 	{
   1577 	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
   1578 	  return 0;
   1579 	}
   1580       break;
   1581 
   1582     case AARCH64_OPND_CLASS_COND:
   1583       if (type == AARCH64_OPND_COND1
   1584 	  && (opnds[idx].cond->value & 0xe) == 0xe)
   1585 	{
   1586 	  /* Not allow AL or NV.  */
   1587 	  set_syntax_error (mismatch_detail, idx, NULL);
   1588 	}
   1589       break;
   1590 
   1591     case AARCH64_OPND_CLASS_ADDRESS:
   1592       /* Check writeback.  */
   1593       switch (opcode->iclass)
   1594 	{
   1595 	case ldst_pos:
   1596 	case ldst_unscaled:
   1597 	case ldstnapair_offs:
   1598 	case ldstpair_off:
   1599 	case ldst_unpriv:
   1600 	  if (opnd->addr.writeback == 1)
   1601 	    {
   1602 	      set_syntax_error (mismatch_detail, idx,
   1603 				_("unexpected address writeback"));
   1604 	      return 0;
   1605 	    }
   1606 	  break;
   1607 	case ldst_imm10:
   1608 	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
   1609 	    {
   1610 	      set_syntax_error (mismatch_detail, idx,
   1611 				_("unexpected address writeback"));
   1612 	      return 0;
   1613 	    }
   1614 	  break;
   1615 	case ldst_imm9:
   1616 	case ldstpair_indexed:
   1617 	case asisdlsep:
   1618 	case asisdlsop:
   1619 	  if (opnd->addr.writeback == 0)
   1620 	    {
   1621 	      set_syntax_error (mismatch_detail, idx,
   1622 				_("address writeback expected"));
   1623 	      return 0;
   1624 	    }
   1625 	  break;
   1626 	default:
   1627 	  assert (opnd->addr.writeback == 0);
   1628 	  break;
   1629 	}
   1630       switch (type)
   1631 	{
   1632 	case AARCH64_OPND_ADDR_SIMM7:
   1633 	  /* Scaled signed 7 bits immediate offset.  */
   1634 	  /* Get the size of the data element that is accessed, which may be
   1635 	     different from that of the source register size,
   1636 	     e.g. in strb/ldrb.  */
   1637 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1638 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
   1639 	    {
   1640 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1641 					     -64 * size, 63 * size);
   1642 	      return 0;
   1643 	    }
   1644 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1645 	    {
   1646 	      set_unaligned_error (mismatch_detail, idx, size);
   1647 	      return 0;
   1648 	    }
   1649 	  break;
   1650 	case AARCH64_OPND_ADDR_OFFSET:
   1651 	case AARCH64_OPND_ADDR_SIMM9:
   1652 	  /* Unscaled signed 9 bits immediate offset.  */
   1653 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   1654 	    {
   1655 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
   1656 	      return 0;
   1657 	    }
   1658 	  break;
   1659 
   1660 	case AARCH64_OPND_ADDR_SIMM9_2:
   1661 	  /* Unscaled signed 9 bits immediate offset, which has to be negative
   1662 	     or unaligned.  */
   1663 	  size = aarch64_get_qualifier_esize (qualifier);
   1664 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
   1665 	       && !value_aligned_p (opnd->addr.offset.imm, size))
   1666 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
   1667 	    return 1;
   1668 	  set_other_error (mismatch_detail, idx,
   1669 			   _("negative or unaligned offset expected"));
   1670 	  return 0;
   1671 
   1672 	case AARCH64_OPND_ADDR_SIMM10:
   1673 	  /* Scaled signed 10 bits immediate offset.  */
   1674 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
   1675 	    {
   1676 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
   1677 	      return 0;
   1678 	    }
   1679 	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
   1680 	    {
   1681 	      set_unaligned_error (mismatch_detail, idx, 8);
   1682 	      return 0;
   1683 	    }
   1684 	  break;
   1685 
   1686 	case AARCH64_OPND_ADDR_SIMM11:
   1687 	  /* Signed 11 bits immediate offset (multiple of 16).  */
   1688 	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
   1689 	    {
   1690 	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
   1691 	      return 0;
   1692 	    }
   1693 
   1694 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   1695 	    {
   1696 	      set_unaligned_error (mismatch_detail, idx, 16);
   1697 	      return 0;
   1698 	    }
   1699 	  break;
   1700 
   1701 	case AARCH64_OPND_ADDR_SIMM13:
   1702 	  /* Signed 13 bits immediate offset (multiple of 16).  */
   1703 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
   1704 	    {
   1705 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
   1706 	      return 0;
   1707 	    }
   1708 
   1709 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
   1710 	    {
   1711 	      set_unaligned_error (mismatch_detail, idx, 16);
   1712 	      return 0;
   1713 	    }
   1714 	  break;
   1715 
   1716 	case AARCH64_OPND_SIMD_ADDR_POST:
   1717 	  /* AdvSIMD load/store multiple structures, post-index.  */
   1718 	  assert (idx == 1);
   1719 	  if (opnd->addr.offset.is_reg)
   1720 	    {
   1721 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
   1722 		return 1;
   1723 	      else
   1724 		{
   1725 		  set_other_error (mismatch_detail, idx,
   1726 				   _("invalid register offset"));
   1727 		  return 0;
   1728 		}
   1729 	    }
   1730 	  else
   1731 	    {
   1732 	      const aarch64_opnd_info *prev = &opnds[idx-1];
   1733 	      unsigned num_bytes; /* total number of bytes transferred.  */
   1734 	      /* The opcode dependent area stores the number of elements in
   1735 		 each structure to be loaded/stored.  */
   1736 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
   1737 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading single structure to all lanes.  */
   1739 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
   1740 		  * aarch64_get_qualifier_esize (prev->qualifier);
   1741 	      else
   1742 		num_bytes = prev->reglist.num_regs
   1743 		  * aarch64_get_qualifier_esize (prev->qualifier)
   1744 		  * aarch64_get_qualifier_nelem (prev->qualifier);
   1745 	      if ((int) num_bytes != opnd->addr.offset.imm)
   1746 		{
   1747 		  set_other_error (mismatch_detail, idx,
   1748 				   _("invalid post-increment amount"));
   1749 		  return 0;
   1750 		}
   1751 	    }
   1752 	  break;
   1753 
   1754 	case AARCH64_OPND_ADDR_REGOFF:
   1755 	  /* Get the size of the data element that is accessed, which may be
   1756 	     different from that of the source register size,
   1757 	     e.g. in strb/ldrb.  */
   1758 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1759 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
   1760 	  if (opnd->shifter.amount != 0
   1761 	      && opnd->shifter.amount != (int)get_logsz (size))
   1762 	    {
   1763 	      set_other_error (mismatch_detail, idx,
   1764 			       _("invalid shift amount"));
   1765 	      return 0;
   1766 	    }
   1767 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
   1768 	     operators.  */
   1769 	  switch (opnd->shifter.kind)
   1770 	    {
   1771 	    case AARCH64_MOD_UXTW:
   1772 	    case AARCH64_MOD_LSL:
   1773 	    case AARCH64_MOD_SXTW:
   1774 	    case AARCH64_MOD_SXTX: break;
   1775 	    default:
   1776 	      set_other_error (mismatch_detail, idx,
   1777 			       _("invalid extend/shift operator"));
   1778 	      return 0;
   1779 	    }
   1780 	  break;
   1781 
   1782 	case AARCH64_OPND_ADDR_UIMM12:
   1783 	  imm = opnd->addr.offset.imm;
   1784 	  /* Get the size of the data element that is accessed, which may be
   1785 	     different from that of the source register size,
   1786 	     e.g. in strb/ldrb.  */
   1787 	  size = aarch64_get_qualifier_esize (qualifier);
   1788 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
   1789 	    {
   1790 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1791 					     0, 4095 * size);
   1792 	      return 0;
   1793 	    }
   1794 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1795 	    {
   1796 	      set_unaligned_error (mismatch_detail, idx, size);
   1797 	      return 0;
   1798 	    }
   1799 	  break;
   1800 
   1801 	case AARCH64_OPND_ADDR_PCREL14:
   1802 	case AARCH64_OPND_ADDR_PCREL19:
   1803 	case AARCH64_OPND_ADDR_PCREL21:
   1804 	case AARCH64_OPND_ADDR_PCREL26:
   1805 	  imm = opnd->imm.value;
   1806 	  if (operand_need_shift_by_two (get_operand_from_code (type)))
   1807 	    {
	      /* The offset value in a PC-relative branch instruction is always
   1809 		 4-byte aligned and is encoded without the lowest 2 bits.  */
   1810 	      if (!value_aligned_p (imm, 4))
   1811 		{
   1812 		  set_unaligned_error (mismatch_detail, idx, 4);
   1813 		  return 0;
   1814 		}
   1815 	      /* Right shift by 2 so that we can carry out the following check
   1816 		 canonically.  */
   1817 	      imm >>= 2;
   1818 	    }
   1819 	  size = get_operand_fields_width (get_operand_from_code (type));
   1820 	  if (!value_fit_signed_field_p (imm, size))
   1821 	    {
   1822 	      set_other_error (mismatch_detail, idx,
   1823 			       _("immediate out of range"));
   1824 	      return 0;
   1825 	    }
   1826 	  break;
   1827 
   1828 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
   1829 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
   1830 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
   1831 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
   1832 	  min_value = -8;
   1833 	  max_value = 7;
   1834 	sve_imm_offset_vl:
   1835 	  assert (!opnd->addr.offset.is_reg);
   1836 	  assert (opnd->addr.preind);
   1837 	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
   1838 	  min_value *= num;
   1839 	  max_value *= num;
   1840 	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
   1841 	      || (opnd->shifter.operator_present
   1842 		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
   1843 	    {
   1844 	      set_other_error (mismatch_detail, idx,
   1845 			       _("invalid addressing mode"));
   1846 	      return 0;
   1847 	    }
   1848 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
   1849 	    {
   1850 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1851 					     min_value, max_value);
   1852 	      return 0;
   1853 	    }
   1854 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
   1855 	    {
   1856 	      set_unaligned_error (mismatch_detail, idx, num);
   1857 	      return 0;
   1858 	    }
   1859 	  break;
   1860 
   1861 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
   1862 	  min_value = -32;
   1863 	  max_value = 31;
   1864 	  goto sve_imm_offset_vl;
   1865 
   1866 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
   1867 	  min_value = -256;
   1868 	  max_value = 255;
   1869 	  goto sve_imm_offset_vl;
   1870 
   1871 	case AARCH64_OPND_SVE_ADDR_RI_U6:
   1872 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
   1873 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
   1874 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
   1875 	  min_value = 0;
   1876 	  max_value = 63;
   1877 	sve_imm_offset:
   1878 	  assert (!opnd->addr.offset.is_reg);
   1879 	  assert (opnd->addr.preind);
   1880 	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
   1881 	  min_value *= num;
   1882 	  max_value *= num;
   1883 	  if (opnd->shifter.operator_present
   1884 	      || opnd->shifter.amount_present)
   1885 	    {
   1886 	      set_other_error (mismatch_detail, idx,
   1887 			       _("invalid addressing mode"));
   1888 	      return 0;
   1889 	    }
   1890 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
   1891 	    {
   1892 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1893 					     min_value, max_value);
   1894 	      return 0;
   1895 	    }
   1896 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
   1897 	    {
   1898 	      set_unaligned_error (mismatch_detail, idx, num);
   1899 	      return 0;
   1900 	    }
   1901 	  break;
   1902 
   1903 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
   1904 	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
   1905 	  min_value = -8;
   1906 	  max_value = 7;
   1907 	  goto sve_imm_offset;
   1908 
   1909 	case AARCH64_OPND_SVE_ADDR_ZX:
   1910 	  /* Everything is already ensured by parse_operands or
   1911 	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
   1912 	     argument type).  */
   1913 	  assert (opnd->addr.offset.is_reg);
   1914 	  assert (opnd->addr.preind);
   1915 	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
   1916 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   1917 	  assert (opnd->shifter.operator_present == 0);
   1918 	  break;
   1919 
   1920 	case AARCH64_OPND_SVE_ADDR_R:
   1921 	case AARCH64_OPND_SVE_ADDR_RR:
   1922 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
   1923 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
   1924 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
   1925 	case AARCH64_OPND_SVE_ADDR_RX:
   1926 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
   1927 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
   1928 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
   1929 	case AARCH64_OPND_SVE_ADDR_RZ:
   1930 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
   1931 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
   1932 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
   1933 	  modifiers = 1 << AARCH64_MOD_LSL;
   1934 	sve_rr_operand:
   1935 	  assert (opnd->addr.offset.is_reg);
   1936 	  assert (opnd->addr.preind);
   1937 	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
   1938 	      && opnd->addr.offset.regno == 31)
   1939 	    {
   1940 	      set_other_error (mismatch_detail, idx,
   1941 			       _("index register xzr is not allowed"));
   1942 	      return 0;
   1943 	    }
   1944 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
   1945 	      || (opnd->shifter.amount
   1946 		  != get_operand_specific_data (&aarch64_operands[type])))
   1947 	    {
   1948 	      set_other_error (mismatch_detail, idx,
   1949 			       _("invalid addressing mode"));
   1950 	      return 0;
   1951 	    }
   1952 	  break;
   1953 
   1954 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
   1955 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
   1956 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
   1957 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
   1958 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
   1959 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
   1960 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
   1961 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
   1962 	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
   1963 	  goto sve_rr_operand;
   1964 
   1965 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
   1966 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
   1967 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
   1968 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
   1969 	  min_value = 0;
   1970 	  max_value = 31;
   1971 	  goto sve_imm_offset;
   1972 
   1973 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
   1974 	  modifiers = 1 << AARCH64_MOD_LSL;
   1975 	sve_zz_operand:
   1976 	  assert (opnd->addr.offset.is_reg);
   1977 	  assert (opnd->addr.preind);
   1978 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
   1979 	      || opnd->shifter.amount < 0
   1980 	      || opnd->shifter.amount > 3)
   1981 	    {
   1982 	      set_other_error (mismatch_detail, idx,
   1983 			       _("invalid addressing mode"));
   1984 	      return 0;
   1985 	    }
   1986 	  break;
   1987 
   1988 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
   1989 	  modifiers = (1 << AARCH64_MOD_SXTW);
   1990 	  goto sve_zz_operand;
   1991 
   1992 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
   1993 	  modifiers = 1 << AARCH64_MOD_UXTW;
   1994 	  goto sve_zz_operand;
   1995 
   1996 	default:
   1997 	  break;
   1998 	}
   1999       break;
   2000 
   2001     case AARCH64_OPND_CLASS_SIMD_REGLIST:
   2002       if (type == AARCH64_OPND_LEt)
   2003 	{
   2004 	  /* Get the upper bound for the element index.  */
   2005 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
   2006 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
   2007 	    {
   2008 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   2009 	      return 0;
   2010 	    }
   2011 	}
   2012       /* The opcode dependent area stores the number of elements in
   2013 	 each structure to be loaded/stored.  */
   2014       num = get_opcode_dependent_value (opcode);
   2015       switch (type)
   2016 	{
   2017 	case AARCH64_OPND_LVt:
   2018 	  assert (num >= 1 && num <= 4);
   2019 	  /* Unless LD1/ST1, the number of registers should be equal to that
   2020 	     of the structure elements.  */
   2021 	  if (num != 1 && opnd->reglist.num_regs != num)
   2022 	    {
   2023 	      set_reg_list_error (mismatch_detail, idx, num);
   2024 	      return 0;
   2025 	    }
   2026 	  break;
   2027 	case AARCH64_OPND_LVt_AL:
   2028 	case AARCH64_OPND_LEt:
   2029 	  assert (num >= 1 && num <= 4);
   2030 	  /* The number of registers should be equal to that of the structure
   2031 	     elements.  */
   2032 	  if (opnd->reglist.num_regs != num)
   2033 	    {
   2034 	      set_reg_list_error (mismatch_detail, idx, num);
   2035 	      return 0;
   2036 	    }
   2037 	  break;
   2038 	default:
   2039 	  break;
   2040 	}
   2041       break;
   2042 
   2043     case AARCH64_OPND_CLASS_IMMEDIATE:
   2044       /* Constraint check on immediate operand.  */
   2045       imm = opnd->imm.value;
   2046       /* E.g. imm_0_31 constrains value to be 0..31.  */
   2047       if (qualifier_value_in_range_constraint_p (qualifier)
   2048 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
   2049 				get_upper_bound (qualifier)))
   2050 	{
   2051 	  set_imm_out_of_range_error (mismatch_detail, idx,
   2052 				      get_lower_bound (qualifier),
   2053 				      get_upper_bound (qualifier));
   2054 	  return 0;
   2055 	}
   2056 
   2057       switch (type)
   2058 	{
   2059 	case AARCH64_OPND_AIMM:
   2060 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2061 	    {
   2062 	      set_other_error (mismatch_detail, idx,
   2063 			       _("invalid shift operator"));
   2064 	      return 0;
   2065 	    }
   2066 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
   2067 	    {
   2068 	      set_other_error (mismatch_detail, idx,
   2069 			       _("shift amount must be 0 or 12"));
   2070 	      return 0;
   2071 	    }
   2072 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
   2073 	    {
   2074 	      set_other_error (mismatch_detail, idx,
   2075 			       _("immediate out of range"));
   2076 	      return 0;
   2077 	    }
   2078 	  break;
   2079 
   2080 	case AARCH64_OPND_HALF:
   2081 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
   2082 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2083 	    {
   2084 	      set_other_error (mismatch_detail, idx,
   2085 			       _("invalid shift operator"));
   2086 	      return 0;
   2087 	    }
   2088 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2089 	  if (!value_aligned_p (opnd->shifter.amount, 16))
   2090 	    {
   2091 	      set_other_error (mismatch_detail, idx,
   2092 			       _("shift amount must be a multiple of 16"));
   2093 	      return 0;
   2094 	    }
   2095 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
   2096 	    {
   2097 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
   2098 						 0, size * 8 - 16);
   2099 	      return 0;
   2100 	    }
   2101 	  if (opnd->imm.value < 0)
   2102 	    {
   2103 	      set_other_error (mismatch_detail, idx,
   2104 			       _("negative immediate value not allowed"));
   2105 	      return 0;
   2106 	    }
   2107 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
   2108 	    {
   2109 	      set_other_error (mismatch_detail, idx,
   2110 			       _("immediate out of range"));
   2111 	      return 0;
   2112 	    }
   2113 	  break;
   2114 
   2115 	case AARCH64_OPND_IMM_MOV:
   2116 	    {
   2117 	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2118 	      imm = opnd->imm.value;
   2119 	      assert (idx == 1);
   2120 	      switch (opcode->op)
   2121 		{
   2122 		case OP_MOV_IMM_WIDEN:
   2123 		  imm = ~imm;
   2124 		  /* Fall through.  */
   2125 		case OP_MOV_IMM_WIDE:
   2126 		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
   2127 		    {
   2128 		      set_other_error (mismatch_detail, idx,
   2129 				       _("immediate out of range"));
   2130 		      return 0;
   2131 		    }
   2132 		  break;
   2133 		case OP_MOV_IMM_LOG:
   2134 		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
   2135 		    {
   2136 		      set_other_error (mismatch_detail, idx,
   2137 				       _("immediate out of range"));
   2138 		      return 0;
   2139 		    }
   2140 		  break;
   2141 		default:
   2142 		  assert (0);
   2143 		  return 0;
   2144 		}
   2145 	    }
   2146 	  break;
   2147 
   2148 	case AARCH64_OPND_NZCV:
   2149 	case AARCH64_OPND_CCMP_IMM:
   2150 	case AARCH64_OPND_EXCEPTION:
   2151 	case AARCH64_OPND_UNDEFINED:
   2152 	case AARCH64_OPND_TME_UIMM16:
   2153 	case AARCH64_OPND_UIMM4:
   2154 	case AARCH64_OPND_UIMM4_ADDG:
   2155 	case AARCH64_OPND_UIMM7:
   2156 	case AARCH64_OPND_UIMM3_OP1:
   2157 	case AARCH64_OPND_UIMM3_OP2:
   2158 	case AARCH64_OPND_SVE_UIMM3:
   2159 	case AARCH64_OPND_SVE_UIMM7:
   2160 	case AARCH64_OPND_SVE_UIMM8:
   2161 	case AARCH64_OPND_SVE_UIMM8_53:
   2162 	  size = get_operand_fields_width (get_operand_from_code (type));
   2163 	  assert (size < 32);
   2164 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
   2165 	    {
   2166 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   2167 					  (1u << size) - 1);
   2168 	      return 0;
   2169 	    }
   2170 	  break;
   2171 
   2172 	case AARCH64_OPND_UIMM10:
   2173 	  /* Scaled unsigned 10 bits immediate offset.  */
   2174 	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
   2175 	    {
   2176 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
   2177 	      return 0;
   2178 	    }
   2179 
   2180 	  if (!value_aligned_p (opnd->imm.value, 16))
   2181 	    {
   2182 	      set_unaligned_error (mismatch_detail, idx, 16);
   2183 	      return 0;
   2184 	    }
   2185 	  break;
   2186 
   2187 	case AARCH64_OPND_SIMM5:
   2188 	case AARCH64_OPND_SVE_SIMM5:
   2189 	case AARCH64_OPND_SVE_SIMM5B:
   2190 	case AARCH64_OPND_SVE_SIMM6:
   2191 	case AARCH64_OPND_SVE_SIMM8:
   2192 	  size = get_operand_fields_width (get_operand_from_code (type));
   2193 	  assert (size < 32);
   2194 	  if (!value_fit_signed_field_p (opnd->imm.value, size))
   2195 	    {
   2196 	      set_imm_out_of_range_error (mismatch_detail, idx,
   2197 					  -(1 << (size - 1)),
   2198 					  (1 << (size - 1)) - 1);
   2199 	      return 0;
   2200 	    }
   2201 	  break;
   2202 
   2203 	case AARCH64_OPND_WIDTH:
   2204 	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
   2205 		  && opnds[0].type == AARCH64_OPND_Rd);
   2206 	  size = get_upper_bound (qualifier);
   2207 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
   2208 	    /* lsb+width <= reg.size  */
   2209 	    {
   2210 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
   2211 					  size - opnds[idx-1].imm.value);
   2212 	      return 0;
   2213 	    }
   2214 	  break;
   2215 
   2216 	case AARCH64_OPND_LIMM:
   2217 	case AARCH64_OPND_SVE_LIMM:
   2218 	  {
   2219 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2220 	    uint64_t uimm = opnd->imm.value;
   2221 	    if (opcode->op == OP_BIC)
   2222 	      uimm = ~uimm;
   2223 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   2224 	      {
   2225 		set_other_error (mismatch_detail, idx,
   2226 				 _("immediate out of range"));
   2227 		return 0;
   2228 	      }
   2229 	  }
   2230 	  break;
   2231 
   2232 	case AARCH64_OPND_IMM0:
   2233 	case AARCH64_OPND_FPIMM0:
   2234 	  if (opnd->imm.value != 0)
   2235 	    {
   2236 	      set_other_error (mismatch_detail, idx,
   2237 			       _("immediate zero expected"));
   2238 	      return 0;
   2239 	    }
   2240 	  break;
   2241 
   2242 	case AARCH64_OPND_IMM_ROT1:
   2243 	case AARCH64_OPND_IMM_ROT2:
   2244 	case AARCH64_OPND_SVE_IMM_ROT2:
   2245 	  if (opnd->imm.value != 0
   2246 	      && opnd->imm.value != 90
   2247 	      && opnd->imm.value != 180
   2248 	      && opnd->imm.value != 270)
   2249 	    {
   2250 	      set_other_error (mismatch_detail, idx,
   2251 			       _("rotate expected to be 0, 90, 180 or 270"));
   2252 	      return 0;
   2253 	    }
   2254 	  break;
   2255 
   2256 	case AARCH64_OPND_IMM_ROT3:
   2257 	case AARCH64_OPND_SVE_IMM_ROT1:
   2258 	case AARCH64_OPND_SVE_IMM_ROT3:
   2259 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
   2260 	    {
   2261 	      set_other_error (mismatch_detail, idx,
   2262 			       _("rotate expected to be 90 or 270"));
   2263 	      return 0;
   2264 	    }
   2265 	  break;
   2266 
   2267 	case AARCH64_OPND_SHLL_IMM:
   2268 	  assert (idx == 2);
   2269 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   2270 	  if (opnd->imm.value != size)
   2271 	    {
   2272 	      set_other_error (mismatch_detail, idx,
   2273 			       _("invalid shift amount"));
   2274 	      return 0;
   2275 	    }
   2276 	  break;
   2277 
   2278 	case AARCH64_OPND_IMM_VLSL:
   2279 	  size = aarch64_get_qualifier_esize (qualifier);
   2280 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
   2281 	    {
   2282 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   2283 					  size * 8 - 1);
   2284 	      return 0;
   2285 	    }
   2286 	  break;
   2287 
   2288 	case AARCH64_OPND_IMM_VLSR:
   2289 	  size = aarch64_get_qualifier_esize (qualifier);
   2290 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
   2291 	    {
   2292 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
   2293 	      return 0;
   2294 	    }
   2295 	  break;
   2296 
   2297 	case AARCH64_OPND_SIMD_IMM:
   2298 	case AARCH64_OPND_SIMD_IMM_SFT:
   2299 	  /* Qualifier check.  */
   2300 	  switch (qualifier)
   2301 	    {
   2302 	    case AARCH64_OPND_QLF_LSL:
   2303 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
   2304 		{
   2305 		  set_other_error (mismatch_detail, idx,
   2306 				   _("invalid shift operator"));
   2307 		  return 0;
   2308 		}
   2309 	      break;
   2310 	    case AARCH64_OPND_QLF_MSL:
   2311 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
   2312 		{
   2313 		  set_other_error (mismatch_detail, idx,
   2314 				   _("invalid shift operator"));
   2315 		  return 0;
   2316 		}
   2317 	      break;
   2318 	    case AARCH64_OPND_QLF_NIL:
   2319 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   2320 		{
   2321 		  set_other_error (mismatch_detail, idx,
   2322 				   _("shift is not permitted"));
   2323 		  return 0;
   2324 		}
   2325 	      break;
   2326 	    default:
   2327 	      assert (0);
   2328 	      return 0;
   2329 	    }
   2330 	  /* Is the immediate valid?  */
   2331 	  assert (idx == 1);
   2332 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
   2333 	    {
   2334 	      /* uimm8 or simm8 */
   2335 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
   2336 		{
   2337 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
   2338 		  return 0;
   2339 		}
   2340 	    }
   2341 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
   2342 	    {
   2343 	      /* uimm64 is not
   2344 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
   2345 		 ffffffffgggggggghhhhhhhh'.  */
   2346 	      set_other_error (mismatch_detail, idx,
   2347 			       _("invalid value for immediate"));
   2348 	      return 0;
   2349 	    }
   2350 	  /* Is the shift amount valid?  */
   2351 	  switch (opnd->shifter.kind)
   2352 	    {
   2353 	    case AARCH64_MOD_LSL:
   2354 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2355 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
   2356 		{
   2357 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
   2358 						     (size - 1) * 8);
   2359 		  return 0;
   2360 		}
   2361 	      if (!value_aligned_p (opnd->shifter.amount, 8))
   2362 		{
   2363 		  set_unaligned_error (mismatch_detail, idx, 8);
   2364 		  return 0;
   2365 		}
   2366 	      break;
   2367 	    case AARCH64_MOD_MSL:
	      /* Only 8 and 16 are valid shift amounts.  */
   2369 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
   2370 		{
   2371 		  set_other_error (mismatch_detail, idx,
   2372 				   _("shift amount must be 0 or 16"));
   2373 		  return 0;
   2374 		}
   2375 	      break;
   2376 	    default:
   2377 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   2378 		{
   2379 		  set_other_error (mismatch_detail, idx,
   2380 				   _("invalid shift operator"));
   2381 		  return 0;
   2382 		}
   2383 	      break;
   2384 	    }
   2385 	  break;
   2386 
   2387 	case AARCH64_OPND_FPIMM:
   2388 	case AARCH64_OPND_SIMD_FPIMM:
   2389 	case AARCH64_OPND_SVE_FPIMM8:
   2390 	  if (opnd->imm.is_fp == 0)
   2391 	    {
   2392 	      set_other_error (mismatch_detail, idx,
   2393 			       _("floating-point immediate expected"));
   2394 	      return 0;
   2395 	    }
   2396 	  /* The value is expected to be an 8-bit floating-point constant with
   2397 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
   2398 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
   2399 	     instruction).  */
   2400 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
   2401 	    {
   2402 	      set_other_error (mismatch_detail, idx,
   2403 			       _("immediate out of range"));
   2404 	      return 0;
   2405 	    }
   2406 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
   2407 	    {
   2408 	      set_other_error (mismatch_detail, idx,
   2409 			       _("invalid shift operator"));
   2410 	      return 0;
   2411 	    }
   2412 	  break;
   2413 
   2414 	case AARCH64_OPND_SVE_AIMM:
   2415 	  min_value = 0;
   2416 	sve_aimm:
   2417 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
   2418 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2419 	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
   2420 	  uvalue = opnd->imm.value;
   2421 	  shift = opnd->shifter.amount;
   2422 	  if (size == 1)
   2423 	    {
   2424 	      if (shift != 0)
   2425 		{
   2426 		  set_other_error (mismatch_detail, idx,
   2427 				   _("no shift amount allowed for"
   2428 				     " 8-bit constants"));
   2429 		  return 0;
   2430 		}
   2431 	    }
   2432 	  else
   2433 	    {
   2434 	      if (shift != 0 && shift != 8)
   2435 		{
   2436 		  set_other_error (mismatch_detail, idx,
   2437 				   _("shift amount must be 0 or 8"));
   2438 		  return 0;
   2439 		}
   2440 	      if (shift == 0 && (uvalue & 0xff) == 0)
   2441 		{
   2442 		  shift = 8;
   2443 		  uvalue = (int64_t) uvalue / 256;
   2444 		}
   2445 	    }
   2446 	  mask >>= shift;
   2447 	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
   2448 	    {
   2449 	      set_other_error (mismatch_detail, idx,
   2450 			       _("immediate too big for element size"));
   2451 	      return 0;
   2452 	    }
   2453 	  uvalue = (uvalue - min_value) & mask;
   2454 	  if (uvalue > 0xff)
   2455 	    {
   2456 	      set_other_error (mismatch_detail, idx,
   2457 			       _("invalid arithmetic immediate"));
   2458 	      return 0;
   2459 	    }
   2460 	  break;
   2461 
   2462 	case AARCH64_OPND_SVE_ASIMM:
   2463 	  min_value = -128;
   2464 	  goto sve_aimm;
   2465 
   2466 	case AARCH64_OPND_SVE_I1_HALF_ONE:
   2467 	  assert (opnd->imm.is_fp);
   2468 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
   2469 	    {
   2470 	      set_other_error (mismatch_detail, idx,
   2471 			       _("floating-point value must be 0.5 or 1.0"));
   2472 	      return 0;
   2473 	    }
   2474 	  break;
   2475 
   2476 	case AARCH64_OPND_SVE_I1_HALF_TWO:
   2477 	  assert (opnd->imm.is_fp);
   2478 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
   2479 	    {
   2480 	      set_other_error (mismatch_detail, idx,
   2481 			       _("floating-point value must be 0.5 or 2.0"));
   2482 	      return 0;
   2483 	    }
   2484 	  break;
   2485 
   2486 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
   2487 	  assert (opnd->imm.is_fp);
   2488 	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
   2489 	    {
   2490 	      set_other_error (mismatch_detail, idx,
   2491 			       _("floating-point value must be 0.0 or 1.0"));
   2492 	      return 0;
   2493 	    }
   2494 	  break;
   2495 
   2496 	case AARCH64_OPND_SVE_INV_LIMM:
   2497 	  {
   2498 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2499 	    uint64_t uimm = ~opnd->imm.value;
   2500 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   2501 	      {
   2502 		set_other_error (mismatch_detail, idx,
   2503 				 _("immediate out of range"));
   2504 		return 0;
   2505 	      }
   2506 	  }
   2507 	  break;
   2508 
   2509 	case AARCH64_OPND_SVE_LIMM_MOV:
   2510 	  {
   2511 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
   2512 	    uint64_t uimm = opnd->imm.value;
   2513 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
   2514 	      {
   2515 		set_other_error (mismatch_detail, idx,
   2516 				 _("immediate out of range"));
   2517 		return 0;
   2518 	      }
   2519 	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
   2520 	      {
   2521 		set_other_error (mismatch_detail, idx,
   2522 				 _("invalid replicated MOV immediate"));
   2523 		return 0;
   2524 	      }
   2525 	  }
   2526 	  break;
   2527 
   2528 	case AARCH64_OPND_SVE_PATTERN_SCALED:
   2529 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
   2530 	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
   2531 	    {
   2532 	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
   2533 	      return 0;
   2534 	    }
   2535 	  break;
   2536 
   2537 	case AARCH64_OPND_SVE_SHLIMM_PRED:
   2538 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
   2539 	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
   2540 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   2541 	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
   2542 	    {
   2543 	      set_imm_out_of_range_error (mismatch_detail, idx,
   2544 					  0, 8 * size - 1);
   2545 	      return 0;
   2546 	    }
   2547 	  break;
   2548 
   2549 	case AARCH64_OPND_SVE_SHRIMM_PRED:
   2550 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
   2551 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
   2552 	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
   2553 	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
   2554 	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
   2555 	    {
   2556 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
   2557 	      return 0;
   2558 	    }
   2559 	  break;
   2560 
   2561 	default:
   2562 	  break;
   2563 	}
   2564       break;
   2565 
   2566     case AARCH64_OPND_CLASS_SYSTEM:
   2567       switch (type)
   2568 	{
   2569 	case AARCH64_OPND_PSTATEFIELD:
   2570 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
   2571 	  /* MSR UAO, #uimm4
   2572 	     MSR PAN, #uimm4
   2573 	     MSR SSBS,#uimm4
   2574 	     The immediate must be #0 or #1.  */
   2575 	  if ((opnd->pstatefield == 0x03	/* UAO.  */
   2576 	       || opnd->pstatefield == 0x04	/* PAN.  */
   2577 	       || opnd->pstatefield == 0x19     /* SSBS.  */
   2578 	       || opnd->pstatefield == 0x1a)	/* DIT.  */
   2579 	      && opnds[1].imm.value > 1)
   2580 	    {
   2581 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
   2582 	      return 0;
   2583 	    }
   2584 	  /* MSR SPSel, #uimm4
   2585 	     Uses uimm4 as a control value to select the stack pointer: if
   2586 	     bit 0 is set it selects the current exception level's stack
   2587 	     pointer, if bit 0 is clear it selects shared EL0 stack pointer.
   2588 	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
   2589 	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
   2590 	    {
   2591 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
   2592 	      return 0;
   2593 	    }
   2594 	  break;
   2595 	default:
   2596 	  break;
   2597 	}
   2598       break;
   2599 
   2600     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
   2601       /* Get the upper bound for the element index.  */
   2602       if (opcode->op == OP_FCMLA_ELEM)
   2603 	/* FCMLA index range depends on the vector size of other operands
   2604 	   and is halfed because complex numbers take two elements.  */
   2605 	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
   2606 	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
   2607       else
   2608 	num = 16;
   2609       num = num / aarch64_get_qualifier_esize (qualifier) - 1;
   2610       assert (aarch64_get_qualifier_nelem (qualifier) == 1);
   2611 
   2612       /* Index out-of-range.  */
   2613       if (!value_in_range_p (opnd->reglane.index, 0, num))
   2614 	{
   2615 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   2616 	  return 0;
   2617 	}
   2618       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
   2619 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
   2620 	 number is encoded in "size:M:Rm":
   2621 	 size	<Vm>
   2622 	 00		RESERVED
   2623 	 01		0:Rm
   2624 	 10		M:Rm
   2625 	 11		RESERVED  */
   2626       if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
   2627 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
   2628 	{
   2629 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   2630 	  return 0;
   2631 	}
   2632       break;
   2633 
   2634     case AARCH64_OPND_CLASS_MODIFIED_REG:
   2635       assert (idx == 1 || idx == 2);
   2636       switch (type)
   2637 	{
   2638 	case AARCH64_OPND_Rm_EXT:
   2639 	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
   2640 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
   2641 	    {
   2642 	      set_other_error (mismatch_detail, idx,
   2643 			       _("extend operator expected"));
   2644 	      return 0;
   2645 	    }
   2646 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
   2647 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
   2648 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
   2649 	     case.  */
   2650 	  if (!aarch64_stack_pointer_p (opnds + 0)
   2651 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
   2652 	    {
   2653 	      if (!opnd->shifter.operator_present)
   2654 		{
   2655 		  set_other_error (mismatch_detail, idx,
   2656 				   _("missing extend operator"));
   2657 		  return 0;
   2658 		}
   2659 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
   2660 		{
   2661 		  set_other_error (mismatch_detail, idx,
   2662 				   _("'LSL' operator not allowed"));
   2663 		  return 0;
   2664 		}
   2665 	    }
   2666 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
   2667 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
   2668 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
   2669 	    {
   2670 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
   2671 	      return 0;
   2672 	    }
   2673 	  /* In the 64-bit form, the final register operand is written as Wm
   2674 	     for all but the (possibly omitted) UXTX/LSL and SXTX
   2675 	     operators.
   2676 	     N.B. GAS allows X register to be used with any operator as a
   2677 	     programming convenience.  */
   2678 	  if (qualifier == AARCH64_OPND_QLF_X
   2679 	      && opnd->shifter.kind != AARCH64_MOD_LSL
   2680 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
   2681 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
   2682 	    {
   2683 	      set_other_error (mismatch_detail, idx, _("W register expected"));
   2684 	      return 0;
   2685 	    }
   2686 	  break;
   2687 
   2688 	case AARCH64_OPND_Rm_SFT:
   2689 	  /* ROR is not available to the shifted register operand in
   2690 	     arithmetic instructions.  */
   2691 	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
   2692 	    {
   2693 	      set_other_error (mismatch_detail, idx,
   2694 			       _("shift operator expected"));
   2695 	      return 0;
   2696 	    }
   2697 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
   2698 	      && opcode->iclass != log_shift)
   2699 	    {
   2700 	      set_other_error (mismatch_detail, idx,
   2701 			       _("'ROR' operator not allowed"));
   2702 	      return 0;
   2703 	    }
   2704 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
   2705 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
   2706 	    {
   2707 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
   2708 	      return 0;
   2709 	    }
   2710 	  break;
   2711 
   2712 	default:
   2713 	  break;
   2714 	}
   2715       break;
   2716 
   2717     default:
   2718       break;
   2719     }
   2720 
   2721   return 1;
   2722 }
   2723 
   2724 /* Main entrypoint for the operand constraint checking.
   2725 
   2726    Return 1 if operands of *INST meet the constraint applied by the operand
   2727    codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   2728    not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   2729    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   2730    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   2731    error kind when it is notified that an instruction does not pass the check).
   2732 
   2733    Un-determined operand qualifiers may get established during the process.  */
   2734 
   2735 int
   2736 aarch64_match_operands_constraint (aarch64_inst *inst,
   2737 				   aarch64_operand_error *mismatch_detail)
   2738 {
   2739   int i;
   2740 
   2741   DEBUG_TRACE ("enter");
   2742 
   2743   /* Check for cases where a source register needs to be the same as the
   2744      destination register.  Do this before matching qualifiers since if
   2745      an instruction has both invalid tying and invalid qualifiers,
   2746      the error about qualifiers would suggest several alternative
   2747      instructions that also have invalid tying.  */
   2748   i = inst->opcode->tied_operand;
   2749   if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
   2750     {
   2751       if (mismatch_detail)
   2752 	{
   2753 	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
   2754 	  mismatch_detail->index = i;
   2755 	  mismatch_detail->error = NULL;
   2756 	}
   2757       return 0;
   2758     }
   2759 
   2760   /* Match operands' qualifier.
   2761      *INST has already had qualifier establish for some, if not all, of
   2762      its operands; we need to find out whether these established
   2763      qualifiers match one of the qualifier sequence in
   2764      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
   2765      with the corresponding qualifier in such a sequence.
   2766      Only basic operand constraint checking is done here; the more thorough
   2767      constraint checking will carried out by operand_general_constraint_met_p,
   2768      which has be to called after this in order to get all of the operands'
   2769      qualifiers established.  */
   2770   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
   2771     {
   2772       DEBUG_TRACE ("FAIL on operand qualifier matching");
   2773       if (mismatch_detail)
   2774 	{
   2775 	  /* Return an error type to indicate that it is the qualifier
   2776 	     matching failure; we don't care about which operand as there
   2777 	     are enough information in the opcode table to reproduce it.  */
   2778 	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
   2779 	  mismatch_detail->index = -1;
   2780 	  mismatch_detail->error = NULL;
   2781 	}
   2782       return 0;
   2783     }
   2784 
   2785   /* Match operands' constraint.  */
   2786   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2787     {
   2788       enum aarch64_opnd type = inst->opcode->operands[i];
   2789       if (type == AARCH64_OPND_NIL)
   2790 	break;
   2791       if (inst->operands[i].skip)
   2792 	{
   2793 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
   2794 	  continue;
   2795 	}
   2796       if (operand_general_constraint_met_p (inst->operands, i, type,
   2797 					    inst->opcode, mismatch_detail) == 0)
   2798 	{
   2799 	  DEBUG_TRACE ("FAIL on operand %d", i);
   2800 	  return 0;
   2801 	}
   2802     }
   2803 
   2804   DEBUG_TRACE ("PASS");
   2805 
   2806   return 1;
   2807 }
   2808 
   2809 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   2810    Also updates the TYPE of each INST->OPERANDS with the corresponding
   2811    value of OPCODE->OPERANDS.
   2812 
   2813    Note that some operand qualifiers may need to be manually cleared by
   2814    the caller before it further calls the aarch64_opcode_encode; by
   2815    doing this, it helps the qualifier matching facilities work
   2816    properly.  */
   2817 
   2818 const aarch64_opcode*
   2819 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
   2820 {
   2821   int i;
   2822   const aarch64_opcode *old = inst->opcode;
   2823 
   2824   inst->opcode = opcode;
   2825 
   2826   /* Update the operand types.  */
   2827   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2828     {
   2829       inst->operands[i].type = opcode->operands[i];
   2830       if (opcode->operands[i] == AARCH64_OPND_NIL)
   2831 	break;
   2832     }
   2833 
   2834   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
   2835 
   2836   return old;
   2837 }
   2838 
   2839 int
   2840 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
   2841 {
   2842   int i;
   2843   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2844     if (operands[i] == operand)
   2845       return i;
   2846     else if (operands[i] == AARCH64_OPND_NIL)
   2847       break;
   2848   return -1;
   2849 }
   2850 
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
/* Indexed as int_reg[has_zr][is_64][regno]; entry 31 is the stack-pointer
   name in bank 0 and the zero-register name in bank 1.  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
   2882 
   2883 /* Return the integer register name.
   if SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
   2885 
   2886 static inline const char *
   2887 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
   2888 {
   2889   const int has_zr = sp_reg_p ? 0 : 1;
   2890   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
   2891   return int_reg[has_zr][is_64][regno];
   2892 }
   2893 
   2894 /* Like get_int_reg_name, but IS_64 is always 1.  */
   2895 
   2896 static inline const char *
   2897 get_64bit_int_reg_name (int regno, int sp_reg_p)
   2898 {
   2899   const int has_zr = sp_reg_p ? 0 : 1;
   2900   return int_reg[has_zr][1][regno];
   2901 }
   2902 
   2903 /* Get the name of the integer offset register in OPND, using the shift type
   2904    to decide whether it's a word or doubleword.  */
   2905 
   2906 static inline const char *
   2907 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
   2908 {
   2909   switch (opnd->shifter.kind)
   2910     {
   2911     case AARCH64_MOD_UXTW:
   2912     case AARCH64_MOD_SXTW:
   2913       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
   2914 
   2915     case AARCH64_MOD_LSL:
   2916     case AARCH64_MOD_SXTX:
   2917       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
   2918 
   2919     default:
   2920       abort ();
   2921     }
   2922 }
   2923 
   2924 /* Get the name of the SVE vector offset register in OPND, using the operand
   2925    qualifier to decide whether the suffix should be .S or .D.  */
   2926 
   2927 static inline const char *
   2928 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
   2929 {
   2930   assert (qualifier == AARCH64_OPND_QLF_S_S
   2931 	  || qualifier == AARCH64_OPND_QLF_S_D);
   2932   return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
   2933 }
   2934 
   2935 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
   2936 
/* Reinterpret a 64-bit pattern as a double (bit-for-bit).  */
typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

/* Reinterpret a 32-bit pattern as a float (bit-for-bit).  */
typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision before
   printing, hence the 32-bit fields here as well.  */
typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;
   2954 
   2955 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   2956    normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   2957    (depending on the type of the instruction).  IMM8 will be expanded to a
   2958    single-precision floating-point value (SIZE == 4) or a double-precision
   2959    floating-point value (SIZE == 8).  A half-precision floating-point value
   2960    (SIZE == 2) is expanded to a single-precision floating-point value.  The
   2961    expanded value is returned.  */
   2962 
static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint64_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  const uint64_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint64_t b = (imm8 >> 6) & 0x01;	/* imm8<6>   */
  const uint64_t b_repl4 = b ? 0xf : 0;		/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Double precision: sign : NOT(b) : Replicate(b,8) : cdefgh,
	 followed by 48 zero bits.  Bit 54, the eighth copy of b, is
	 supplied by the top bit of FRAC.  */
      result = (sign << 63)
	| ((b ^ 1) << 62)			/* NOT(imm8<6>) */
	| (b_repl4 << 58)
	| (b << 57) | (b << 56) | (b << 55)	/* Replicate(imm8<6>,7) */
	| (frac << 48);				/* imm8<6>:imm8<5:0> */
    }
  else if (size == 4 || size == 2)
    {
      /* Single precision (half precision is expanded to single):
	 sign : NOT(b) : Replicate(b,5) : cdefgh, then 19 zero bits.  */
      result = (sign << 31)
	| ((b ^ 1) << 30)			/* NOT(imm8<6>) */
	| (b_repl4 << 26)			/* Replicate(imm8<6>,4) */
	| (frac << 19);				/* imm8<6>:imm8<5:0> */
    }
  else
    /* An unsupported size.  */
    assert (0);

  return result;
}
   2998 
   2999 /* Produce the string representation of the register list operand *OPND
   3000    in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   3001    the register name that comes before the register number, such as "v".  */
   3002 static void
   3003 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
   3004 		     const char *prefix)
   3005 {
   3006   const int num_regs = opnd->reglist.num_regs;
   3007   const int first_reg = opnd->reglist.first_regno;
   3008   const int last_reg = (first_reg + num_regs - 1) & 0x1f;
   3009   const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
   3010   char tb[8];	/* Temporary buffer.  */
   3011 
   3012   assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
   3013   assert (num_regs >= 1 && num_regs <= 4);
   3014 
   3015   /* Prepare the index if any.  */
   3016   if (opnd->reglist.has_index)
   3017     /* PR 21096: The %100 is to silence a warning about possible truncation.  */
   3018     snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
   3019   else
   3020     tb[0] = '\0';
   3021 
   3022   /* The hyphenated form is preferred for disassembly if there are
   3023      more than two registers in the list, and the register numbers
   3024      are monotonically increasing in increments of one.  */
   3025   if (num_regs > 2 && last_reg > first_reg)
   3026     snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
   3027 	      prefix, last_reg, qlf_name, tb);
   3028   else
   3029     {
   3030       const int reg0 = first_reg;
   3031       const int reg1 = (first_reg + 1) & 0x1f;
   3032       const int reg2 = (first_reg + 2) & 0x1f;
   3033       const int reg3 = (first_reg + 3) & 0x1f;
   3034 
   3035       switch (num_regs)
   3036 	{
   3037 	case 1:
   3038 	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
   3039 	  break;
   3040 	case 2:
   3041 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
   3042 		    prefix, reg1, qlf_name, tb);
   3043 	  break;
   3044 	case 3:
   3045 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
   3046 		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
   3047 		    prefix, reg2, qlf_name, tb);
   3048 	  break;
   3049 	case 4:
   3050 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
   3051 		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
   3052 		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
   3053 	  break;
   3054 	}
   3055     }
   3056 }
   3057 
   3058 /* Print the register+immediate address in OPND to BUF, which has SIZE
   3059    characters.  BASE is the name of the base register.  */
   3060 
   3061 static void
   3062 print_immediate_offset_address (char *buf, size_t size,
   3063 				const aarch64_opnd_info *opnd,
   3064 				const char *base)
   3065 {
   3066   if (opnd->addr.writeback)
   3067     {
   3068       if (opnd->addr.preind)
   3069         {
   3070 	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
   3071             snprintf (buf, size, "[%s]!", base);
   3072           else
   3073 	    snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
   3074         }
   3075       else
   3076 	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
   3077     }
   3078   else
   3079     {
   3080       if (opnd->shifter.operator_present)
   3081 	{
   3082 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
   3083 	  snprintf (buf, size, "[%s, #%d, mul vl]",
   3084 		    base, opnd->addr.offset.imm);
   3085 	}
   3086       else if (opnd->addr.offset.imm)
   3087 	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
   3088       else
   3089 	snprintf (buf, size, "[%s]", base);
   3090     }
   3091 }
   3092 
   3093 /* Produce the string representation of the register offset address operand
   3094    *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   3095    the names of the base and offset registers.  */
   3096 static void
   3097 print_register_offset_address (char *buf, size_t size,
   3098 			       const aarch64_opnd_info *opnd,
   3099 			       const char *base, const char *offset)
   3100 {
   3101   char tb[16];			/* Temporary buffer.  */
   3102   bfd_boolean print_extend_p = TRUE;
   3103   bfd_boolean print_amount_p = TRUE;
   3104   const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
   3105 
   3106   if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
   3107 				|| !opnd->shifter.amount_present))
   3108     {
   3109       /* Not print the shift/extend amount when the amount is zero and
   3110          when it is not the special case of 8-bit load/store instruction.  */
   3111       print_amount_p = FALSE;
   3112       /* Likewise, no need to print the shift operator LSL in such a
   3113 	 situation.  */
   3114       if (opnd->shifter.kind == AARCH64_MOD_LSL)
   3115 	print_extend_p = FALSE;
   3116     }
   3117 
   3118   /* Prepare for the extend/shift.  */
   3119   if (print_extend_p)
   3120     {
   3121       if (print_amount_p)
   3122 	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
   3123   /* PR 21096: The %100 is to silence a warning about possible truncation.  */
   3124 		  (opnd->shifter.amount % 100));
   3125       else
   3126 	snprintf (tb, sizeof (tb), ", %s", shift_name);
   3127     }
   3128   else
   3129     tb[0] = '\0';
   3130 
   3131   snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
   3132 }
   3133 
   3134 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   3135    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   3136    PC, PCREL_P and ADDRESS are used to pass in and return information about
   3137    the PC-relative address calculation, where the PC value is passed in
   3138    PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   3139    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   3140    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
   3141 
   3142    The function serves both the disassembler and the assembler diagnostics
   3143    issuer, which is the reason why it lives in this file.  */
   3144 
   3145 void
   3146 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
   3147 		       const aarch64_opcode *opcode,
   3148 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
   3149 		       bfd_vma *address, char** notes,
   3150 		       aarch64_feature_set features)
   3151 {
   3152   unsigned int i, num_conds;
   3153   const char *name = NULL;
   3154   const aarch64_opnd_info *opnd = opnds + idx;
   3155   enum aarch64_modifier_kind kind;
   3156   uint64_t addr, enum_value;
   3157 
   3158   buf[0] = '\0';
   3159   if (pcrel_p)
   3160     *pcrel_p = 0;
   3161 
   3162   switch (opnd->type)
   3163     {
   3164     case AARCH64_OPND_Rd:
   3165     case AARCH64_OPND_Rn:
   3166     case AARCH64_OPND_Rm:
   3167     case AARCH64_OPND_Rt:
   3168     case AARCH64_OPND_Rt2:
   3169     case AARCH64_OPND_Rs:
   3170     case AARCH64_OPND_Ra:
   3171     case AARCH64_OPND_Rt_SYS:
   3172     case AARCH64_OPND_PAIRREG:
   3173     case AARCH64_OPND_SVE_Rm:
   3174       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
   3175 	 the <ic_op>, therefore we use opnd->present to override the
   3176 	 generic optional-ness information.  */
   3177       if (opnd->type == AARCH64_OPND_Rt_SYS)
   3178 	{
   3179 	  if (!opnd->present)
   3180 	    break;
   3181 	}
   3182       /* Omit the operand, e.g. RET.  */
   3183       else if (optional_operand_p (opcode, idx)
   3184 	       && (opnd->reg.regno
   3185 		   == get_optional_operand_default_value (opcode)))
   3186 	break;
   3187       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   3188 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   3189       snprintf (buf, size, "%s",
   3190 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   3191       break;
   3192 
   3193     case AARCH64_OPND_Rd_SP:
   3194     case AARCH64_OPND_Rn_SP:
   3195     case AARCH64_OPND_Rt_SP:
   3196     case AARCH64_OPND_SVE_Rn_SP:
   3197     case AARCH64_OPND_Rm_SP:
   3198       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   3199 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
   3200 	      || opnd->qualifier == AARCH64_OPND_QLF_X
   3201 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
   3202       snprintf (buf, size, "%s",
   3203 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
   3204       break;
   3205 
   3206     case AARCH64_OPND_Rm_EXT:
   3207       kind = opnd->shifter.kind;
   3208       assert (idx == 1 || idx == 2);
   3209       if ((aarch64_stack_pointer_p (opnds)
   3210 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
   3211 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
   3212 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
   3213 	       && kind == AARCH64_MOD_UXTW)
   3214 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
   3215 		  && kind == AARCH64_MOD_UXTX)))
   3216 	{
   3217 	  /* 'LSL' is the preferred form in this case.  */
   3218 	  kind = AARCH64_MOD_LSL;
   3219 	  if (opnd->shifter.amount == 0)
   3220 	    {
   3221 	      /* Shifter omitted.  */
   3222 	      snprintf (buf, size, "%s",
   3223 			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   3224 	      break;
   3225 	    }
   3226 	}
   3227       if (opnd->shifter.amount)
   3228 	snprintf (buf, size, "%s, %s #%" PRIi64,
   3229 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   3230 		  aarch64_operand_modifiers[kind].name,
   3231 		  opnd->shifter.amount);
   3232       else
   3233 	snprintf (buf, size, "%s, %s",
   3234 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   3235 		  aarch64_operand_modifiers[kind].name);
   3236       break;
   3237 
   3238     case AARCH64_OPND_Rm_SFT:
   3239       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   3240 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   3241       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
   3242 	snprintf (buf, size, "%s",
   3243 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   3244       else
   3245 	snprintf (buf, size, "%s, %s #%" PRIi64,
   3246 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   3247 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   3248 		  opnd->shifter.amount);
   3249       break;
   3250 
   3251     case AARCH64_OPND_Fd:
   3252     case AARCH64_OPND_Fn:
   3253     case AARCH64_OPND_Fm:
   3254     case AARCH64_OPND_Fa:
   3255     case AARCH64_OPND_Ft:
   3256     case AARCH64_OPND_Ft2:
   3257     case AARCH64_OPND_Sd:
   3258     case AARCH64_OPND_Sn:
   3259     case AARCH64_OPND_Sm:
   3260     case AARCH64_OPND_SVE_VZn:
   3261     case AARCH64_OPND_SVE_Vd:
   3262     case AARCH64_OPND_SVE_Vm:
   3263     case AARCH64_OPND_SVE_Vn:
   3264       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
   3265 		opnd->reg.regno);
   3266       break;
   3267 
   3268     case AARCH64_OPND_Va:
   3269     case AARCH64_OPND_Vd:
   3270     case AARCH64_OPND_Vn:
   3271     case AARCH64_OPND_Vm:
   3272       snprintf (buf, size, "v%d.%s", opnd->reg.regno,
   3273 		aarch64_get_qualifier_name (opnd->qualifier));
   3274       break;
   3275 
   3276     case AARCH64_OPND_Ed:
   3277     case AARCH64_OPND_En:
   3278     case AARCH64_OPND_Em:
   3279     case AARCH64_OPND_Em16:
   3280     case AARCH64_OPND_SM3_IMM2:
   3281       snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
   3282 		aarch64_get_qualifier_name (opnd->qualifier),
   3283 		opnd->reglane.index);
   3284       break;
   3285 
   3286     case AARCH64_OPND_VdD1:
   3287     case AARCH64_OPND_VnD1:
   3288       snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
   3289       break;
   3290 
   3291     case AARCH64_OPND_LVn:
   3292     case AARCH64_OPND_LVt:
   3293     case AARCH64_OPND_LVt_AL:
   3294     case AARCH64_OPND_LEt:
   3295       print_register_list (buf, size, opnd, "v");
   3296       break;
   3297 
   3298     case AARCH64_OPND_SVE_Pd:
   3299     case AARCH64_OPND_SVE_Pg3:
   3300     case AARCH64_OPND_SVE_Pg4_5:
   3301     case AARCH64_OPND_SVE_Pg4_10:
   3302     case AARCH64_OPND_SVE_Pg4_16:
   3303     case AARCH64_OPND_SVE_Pm:
   3304     case AARCH64_OPND_SVE_Pn:
   3305     case AARCH64_OPND_SVE_Pt:
   3306       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   3307 	snprintf (buf, size, "p%d", opnd->reg.regno);
   3308       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
   3309 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
   3310 	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
   3311 		  aarch64_get_qualifier_name (opnd->qualifier));
   3312       else
   3313 	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
   3314 		  aarch64_get_qualifier_name (opnd->qualifier));
   3315       break;
   3316 
   3317     case AARCH64_OPND_SVE_Za_5:
   3318     case AARCH64_OPND_SVE_Za_16:
   3319     case AARCH64_OPND_SVE_Zd:
   3320     case AARCH64_OPND_SVE_Zm_5:
   3321     case AARCH64_OPND_SVE_Zm_16:
   3322     case AARCH64_OPND_SVE_Zn:
   3323     case AARCH64_OPND_SVE_Zt:
   3324       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
   3325 	snprintf (buf, size, "z%d", opnd->reg.regno);
   3326       else
   3327 	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
   3328 		  aarch64_get_qualifier_name (opnd->qualifier));
   3329       break;
   3330 
   3331     case AARCH64_OPND_SVE_ZnxN:
   3332     case AARCH64_OPND_SVE_ZtxN:
   3333       print_register_list (buf, size, opnd, "z");
   3334       break;
   3335 
   3336     case AARCH64_OPND_SVE_Zm3_INDEX:
   3337     case AARCH64_OPND_SVE_Zm3_22_INDEX:
   3338     case AARCH64_OPND_SVE_Zm3_11_INDEX:
   3339     case AARCH64_OPND_SVE_Zm4_11_INDEX:
   3340     case AARCH64_OPND_SVE_Zm4_INDEX:
   3341     case AARCH64_OPND_SVE_Zn_INDEX:
   3342       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
   3343 		aarch64_get_qualifier_name (opnd->qualifier),
   3344 		opnd->reglane.index);
   3345       break;
   3346 
   3347     case AARCH64_OPND_CRn:
   3348     case AARCH64_OPND_CRm:
   3349       snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
   3350       break;
   3351 
   3352     case AARCH64_OPND_IDX:
   3353     case AARCH64_OPND_MASK:
   3354     case AARCH64_OPND_IMM:
   3355     case AARCH64_OPND_IMM_2:
   3356     case AARCH64_OPND_WIDTH:
   3357     case AARCH64_OPND_UIMM3_OP1:
   3358     case AARCH64_OPND_UIMM3_OP2:
   3359     case AARCH64_OPND_BIT_NUM:
   3360     case AARCH64_OPND_IMM_VLSL:
   3361     case AARCH64_OPND_IMM_VLSR:
   3362     case AARCH64_OPND_SHLL_IMM:
   3363     case AARCH64_OPND_IMM0:
   3364     case AARCH64_OPND_IMMR:
   3365     case AARCH64_OPND_IMMS:
   3366     case AARCH64_OPND_UNDEFINED:
   3367     case AARCH64_OPND_FBITS:
   3368     case AARCH64_OPND_TME_UIMM16:
   3369     case AARCH64_OPND_SIMM5:
   3370     case AARCH64_OPND_SVE_SHLIMM_PRED:
   3371     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
   3372     case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
   3373     case AARCH64_OPND_SVE_SHRIMM_PRED:
   3374     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
   3375     case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
   3376     case AARCH64_OPND_SVE_SIMM5:
   3377     case AARCH64_OPND_SVE_SIMM5B:
   3378     case AARCH64_OPND_SVE_SIMM6:
   3379     case AARCH64_OPND_SVE_SIMM8:
   3380     case AARCH64_OPND_SVE_UIMM3:
   3381     case AARCH64_OPND_SVE_UIMM7:
   3382     case AARCH64_OPND_SVE_UIMM8:
   3383     case AARCH64_OPND_SVE_UIMM8_53:
   3384     case AARCH64_OPND_IMM_ROT1:
   3385     case AARCH64_OPND_IMM_ROT2:
   3386     case AARCH64_OPND_IMM_ROT3:
   3387     case AARCH64_OPND_SVE_IMM_ROT1:
   3388     case AARCH64_OPND_SVE_IMM_ROT2:
   3389     case AARCH64_OPND_SVE_IMM_ROT3:
   3390       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   3391       break;
   3392 
   3393     case AARCH64_OPND_SVE_I1_HALF_ONE:
   3394     case AARCH64_OPND_SVE_I1_HALF_TWO:
   3395     case AARCH64_OPND_SVE_I1_ZERO_ONE:
   3396       {
   3397 	single_conv_t c;
   3398 	c.i = opnd->imm.value;
   3399 	snprintf (buf, size, "#%.1f", c.f);
   3400 	break;
   3401       }
   3402 
   3403     case AARCH64_OPND_SVE_PATTERN:
   3404       if (optional_operand_p (opcode, idx)
   3405 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   3406 	break;
   3407       enum_value = opnd->imm.value;
   3408       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   3409       if (aarch64_sve_pattern_array[enum_value])
   3410 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
   3411       else
   3412 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   3413       break;
   3414 
   3415     case AARCH64_OPND_SVE_PATTERN_SCALED:
   3416       if (optional_operand_p (opcode, idx)
   3417 	  && !opnd->shifter.operator_present
   3418 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
   3419 	break;
   3420       enum_value = opnd->imm.value;
   3421       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
   3422       if (aarch64_sve_pattern_array[opnd->imm.value])
   3423 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
   3424       else
   3425 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   3426       if (opnd->shifter.operator_present)
   3427 	{
   3428 	  size_t len = strlen (buf);
   3429 	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
   3430 		    aarch64_operand_modifiers[opnd->shifter.kind].name,
   3431 		    opnd->shifter.amount);
   3432 	}
   3433       break;
   3434 
   3435     case AARCH64_OPND_SVE_PRFOP:
   3436       enum_value = opnd->imm.value;
   3437       assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
   3438       if (aarch64_sve_prfop_array[enum_value])
   3439 	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
   3440       else
   3441 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   3442       break;
   3443 
   3444     case AARCH64_OPND_IMM_MOV:
   3445       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   3446 	{
   3447 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
   3448 	    {
   3449 	      int imm32 = opnd->imm.value;
   3450 	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
   3451 	    }
   3452 	  break;
   3453 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
   3454 	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
   3455 		    opnd->imm.value, opnd->imm.value);
   3456 	  break;
   3457 	default: assert (0);
   3458 	}
   3459       break;
   3460 
   3461     case AARCH64_OPND_FPIMM0:
   3462       snprintf (buf, size, "#0.0");
   3463       break;
   3464 
   3465     case AARCH64_OPND_LIMM:
   3466     case AARCH64_OPND_AIMM:
   3467     case AARCH64_OPND_HALF:
   3468     case AARCH64_OPND_SVE_INV_LIMM:
   3469     case AARCH64_OPND_SVE_LIMM:
   3470     case AARCH64_OPND_SVE_LIMM_MOV:
   3471       if (opnd->shifter.amount)
   3472 	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
   3473 		  opnd->shifter.amount);
   3474       else
   3475 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   3476       break;
   3477 
   3478     case AARCH64_OPND_SIMD_IMM:
   3479     case AARCH64_OPND_SIMD_IMM_SFT:
   3480       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
   3481 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
   3482 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   3483       else
   3484 	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
   3485 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   3486 		  opnd->shifter.amount);
   3487       break;
   3488 
   3489     case AARCH64_OPND_SVE_AIMM:
   3490     case AARCH64_OPND_SVE_ASIMM:
   3491       if (opnd->shifter.amount)
   3492 	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
   3493 		  opnd->shifter.amount);
   3494       else
   3495 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   3496       break;
   3497 
   3498     case AARCH64_OPND_FPIMM:
   3499     case AARCH64_OPND_SIMD_FPIMM:
   3500     case AARCH64_OPND_SVE_FPIMM8:
   3501       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   3502 	{
   3503 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
   3504 	    {
   3505 	      half_conv_t c;
   3506 	      c.i = expand_fp_imm (2, opnd->imm.value);
   3507 	      snprintf (buf, size,  "#%.18e", c.f);
   3508 	    }
   3509 	  break;
   3510 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
   3511 	    {
   3512 	      single_conv_t c;
   3513 	      c.i = expand_fp_imm (4, opnd->imm.value);
   3514 	      snprintf (buf, size,  "#%.18e", c.f);
   3515 	    }
   3516 	  break;
   3517 	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
   3518 	    {
   3519 	      double_conv_t c;
   3520 	      c.i = expand_fp_imm (8, opnd->imm.value);
   3521 	      snprintf (buf, size,  "#%.18e", c.d);
   3522 	    }
   3523 	  break;
   3524 	default: assert (0);
   3525 	}
   3526       break;
   3527 
   3528     case AARCH64_OPND_CCMP_IMM:
   3529     case AARCH64_OPND_NZCV:
   3530     case AARCH64_OPND_EXCEPTION:
   3531     case AARCH64_OPND_UIMM4:
   3532     case AARCH64_OPND_UIMM4_ADDG:
   3533     case AARCH64_OPND_UIMM7:
   3534     case AARCH64_OPND_UIMM10:
   3535       if (optional_operand_p (opcode, idx) == TRUE
   3536 	  && (opnd->imm.value ==
   3537 	      (int64_t) get_optional_operand_default_value (opcode)))
   3538 	/* Omit the operand, e.g. DCPS1.  */
   3539 	break;
   3540       snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
   3541       break;
   3542 
   3543     case AARCH64_OPND_COND:
   3544     case AARCH64_OPND_COND1:
   3545       snprintf (buf, size, "%s", opnd->cond->names[0]);
   3546       num_conds = ARRAY_SIZE (opnd->cond->names);
   3547       for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
   3548 	{
   3549 	  size_t len = strlen (buf);
   3550 	  if (i == 1)
   3551 	    snprintf (buf + len, size - len, "  // %s = %s",
   3552 		      opnd->cond->names[0], opnd->cond->names[i]);
   3553 	  else
   3554 	    snprintf (buf + len, size - len, ", %s",
   3555 		      opnd->cond->names[i]);
   3556 	}
   3557       break;
   3558 
   3559     case AARCH64_OPND_ADDR_ADRP:
   3560       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
   3561 	+ opnd->imm.value;
   3562       if (pcrel_p)
   3563 	*pcrel_p = 1;
   3564       if (address)
   3565 	*address = addr;
   3566       /* This is not necessary during the disassembling, as print_address_func
   3567 	 in the disassemble_info will take care of the printing.  But some
   3568 	 other callers may be still interested in getting the string in *STR,
   3569 	 so here we do snprintf regardless.  */
   3570       snprintf (buf, size, "#0x%" PRIx64, addr);
   3571       break;
   3572 
   3573     case AARCH64_OPND_ADDR_PCREL14:
   3574     case AARCH64_OPND_ADDR_PCREL19:
   3575     case AARCH64_OPND_ADDR_PCREL21:
   3576     case AARCH64_OPND_ADDR_PCREL26:
   3577       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
   3578       if (pcrel_p)
   3579 	*pcrel_p = 1;
   3580       if (address)
   3581 	*address = addr;
   3582       /* This is not necessary during the disassembling, as print_address_func
   3583 	 in the disassemble_info will take care of the printing.  But some
   3584 	 other callers may be still interested in getting the string in *STR,
   3585 	 so here we do snprintf regardless.  */
   3586       snprintf (buf, size, "#0x%" PRIx64, addr);
   3587       break;
   3588 
   3589     case AARCH64_OPND_ADDR_SIMPLE:
   3590     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
   3591     case AARCH64_OPND_SIMD_ADDR_POST:
   3592       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   3593       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
   3594 	{
   3595 	  if (opnd->addr.offset.is_reg)
   3596 	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
   3597 	  else
   3598 	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
   3599 	}
   3600       else
   3601 	snprintf (buf, size, "[%s]", name);
   3602       break;
   3603 
   3604     case AARCH64_OPND_ADDR_REGOFF:
   3605     case AARCH64_OPND_SVE_ADDR_R:
   3606     case AARCH64_OPND_SVE_ADDR_RR:
   3607     case AARCH64_OPND_SVE_ADDR_RR_LSL1:
   3608     case AARCH64_OPND_SVE_ADDR_RR_LSL2:
   3609     case AARCH64_OPND_SVE_ADDR_RR_LSL3:
   3610     case AARCH64_OPND_SVE_ADDR_RX:
   3611     case AARCH64_OPND_SVE_ADDR_RX_LSL1:
   3612     case AARCH64_OPND_SVE_ADDR_RX_LSL2:
   3613     case AARCH64_OPND_SVE_ADDR_RX_LSL3:
   3614       print_register_offset_address
   3615 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   3616 	 get_offset_int_reg_name (opnd));
   3617       break;
   3618 
   3619     case AARCH64_OPND_SVE_ADDR_ZX:
   3620       print_register_offset_address
   3621 	(buf, size, opnd,
   3622 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   3623 	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
   3624       break;
   3625 
   3626     case AARCH64_OPND_SVE_ADDR_RZ:
   3627     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
   3628     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
   3629     case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
   3630     case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
   3631     case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
   3632     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
   3633     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
   3634     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
   3635     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
   3636     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
   3637     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
   3638       print_register_offset_address
   3639 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   3640 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
   3641       break;
   3642 
   3643     case AARCH64_OPND_ADDR_SIMM7:
   3644     case AARCH64_OPND_ADDR_SIMM9:
   3645     case AARCH64_OPND_ADDR_SIMM9_2:
   3646     case AARCH64_OPND_ADDR_SIMM10:
   3647     case AARCH64_OPND_ADDR_SIMM11:
   3648     case AARCH64_OPND_ADDR_SIMM13:
   3649     case AARCH64_OPND_ADDR_OFFSET:
   3650     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
   3651     case AARCH64_OPND_SVE_ADDR_RI_S4x32:
   3652     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
   3653     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
   3654     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
   3655     case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
   3656     case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
   3657     case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
   3658     case AARCH64_OPND_SVE_ADDR_RI_U6:
   3659     case AARCH64_OPND_SVE_ADDR_RI_U6x2:
   3660     case AARCH64_OPND_SVE_ADDR_RI_U6x4:
   3661     case AARCH64_OPND_SVE_ADDR_RI_U6x8:
   3662       print_immediate_offset_address
   3663 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
   3664       break;
   3665 
   3666     case AARCH64_OPND_SVE_ADDR_ZI_U5:
   3667     case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
   3668     case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
   3669     case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
   3670       print_immediate_offset_address
   3671 	(buf, size, opnd,
   3672 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
   3673       break;
   3674 
   3675     case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
   3676     case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
   3677     case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
   3678       print_register_offset_address
   3679 	(buf, size, opnd,
   3680 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
   3681 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
   3682       break;
   3683 
   3684     case AARCH64_OPND_ADDR_UIMM12:
   3685       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   3686       if (opnd->addr.offset.imm)
   3687 	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
   3688       else
   3689 	snprintf (buf, size, "[%s]", name);
   3690       break;
   3691 
   3692     case AARCH64_OPND_SYSREG:
   3693       for (i = 0; aarch64_sys_regs[i].name; ++i)
   3694 	{
   3695 	  const aarch64_sys_reg *sr = aarch64_sys_regs + i;
   3696 
   3697 	  bfd_boolean exact_match
   3698 	    = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
   3699 	    || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
   3700 	    && AARCH64_CPU_HAS_FEATURE (features, sr->features);
   3701 
	  /* Try to find an exact match, but if that fails, return the first
	     partial match that was found.  */
   3704 	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
   3705 	      && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
   3706 	      && (name == NULL || exact_match))
   3707 	    {
   3708 	      name = aarch64_sys_regs[i].name;
   3709 	      if (exact_match)
   3710 		{
   3711 		  if (notes)
   3712 		    *notes = NULL;
   3713 		  break;
   3714 		}
   3715 
	      /* If we didn't match exactly, that means the presence of a flag
		 indicates what we didn't want for this instruction.  E.g. if
		 F_REG_READ is there, that means we were looking for a write
		 register.  See aarch64_ext_sysreg.  */
   3720 	      if (aarch64_sys_regs[i].flags & F_REG_WRITE)
   3721 		*notes = _("reading from a write-only register");
   3722 	      else if (aarch64_sys_regs[i].flags & F_REG_READ)
   3723 		*notes = _("writing to a read-only register");
   3724 	    }
   3725 	}
   3726 
   3727       if (name)
   3728 	snprintf (buf, size, "%s", name);
   3729       else
   3730 	{
   3731 	  /* Implementation defined system register.  */
   3732 	  unsigned int value = opnd->sysreg.value;
   3733 	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
   3734 		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
   3735 		    value & 0x7);
   3736 	}
   3737       break;
   3738 
   3739     case AARCH64_OPND_PSTATEFIELD:
   3740       for (i = 0; aarch64_pstatefields[i].name; ++i)
   3741 	if (aarch64_pstatefields[i].value == opnd->pstatefield)
   3742 	  break;
   3743       assert (aarch64_pstatefields[i].name);
   3744       snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
   3745       break;
   3746 
   3747     case AARCH64_OPND_SYSREG_AT:
   3748     case AARCH64_OPND_SYSREG_DC:
   3749     case AARCH64_OPND_SYSREG_IC:
   3750     case AARCH64_OPND_SYSREG_TLBI:
   3751     case AARCH64_OPND_SYSREG_SR:
   3752       snprintf (buf, size, "%s", opnd->sysins_op->name);
   3753       break;
   3754 
   3755     case AARCH64_OPND_BARRIER:
   3756       snprintf (buf, size, "%s", opnd->barrier->name);
   3757       break;
   3758 
   3759     case AARCH64_OPND_BARRIER_ISB:
   3760       /* Operand can be omitted, e.g. in DCPS1.  */
   3761       if (! optional_operand_p (opcode, idx)
   3762 	  || (opnd->barrier->value
   3763 	      != get_optional_operand_default_value (opcode)))
   3764 	snprintf (buf, size, "#0x%x", opnd->barrier->value);
   3765       break;
   3766 
   3767     case AARCH64_OPND_PRFOP:
   3768       if (opnd->prfop->name != NULL)
   3769 	snprintf (buf, size, "%s", opnd->prfop->name);
   3770       else
   3771 	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
   3772       break;
   3773 
   3774     case AARCH64_OPND_BARRIER_PSB:
   3775       snprintf (buf, size, "csync");
   3776       break;
   3777 
   3778     case AARCH64_OPND_BTI_TARGET:
   3779       if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
   3780 	snprintf (buf, size, "%s", opnd->hint_option->name);
   3781       break;
   3782 
   3783     default:
   3784       assert (0);
   3785     }
   3786 }
   3787 
/* Encode a system register or system instruction operand from its
   (op0, op1, CRn, CRm, op2) fields.  The fields are first placed at bit
   positions 19, 16, 12, 8 and 5 and the whole value is then shifted right
   by 5, so the final packed layout is: op0 at bit 14, op1 at bit 11,
   CRn at bit 7, CRm at bit 3 and op2 at bit 0.  This matches the
   "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>" unpacking used when printing
   implementation-defined system registers (see the AARCH64_OPND_SYSREG
   printing code above).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* For section 3.9.3 Instructions for Accessing Special Purpose Registers:
     op0 is always 3 and CRn is always 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* For section 3.9.10 System Instructions: op0 is always 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
   3795 
/* Numeric values for the CRn/CRm co-processor register fields passed to
   the CPENC family of macros below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
   3812 
/* Build one entry of the aarch64_sys_regs table: NAME is the assembler
   spelling, ENCODING comes from CPENC/CPEN_, FLAGS holds the F_* access
   flags (e.g. F_REG_READ/F_REG_WRITE) and FEATURES the architectural
   feature bits the register depends on.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

/* A register available on every core: no feature bits required.  */
#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

/* A register gated on a single architecture feature FEAT.  */
#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

/* A register gated on two architecture features FE1 and FE2.  */
#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_RNG(n,e,f)	 SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)

#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
/* Note: SR_V8_4 was previously defined twice (identically); the redundant
   duplicate definition has been removed.  */
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
   3844 
/* Invoke F (X, N) for each N in 1..15, emitting a comma after every
   expansion (including the last) so the result can be spliced directly
   into an initializer list.  N = 0 is deliberately not generated here;
   presumably any N = 0 entry is spelled out by hand at the use site --
   TODO confirm at the expansion sites.  */
#define SR_EXPAND_ELx(f,x) \
  f (x, 1),  \
  f (x, 2),  \
  f (x, 3),  \
  f (x, 4),  \
  f (x, 5),  \
  f (x, 6),  \
  f (x, 7),  \
  f (x, 8),  \
  f (x, 9),  \
  f (x, 10), \
  f (x, 11), \
  f (x, 12), \
  f (x, 13), \
  f (x, 14), \
  f (x, 15),
   3861 
/* Apply SR_EXPAND_ELx for X = 1 and X = 2, i.e. expand F (1, N) and
   F (2, N) for N in 1..15, comma-terminated.  */
#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
   3865 
/* TODO: one more issue still needs to be resolved:
   1. Handle CPU-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these is set then the register is read-write.  */
   3871 const aarch64_sys_reg aarch64_sys_regs [] =
   3872 {
   3873   SR_CORE ("spsr_el1",		CPEN_ (0,C0,0),		0), /* = spsr_svc.  */
   3874   SR_V8_1 ("spsr_el12",		CPEN_ (5,C0,0),		0),
   3875   SR_CORE ("elr_el1",		CPEN_ (0,C0,1),		0),
   3876   SR_V8_1 ("elr_el12",		CPEN_ (5,C0,1),		0),
   3877   SR_CORE ("sp_el0",		CPEN_ (0,C1,0),		0),
   3878   SR_CORE ("spsel",		CPEN_ (0,C2,0),		0),
   3879   SR_CORE ("daif",		CPEN_ (3,C2,1),		0),
   3880   SR_CORE ("currentel",		CPEN_ (0,C2,2),		F_REG_READ),
   3881   SR_PAN  ("pan",		CPEN_ (0,C2,3),		0),
   3882   SR_V8_2 ("uao",		CPEN_ (0,C2,4),		0),
   3883   SR_CORE ("nzcv",		CPEN_ (3,C2,0),		0),
   3884   SR_SSBS ("ssbs",		CPEN_ (3,C2,6),		0),
   3885   SR_CORE ("fpcr",		CPEN_ (3,C4,0),		0),
   3886   SR_CORE ("fpsr",		CPEN_ (3,C4,1),		0),
   3887   SR_CORE ("dspsr_el0",		CPEN_ (3,C5,0),		0),
   3888   SR_CORE ("dlr_el0",		CPEN_ (3,C5,1),		0),
   3889   SR_CORE ("spsr_el2",		CPEN_ (4,C0,0),		0), /* = spsr_hyp.  */
   3890   SR_CORE ("elr_el2",		CPEN_ (4,C0,1),		0),
   3891   SR_CORE ("sp_el1",		CPEN_ (4,C1,0),		0),
   3892   SR_CORE ("spsr_irq",		CPEN_ (4,C3,0),		0),
   3893   SR_CORE ("spsr_abt",		CPEN_ (4,C3,1),		0),
   3894   SR_CORE ("spsr_und",		CPEN_ (4,C3,2),		0),
   3895   SR_CORE ("spsr_fiq",		CPEN_ (4,C3,3),		0),
   3896   SR_CORE ("spsr_el3",		CPEN_ (6,C0,0),		0),
   3897   SR_CORE ("elr_el3",		CPEN_ (6,C0,1),		0),
   3898   SR_CORE ("sp_el2",		CPEN_ (6,C1,0),		0),
   3899   SR_CORE ("spsr_svc",		CPEN_ (0,C0,0),		F_DEPRECATED), /* = spsr_el1.  */
   3900   SR_CORE ("spsr_hyp",		CPEN_ (4,C0,0),		F_DEPRECATED), /* = spsr_el2.  */
   3901   SR_CORE ("midr_el1",		CPENC (3,0,C0,C0,0),	F_REG_READ),
   3902   SR_CORE ("ctr_el0",		CPENC (3,3,C0,C0,1),	F_REG_READ),
   3903   SR_CORE ("mpidr_el1",		CPENC (3,0,C0,C0,5),	F_REG_READ),
   3904   SR_CORE ("revidr_el1",	CPENC (3,0,C0,C0,6),	F_REG_READ),
   3905   SR_CORE ("aidr_el1",		CPENC (3,1,C0,C0,7),	F_REG_READ),
   3906   SR_CORE ("dczid_el0",		CPENC (3,3,C0,C0,7),	F_REG_READ),
   3907   SR_CORE ("id_dfr0_el1",	CPENC (3,0,C0,C1,2),	F_REG_READ),
   3908   SR_CORE ("id_pfr0_el1",	CPENC (3,0,C0,C1,0),	F_REG_READ),
   3909   SR_CORE ("id_pfr1_el1",	CPENC (3,0,C0,C1,1),	F_REG_READ),
   3910   SR_ID_PFR2 ("id_pfr2_el1",	CPENC (3,0,C0,C3,4),	F_REG_READ),
   3911   SR_CORE ("id_afr0_el1",	CPENC (3,0,C0,C1,3),	F_REG_READ),
   3912   SR_CORE ("id_mmfr0_el1",	CPENC (3,0,C0,C1,4),	F_REG_READ),
   3913   SR_CORE ("id_mmfr1_el1",	CPENC (3,0,C0,C1,5),	F_REG_READ),
   3914   SR_CORE ("id_mmfr2_el1",	CPENC (3,0,C0,C1,6),	F_REG_READ),
   3915   SR_CORE ("id_mmfr3_el1",	CPENC (3,0,C0,C1,7),	F_REG_READ),
   3916   SR_CORE ("id_mmfr4_el1",	CPENC (3,0,C0,C2,6),	F_REG_READ),
   3917   SR_CORE ("id_isar0_el1",	CPENC (3,0,C0,C2,0),	F_REG_READ),
   3918   SR_CORE ("id_isar1_el1",	CPENC (3,0,C0,C2,1),	F_REG_READ),
   3919   SR_CORE ("id_isar2_el1",	CPENC (3,0,C0,C2,2),	F_REG_READ),
   3920   SR_CORE ("id_isar3_el1",	CPENC (3,0,C0,C2,3),	F_REG_READ),
   3921   SR_CORE ("id_isar4_el1",	CPENC (3,0,C0,C2,4),	F_REG_READ),
   3922   SR_CORE ("id_isar5_el1",	CPENC (3,0,C0,C2,5),	F_REG_READ),
   3923   SR_CORE ("mvfr0_el1",		CPENC (3,0,C0,C3,0),	F_REG_READ),
   3924   SR_CORE ("mvfr1_el1",		CPENC (3,0,C0,C3,1),	F_REG_READ),
   3925   SR_CORE ("mvfr2_el1",		CPENC (3,0,C0,C3,2),	F_REG_READ),
   3926   SR_CORE ("ccsidr_el1",	CPENC (3,1,C0,C0,0),	F_REG_READ),
   3927   SR_CORE ("id_aa64pfr0_el1",	CPENC (3,0,C0,C4,0),	F_REG_READ),
   3928   SR_CORE ("id_aa64pfr1_el1",	CPENC (3,0,C0,C4,1),	F_REG_READ),
   3929   SR_CORE ("id_aa64dfr0_el1",	CPENC (3,0,C0,C5,0),	F_REG_READ),
   3930   SR_CORE ("id_aa64dfr1_el1",	CPENC (3,0,C0,C5,1),	F_REG_READ),
   3931   SR_CORE ("id_aa64isar0_el1",	CPENC (3,0,C0,C6,0),	F_REG_READ),
   3932   SR_CORE ("id_aa64isar1_el1",	CPENC (3,0,C0,C6,1),	F_REG_READ),
   3933   SR_CORE ("id_aa64mmfr0_el1",	CPENC (3,0,C0,C7,0),	F_REG_READ),
   3934   SR_CORE ("id_aa64mmfr1_el1",	CPENC (3,0,C0,C7,1),	F_REG_READ),
   3935   SR_V8_2 ("id_aa64mmfr2_el1",	CPENC (3,0,C0,C7,2),	F_REG_READ),
   3936   SR_CORE ("id_aa64afr0_el1",	CPENC (3,0,C0,C5,4),	F_REG_READ),
   3937   SR_CORE ("id_aa64afr1_el1",	CPENC (3,0,C0,C5,5),	F_REG_READ),
   3938   SR_SVE  ("id_aa64zfr0_el1",	CPENC (3,0,C0,C4,4),	F_REG_READ),
   3939   SR_CORE ("clidr_el1",		CPENC (3,1,C0,C0,1),	F_REG_READ),
   3940   SR_CORE ("csselr_el1",	CPENC (3,2,C0,C0,0),	0),
   3941   SR_CORE ("vpidr_el2",		CPENC (3,4,C0,C0,0),	0),
   3942   SR_CORE ("vmpidr_el2",	CPENC (3,4,C0,C0,5),	0),
   3943   SR_CORE ("sctlr_el1",		CPENC (3,0,C1,C0,0),	0),
   3944   SR_CORE ("sctlr_el2",		CPENC (3,4,C1,C0,0),	0),
   3945   SR_CORE ("sctlr_el3",		CPENC (3,6,C1,C0,0),	0),
   3946   SR_V8_1 ("sctlr_el12",	CPENC (3,5,C1,C0,0),	0),
   3947   SR_CORE ("actlr_el1",		CPENC (3,0,C1,C0,1),	0),
   3948   SR_CORE ("actlr_el2",		CPENC (3,4,C1,C0,1),	0),
   3949   SR_CORE ("actlr_el3",		CPENC (3,6,C1,C0,1),	0),
   3950   SR_CORE ("cpacr_el1",		CPENC (3,0,C1,C0,2),	0),
   3951   SR_V8_1 ("cpacr_el12",	CPENC (3,5,C1,C0,2),	0),
   3952   SR_CORE ("cptr_el2",		CPENC (3,4,C1,C1,2),	0),
   3953   SR_CORE ("cptr_el3",		CPENC (3,6,C1,C1,2),	0),
   3954   SR_CORE ("scr_el3",		CPENC (3,6,C1,C1,0),	0),
   3955   SR_CORE ("hcr_el2",		CPENC (3,4,C1,C1,0),	0),
   3956   SR_CORE ("mdcr_el2",		CPENC (3,4,C1,C1,1),	0),
   3957   SR_CORE ("mdcr_el3",		CPENC (3,6,C1,C3,1),	0),
   3958   SR_CORE ("hstr_el2",		CPENC (3,4,C1,C1,3),	0),
   3959   SR_CORE ("hacr_el2",		CPENC (3,4,C1,C1,7),	0),
   3960   SR_SVE  ("zcr_el1",		CPENC (3,0,C1,C2,0),	0),
   3961   SR_SVE  ("zcr_el12",		CPENC (3,5,C1,C2,0),	0),
   3962   SR_SVE  ("zcr_el2",		CPENC (3,4,C1,C2,0),	0),
   3963   SR_SVE  ("zcr_el3",		CPENC (3,6,C1,C2,0),	0),
   3964   SR_SVE  ("zidr_el1",		CPENC (3,0,C0,C0,7),	0),
   3965   SR_CORE ("ttbr0_el1",		CPENC (3,0,C2,C0,0),	0),
   3966   SR_CORE ("ttbr1_el1",		CPENC (3,0,C2,C0,1),	0),
   3967   SR_V8_A ("ttbr0_el2",		CPENC (3,4,C2,C0,0),	0),
   3968   SR_V8_1_A ("ttbr1_el2",	CPENC (3,4,C2,C0,1),	0),
   3969   SR_CORE ("ttbr0_el3",		CPENC (3,6,C2,C0,0),	0),
   3970   SR_V8_1 ("ttbr0_el12",	CPENC (3,5,C2,C0,0),	0),
   3971   SR_V8_1 ("ttbr1_el12",	CPENC (3,5,C2,C0,1),	0),
   3972   SR_V8_A ("vttbr_el2",		CPENC (3,4,C2,C1,0),	0),
   3973   SR_CORE ("tcr_el1",		CPENC (3,0,C2,C0,2),	0),
   3974   SR_CORE ("tcr_el2",		CPENC (3,4,C2,C0,2),	0),
   3975   SR_CORE ("tcr_el3",		CPENC (3,6,C2,C0,2),	0),
   3976   SR_V8_1 ("tcr_el12",		CPENC (3,5,C2,C0,2),	0),
   3977   SR_CORE ("vtcr_el2",		CPENC (3,4,C2,C1,2),	0),
   3978   SR_V8_3 ("apiakeylo_el1",	CPENC (3,0,C2,C1,0),	0),
   3979   SR_V8_3 ("apiakeyhi_el1",	CPENC (3,0,C2,C1,1),	0),
   3980   SR_V8_3 ("apibkeylo_el1",	CPENC (3,0,C2,C1,2),	0),
   3981   SR_V8_3 ("apibkeyhi_el1",	CPENC (3,0,C2,C1,3),	0),
   3982   SR_V8_3 ("apdakeylo_el1",	CPENC (3,0,C2,C2,0),	0),
   3983   SR_V8_3 ("apdakeyhi_el1",	CPENC (3,0,C2,C2,1),	0),
   3984   SR_V8_3 ("apdbkeylo_el1",	CPENC (3,0,C2,C2,2),	0),
   3985   SR_V8_3 ("apdbkeyhi_el1",	CPENC (3,0,C2,C2,3),	0),
   3986   SR_V8_3 ("apgakeylo_el1",	CPENC (3,0,C2,C3,0),	0),
   3987   SR_V8_3 ("apgakeyhi_el1",	CPENC (3,0,C2,C3,1),	0),
   3988   SR_CORE ("afsr0_el1",		CPENC (3,0,C5,C1,0),	0),
   3989   SR_CORE ("afsr1_el1",		CPENC (3,0,C5,C1,1),	0),
   3990   SR_CORE ("afsr0_el2",		CPENC (3,4,C5,C1,0),	0),
   3991   SR_CORE ("afsr1_el2",		CPENC (3,4,C5,C1,1),	0),
   3992   SR_CORE ("afsr0_el3",		CPENC (3,6,C5,C1,0),	0),
   3993   SR_V8_1 ("afsr0_el12",	CPENC (3,5,C5,C1,0),	0),
   3994   SR_CORE ("afsr1_el3",		CPENC (3,6,C5,C1,1),	0),
   3995   SR_V8_1 ("afsr1_el12",	CPENC (3,5,C5,C1,1),	0),
   3996   SR_CORE ("esr_el1",		CPENC (3,0,C5,C2,0),	0),
   3997   SR_CORE ("esr_el2",		CPENC (3,4,C5,C2,0),	0),
   3998   SR_CORE ("esr_el3",		CPENC (3,6,C5,C2,0),	0),
   3999   SR_V8_1 ("esr_el12",		CPENC (3,5,C5,C2,0),	0),
   4000   SR_RAS  ("vsesr_el2",		CPENC (3,4,C5,C2,3),	0),
   4001   SR_CORE ("fpexc32_el2",	CPENC (3,4,C5,C3,0),	0),
   4002   SR_RAS  ("erridr_el1",	CPENC (3,0,C5,C3,0),	F_REG_READ),
   4003   SR_RAS  ("errselr_el1",	CPENC (3,0,C5,C3,1),	0),
   4004   SR_RAS  ("erxfr_el1",		CPENC (3,0,C5,C4,0),	F_REG_READ),
   4005   SR_RAS  ("erxctlr_el1",	CPENC (3,0,C5,C4,1),	0),
   4006   SR_RAS  ("erxstatus_el1",	CPENC (3,0,C5,C4,2),	0),
   4007   SR_RAS  ("erxaddr_el1",	CPENC (3,0,C5,C4,3),	0),
   4008   SR_RAS  ("erxmisc0_el1",	CPENC (3,0,C5,C5,0),	0),
   4009   SR_RAS  ("erxmisc1_el1",	CPENC (3,0,C5,C5,1),	0),
   4010   SR_CORE ("far_el1",		CPENC (3,0,C6,C0,0),	0),
   4011   SR_CORE ("far_el2",		CPENC (3,4,C6,C0,0),	0),
   4012   SR_CORE ("far_el3",		CPENC (3,6,C6,C0,0),	0),
   4013   SR_V8_1 ("far_el12",		CPENC (3,5,C6,C0,0),	0),
   4014   SR_CORE ("hpfar_el2",		CPENC (3,4,C6,C0,4),	0),
   4015   SR_CORE ("par_el1",		CPENC (3,0,C7,C4,0),	0),
   4016   SR_CORE ("mair_el1",		CPENC (3,0,C10,C2,0),	0),
   4017   SR_CORE ("mair_el2",		CPENC (3,4,C10,C2,0),	0),
   4018   SR_CORE ("mair_el3",		CPENC (3,6,C10,C2,0),	0),
   4019   SR_V8_1 ("mair_el12",		CPENC (3,5,C10,C2,0),	0),
   4020   SR_CORE ("amair_el1",		CPENC (3,0,C10,C3,0),	0),
   4021   SR_CORE ("amair_el2",		CPENC (3,4,C10,C3,0),	0),
   4022   SR_CORE ("amair_el3",		CPENC (3,6,C10,C3,0),	0),
   4023   SR_V8_1 ("amair_el12",	CPENC (3,5,C10,C3,0),	0),
   4024   SR_CORE ("vbar_el1",		CPENC (3,0,C12,C0,0),	0),
   4025   SR_CORE ("vbar_el2",		CPENC (3,4,C12,C0,0),	0),
   4026   SR_CORE ("vbar_el3",		CPENC (3,6,C12,C0,0),	0),
   4027   SR_V8_1 ("vbar_el12",		CPENC (3,5,C12,C0,0),	0),
   4028   SR_CORE ("rvbar_el1",		CPENC (3,0,C12,C0,1),	F_REG_READ),
   4029   SR_CORE ("rvbar_el2",		CPENC (3,4,C12,C0,1),	F_REG_READ),
   4030   SR_CORE ("rvbar_el3",		CPENC (3,6,C12,C0,1),	F_REG_READ),
   4031   SR_CORE ("rmr_el1",		CPENC (3,0,C12,C0,2),	0),
   4032   SR_CORE ("rmr_el2",		CPENC (3,4,C12,C0,2),	0),
   4033   SR_CORE ("rmr_el3",		CPENC (3,6,C12,C0,2),	0),
   4034   SR_CORE ("isr_el1",		CPENC (3,0,C12,C1,0),	F_REG_READ),
   4035   SR_RAS  ("disr_el1",		CPENC (3,0,C12,C1,1),	0),
   4036   SR_RAS  ("vdisr_el2",		CPENC (3,4,C12,C1,1),	0),
   4037   SR_CORE ("contextidr_el1",	CPENC (3,0,C13,C0,1),	0),
   4038   SR_V8_1 ("contextidr_el2",	CPENC (3,4,C13,C0,1),	0),
   4039   SR_V8_1 ("contextidr_el12",	CPENC (3,5,C13,C0,1),	0),
   4040   SR_RNG  ("rndr",		CPENC (3,3,C2,C4,0),	F_REG_READ),
   4041   SR_RNG  ("rndrrs",		CPENC (3,3,C2,C4,1),	F_REG_READ),
   4042   SR_MEMTAG ("tco",		CPENC (3,3,C4,C2,7),	0),
   4043   SR_MEMTAG ("tfsre0_el1",	CPENC (3,0,C5,C6,1),	0),
   4044   SR_MEMTAG ("tfsr_el1",	CPENC (3,0,C5,C6,0),	0),
   4045   SR_MEMTAG ("tfsr_el2",	CPENC (3,4,C5,C6,0),	0),
   4046   SR_MEMTAG ("tfsr_el3",	CPENC (3,6,C5,C6,0),	0),
   4047   SR_MEMTAG ("tfsr_el12",	CPENC (3,5,C5,C6,0),	0),
   4048   SR_MEMTAG ("rgsr_el1",	CPENC (3,0,C1,C0,5),	0),
   4049   SR_MEMTAG ("gcr_el1",		CPENC (3,0,C1,C0,6),	0),
   4050   SR_MEMTAG ("gmid_el1",	CPENC (3,1,C0,C0,4),	F_REG_READ),
   4051   SR_CORE ("tpidr_el0",		CPENC (3,3,C13,C0,2),	0),
   4052   SR_CORE ("tpidrro_el0",       CPENC (3,3,C13,C0,3),	0),
   4053   SR_CORE ("tpidr_el1",		CPENC (3,0,C13,C0,4),	0),
   4054   SR_CORE ("tpidr_el2",		CPENC (3,4,C13,C0,2),	0),
   4055   SR_CORE ("tpidr_el3",		CPENC (3,6,C13,C0,2),	0),
   4056   SR_SCXTNUM ("scxtnum_el0",	CPENC (3,3,C13,C0,7),	0),
   4057   SR_SCXTNUM ("scxtnum_el1",	CPENC (3,0,C13,C0,7),	0),
   4058   SR_SCXTNUM ("scxtnum_el2",	CPENC (3,4,C13,C0,7),	0),
   4059   SR_SCXTNUM ("scxtnum_el12",   CPENC (3,5,C13,C0,7),	0),
   4060   SR_SCXTNUM ("scxtnum_el3",    CPENC (3,6,C13,C0,7),	0),
   4061   SR_CORE ("teecr32_el1",       CPENC (2,2,C0, C0,0),	0), /* See section 3.9.7.1.  */
   4062   SR_CORE ("cntfrq_el0",	CPENC (3,3,C14,C0,0),	0),
   4063   SR_CORE ("cntpct_el0",	CPENC (3,3,C14,C0,1),	F_REG_READ),
   4064   SR_CORE ("cntvct_el0",	CPENC (3,3,C14,C0,2),	F_REG_READ),
   4065   SR_CORE ("cntvoff_el2",       CPENC (3,4,C14,C0,3),	0),
   4066   SR_CORE ("cntkctl_el1",       CPENC (3,0,C14,C1,0),	0),
   4067   SR_V8_1 ("cntkctl_el12",	CPENC (3,5,C14,C1,0),	0),
   4068   SR_CORE ("cnthctl_el2",	CPENC (3,4,C14,C1,0),	0),
   4069   SR_CORE ("cntp_tval_el0",	CPENC (3,3,C14,C2,0),	0),
   4070   SR_V8_1 ("cntp_tval_el02",	CPENC (3,5,C14,C2,0),	0),
   4071   SR_CORE ("cntp_ctl_el0",      CPENC (3,3,C14,C2,1),	0),
   4072   SR_V8_1 ("cntp_ctl_el02",	CPENC (3,5,C14,C2,1),	0),
   4073   SR_CORE ("cntp_cval_el0",     CPENC (3,3,C14,C2,2),	0),
   4074   SR_V8_1 ("cntp_cval_el02",	CPENC (3,5,C14,C2,2),	0),
   4075   SR_CORE ("cntv_tval_el0",     CPENC (3,3,C14,C3,0),	0),
   4076   SR_V8_1 ("cntv_tval_el02",	CPENC (3,5,C14,C3,0),	0),
   4077   SR_CORE ("cntv_ctl_el0",      CPENC (3,3,C14,C3,1),	0),
   4078   SR_V8_1 ("cntv_ctl_el02",	CPENC (3,5,C14,C3,1),	0),
   4079   SR_CORE ("cntv_cval_el0",     CPENC (3,3,C14,C3,2),	0),
   4080   SR_V8_1 ("cntv_cval_el02",	CPENC (3,5,C14,C3,2),	0),
   4081   SR_CORE ("cnthp_tval_el2",	CPENC (3,4,C14,C2,0),	0),
   4082   SR_CORE ("cnthp_ctl_el2",	CPENC (3,4,C14,C2,1),	0),
   4083   SR_CORE ("cnthp_cval_el2",	CPENC (3,4,C14,C2,2),	0),
   4084   SR_CORE ("cntps_tval_el1",	CPENC (3,7,C14,C2,0),	0),
   4085   SR_CORE ("cntps_ctl_el1",	CPENC (3,7,C14,C2,1),	0),
   4086   SR_CORE ("cntps_cval_el1",	CPENC (3,7,C14,C2,2),	0),
   4087   SR_V8_1 ("cnthv_tval_el2",	CPENC (3,4,C14,C3,0),	0),
   4088   SR_V8_1 ("cnthv_ctl_el2",	CPENC (3,4,C14,C3,1),	0),
   4089   SR_V8_1 ("cnthv_cval_el2",	CPENC (3,4,C14,C3,2),	0),
   4090   SR_CORE ("dacr32_el2",	CPENC (3,4,C3,C0,0),	0),
   4091   SR_CORE ("ifsr32_el2",	CPENC (3,4,C5,C0,1),	0),
   4092   SR_CORE ("teehbr32_el1",	CPENC (2,2,C1,C0,0),	0),
   4093   SR_CORE ("sder32_el3",	CPENC (3,6,C1,C1,1),	0),
   4094   SR_CORE ("mdscr_el1",		CPENC (2,0,C0,C2,2),	0),
   4095   SR_CORE ("mdccsr_el0",	CPENC (2,3,C0,C1,0),	F_REG_READ),
   4096   SR_CORE ("mdccint_el1",       CPENC (2,0,C0,C2,0),	0),
   4097   SR_CORE ("dbgdtr_el0",	CPENC (2,3,C0,C4,0),	0),
   4098   SR_CORE ("dbgdtrrx_el0",	CPENC (2,3,C0,C5,0),	F_REG_READ),
   4099   SR_CORE ("dbgdtrtx_el0",	CPENC (2,3,C0,C5,0),	F_REG_WRITE),
   4100   SR_CORE ("osdtrrx_el1",	CPENC (2,0,C0,C0,2),	0),
   4101   SR_CORE ("osdtrtx_el1",	CPENC (2,0,C0,C3,2),	0),
   4102   SR_CORE ("oseccr_el1",	CPENC (2,0,C0,C6,2),	0),
   4103   SR_CORE ("dbgvcr32_el2",      CPENC (2,4,C0,C7,0),	0),
   4104   SR_CORE ("dbgbvr0_el1",       CPENC (2,0,C0,C0,4),	0),
   4105   SR_CORE ("dbgbvr1_el1",       CPENC (2,0,C0,C1,4),	0),
   4106   SR_CORE ("dbgbvr2_el1",       CPENC (2,0,C0,C2,4),	0),
   4107   SR_CORE ("dbgbvr3_el1",       CPENC (2,0,C0,C3,4),	0),
   4108   SR_CORE ("dbgbvr4_el1",       CPENC (2,0,C0,C4,4),	0),
   4109   SR_CORE ("dbgbvr5_el1",       CPENC (2,0,C0,C5,4),	0),
   4110   SR_CORE ("dbgbvr6_el1",       CPENC (2,0,C0,C6,4),	0),
   4111   SR_CORE ("dbgbvr7_el1",       CPENC (2,0,C0,C7,4),	0),
   4112   SR_CORE ("dbgbvr8_el1",       CPENC (2,0,C0,C8,4),	0),
   4113   SR_CORE ("dbgbvr9_el1",       CPENC (2,0,C0,C9,4),	0),
   4114   SR_CORE ("dbgbvr10_el1",      CPENC (2,0,C0,C10,4),	0),
   4115   SR_CORE ("dbgbvr11_el1",      CPENC (2,0,C0,C11,4),	0),
   4116   SR_CORE ("dbgbvr12_el1",      CPENC (2,0,C0,C12,4),	0),
   4117   SR_CORE ("dbgbvr13_el1",      CPENC (2,0,C0,C13,4),	0),
   4118   SR_CORE ("dbgbvr14_el1",      CPENC (2,0,C0,C14,4),	0),
   4119   SR_CORE ("dbgbvr15_el1",      CPENC (2,0,C0,C15,4),	0),
   4120   SR_CORE ("dbgbcr0_el1",       CPENC (2,0,C0,C0,5),	0),
   4121   SR_CORE ("dbgbcr1_el1",       CPENC (2,0,C0,C1,5),	0),
   4122   SR_CORE ("dbgbcr2_el1",       CPENC (2,0,C0,C2,5),	0),
   4123   SR_CORE ("dbgbcr3_el1",       CPENC (2,0,C0,C3,5),	0),
   4124   SR_CORE ("dbgbcr4_el1",       CPENC (2,0,C0,C4,5),	0),
   4125   SR_CORE ("dbgbcr5_el1",       CPENC (2,0,C0,C5,5),	0),
   4126   SR_CORE ("dbgbcr6_el1",       CPENC (2,0,C0,C6,5),	0),
   4127   SR_CORE ("dbgbcr7_el1",       CPENC (2,0,C0,C7,5),	0),
   4128   SR_CORE ("dbgbcr8_el1",       CPENC (2,0,C0,C8,5),	0),
   4129   SR_CORE ("dbgbcr9_el1",       CPENC (2,0,C0,C9,5),	0),
   4130   SR_CORE ("dbgbcr10_el1",      CPENC (2,0,C0,C10,5),	0),
   4131   SR_CORE ("dbgbcr11_el1",      CPENC (2,0,C0,C11,5),	0),
   4132   SR_CORE ("dbgbcr12_el1",      CPENC (2,0,C0,C12,5),	0),
   4133   SR_CORE ("dbgbcr13_el1",      CPENC (2,0,C0,C13,5),	0),
   4134   SR_CORE ("dbgbcr14_el1",      CPENC (2,0,C0,C14,5),	0),
   4135   SR_CORE ("dbgbcr15_el1",      CPENC (2,0,C0,C15,5),	0),
   4136   SR_CORE ("dbgwvr0_el1",       CPENC (2,0,C0,C0,6),	0),
   4137   SR_CORE ("dbgwvr1_el1",       CPENC (2,0,C0,C1,6),	0),
   4138   SR_CORE ("dbgwvr2_el1",       CPENC (2,0,C0,C2,6),	0),
   4139   SR_CORE ("dbgwvr3_el1",       CPENC (2,0,C0,C3,6),	0),
   4140   SR_CORE ("dbgwvr4_el1",       CPENC (2,0,C0,C4,6),	0),
   4141   SR_CORE ("dbgwvr5_el1",       CPENC (2,0,C0,C5,6),	0),
   4142   SR_CORE ("dbgwvr6_el1",       CPENC (2,0,C0,C6,6),	0),
   4143   SR_CORE ("dbgwvr7_el1",       CPENC (2,0,C0,C7,6),	0),
   4144   SR_CORE ("dbgwvr8_el1",       CPENC (2,0,C0,C8,6),	0),
   4145   SR_CORE ("dbgwvr9_el1",       CPENC (2,0,C0,C9,6),	0),
   4146   SR_CORE ("dbgwvr10_el1",      CPENC (2,0,C0,C10,6),	0),
   4147   SR_CORE ("dbgwvr11_el1",      CPENC (2,0,C0,C11,6),	0),
   4148   SR_CORE ("dbgwvr12_el1",      CPENC (2,0,C0,C12,6),	0),
   4149   SR_CORE ("dbgwvr13_el1",      CPENC (2,0,C0,C13,6),	0),
   4150   SR_CORE ("dbgwvr14_el1",      CPENC (2,0,C0,C14,6),	0),
   4151   SR_CORE ("dbgwvr15_el1",      CPENC (2,0,C0,C15,6),	0),
   4152   SR_CORE ("dbgwcr0_el1",       CPENC (2,0,C0,C0,7),	0),
   4153   SR_CORE ("dbgwcr1_el1",       CPENC (2,0,C0,C1,7),	0),
   4154   SR_CORE ("dbgwcr2_el1",       CPENC (2,0,C0,C2,7),	0),
   4155   SR_CORE ("dbgwcr3_el1",       CPENC (2,0,C0,C3,7),	0),
   4156   SR_CORE ("dbgwcr4_el1",       CPENC (2,0,C0,C4,7),	0),
   4157   SR_CORE ("dbgwcr5_el1",       CPENC (2,0,C0,C5,7),	0),
   4158   SR_CORE ("dbgwcr6_el1",       CPENC (2,0,C0,C6,7),	0),
   4159   SR_CORE ("dbgwcr7_el1",       CPENC (2,0,C0,C7,7),	0),
   4160   SR_CORE ("dbgwcr8_el1",       CPENC (2,0,C0,C8,7),	0),
   4161   SR_CORE ("dbgwcr9_el1",       CPENC (2,0,C0,C9,7),	0),
   4162   SR_CORE ("dbgwcr10_el1",      CPENC (2,0,C0,C10,7),	0),
   4163   SR_CORE ("dbgwcr11_el1",      CPENC (2,0,C0,C11,7),	0),
   4164   SR_CORE ("dbgwcr12_el1",      CPENC (2,0,C0,C12,7),	0),
   4165   SR_CORE ("dbgwcr13_el1",      CPENC (2,0,C0,C13,7),	0),
   4166   SR_CORE ("dbgwcr14_el1",      CPENC (2,0,C0,C14,7),	0),
   4167   SR_CORE ("dbgwcr15_el1",      CPENC (2,0,C0,C15,7),	0),
   4168   SR_CORE ("mdrar_el1",		CPENC (2,0,C1,C0,0),	F_REG_READ),
   4169   SR_CORE ("oslar_el1",		CPENC (2,0,C1,C0,4),	F_REG_WRITE),
   4170   SR_CORE ("oslsr_el1",		CPENC (2,0,C1,C1,4),	F_REG_READ),
   4171   SR_CORE ("osdlr_el1",		CPENC (2,0,C1,C3,4),	0),
   4172   SR_CORE ("dbgprcr_el1",       CPENC (2,0,C1,C4,4),	0),
   4173   SR_CORE ("dbgclaimset_el1",   CPENC (2,0,C7,C8,6),	0),
   4174   SR_CORE ("dbgclaimclr_el1",   CPENC (2,0,C7,C9,6),	0),
   4175   SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6),	F_REG_READ),
   4176   SR_PROFILE ("pmblimitr_el1",	CPENC (3,0,C9,C10,0),	0),
   4177   SR_PROFILE ("pmbptr_el1",	CPENC (3,0,C9,C10,1),	0),
   4178   SR_PROFILE ("pmbsr_el1",	CPENC (3,0,C9,C10,3),	0),
   4179   SR_PROFILE ("pmbidr_el1",	CPENC (3,0,C9,C10,7),	F_REG_READ),
   4180   SR_PROFILE ("pmscr_el1",	CPENC (3,0,C9,C9,0),	0),
   4181   SR_PROFILE ("pmsicr_el1",	CPENC (3,0,C9,C9,2),	0),
   4182   SR_PROFILE ("pmsirr_el1",	CPENC (3,0,C9,C9,3),	0),
   4183   SR_PROFILE ("pmsfcr_el1",	CPENC (3,0,C9,C9,4),	0),
   4184   SR_PROFILE ("pmsevfr_el1",	CPENC (3,0,C9,C9,5),	0),
   4185   SR_PROFILE ("pmslatfr_el1",	CPENC (3,0,C9,C9,6),	0),
   4186   SR_PROFILE ("pmsidr_el1",	CPENC (3,0,C9,C9,7),	0),
   4187   SR_PROFILE ("pmscr_el2",	CPENC (3,4,C9,C9,0),	0),
   4188   SR_PROFILE ("pmscr_el12",	CPENC (3,5,C9,C9,0),	0),
   4189   SR_CORE ("pmcr_el0",		CPENC (3,3,C9,C12,0),	0),
   4190   SR_CORE ("pmcntenset_el0",    CPENC (3,3,C9,C12,1),	0),
   4191   SR_CORE ("pmcntenclr_el0",    CPENC (3,3,C9,C12,2),	0),
   4192   SR_CORE ("pmovsclr_el0",      CPENC (3,3,C9,C12,3),	0),
   4193   SR_CORE ("pmswinc_el0",       CPENC (3,3,C9,C12,4),	F_REG_WRITE),
   4194   SR_CORE ("pmselr_el0",	CPENC (3,3,C9,C12,5),	0),
   4195   SR_CORE ("pmceid0_el0",       CPENC (3,3,C9,C12,6),	F_REG_READ),
   4196   SR_CORE ("pmceid1_el0",       CPENC (3,3,C9,C12,7),	F_REG_READ),
   4197   SR_CORE ("pmccntr_el0",       CPENC (3,3,C9,C13,0),	0),
   4198   SR_CORE ("pmxevtyper_el0",    CPENC (3,3,C9,C13,1),	0),
   4199   SR_CORE ("pmxevcntr_el0",     CPENC (3,3,C9,C13,2),	0),
   4200   SR_CORE ("pmuserenr_el0",     CPENC (3,3,C9,C14,0),	0),
   4201   SR_CORE ("pmintenset_el1",    CPENC (3,0,C9,C14,1),	0),
   4202   SR_CORE ("pmintenclr_el1",    CPENC (3,0,C9,C14,2),	0),
   4203   SR_CORE ("pmovsset_el0",      CPENC (3,3,C9,C14,3),	0),
   4204   SR_CORE ("pmevcntr0_el0",     CPENC (3,3,C14,C8,0),	0),
   4205   SR_CORE ("pmevcntr1_el0",     CPENC (3,3,C14,C8,1),	0),
   4206   SR_CORE ("pmevcntr2_el0",     CPENC (3,3,C14,C8,2),	0),
   4207   SR_CORE ("pmevcntr3_el0",     CPENC (3,3,C14,C8,3),	0),
   4208   SR_CORE ("pmevcntr4_el0",     CPENC (3,3,C14,C8,4),	0),
   4209   SR_CORE ("pmevcntr5_el0",     CPENC (3,3,C14,C8,5),	0),
   4210   SR_CORE ("pmevcntr6_el0",     CPENC (3,3,C14,C8,6),	0),
   4211   SR_CORE ("pmevcntr7_el0",     CPENC (3,3,C14,C8,7),	0),
   4212   SR_CORE ("pmevcntr8_el0",     CPENC (3,3,C14,C9,0),	0),
   4213   SR_CORE ("pmevcntr9_el0",     CPENC (3,3,C14,C9,1),	0),
   4214   SR_CORE ("pmevcntr10_el0",    CPENC (3,3,C14,C9,2),	0),
   4215   SR_CORE ("pmevcntr11_el0",    CPENC (3,3,C14,C9,3),	0),
   4216   SR_CORE ("pmevcntr12_el0",    CPENC (3,3,C14,C9,4),	0),
   4217   SR_CORE ("pmevcntr13_el0",    CPENC (3,3,C14,C9,5),	0),
   4218   SR_CORE ("pmevcntr14_el0",    CPENC (3,3,C14,C9,6),	0),
   4219   SR_CORE ("pmevcntr15_el0",    CPENC (3,3,C14,C9,7),	0),
   4220   SR_CORE ("pmevcntr16_el0",    CPENC (3,3,C14,C10,0),	0),
   4221   SR_CORE ("pmevcntr17_el0",    CPENC (3,3,C14,C10,1),	0),
   4222   SR_CORE ("pmevcntr18_el0",    CPENC (3,3,C14,C10,2),	0),
   4223   SR_CORE ("pmevcntr19_el0",    CPENC (3,3,C14,C10,3),	0),
   4224   SR_CORE ("pmevcntr20_el0",    CPENC (3,3,C14,C10,4),	0),
   4225   SR_CORE ("pmevcntr21_el0",    CPENC (3,3,C14,C10,5),	0),
   4226   SR_CORE ("pmevcntr22_el0",    CPENC (3,3,C14,C10,6),	0),
   4227   SR_CORE ("pmevcntr23_el0",    CPENC (3,3,C14,C10,7),	0),
   4228   SR_CORE ("pmevcntr24_el0",    CPENC (3,3,C14,C11,0),	0),
   4229   SR_CORE ("pmevcntr25_el0",    CPENC (3,3,C14,C11,1),	0),
   4230   SR_CORE ("pmevcntr26_el0",    CPENC (3,3,C14,C11,2),	0),
   4231   SR_CORE ("pmevcntr27_el0",    CPENC (3,3,C14,C11,3),	0),
   4232   SR_CORE ("pmevcntr28_el0",    CPENC (3,3,C14,C11,4),	0),
   4233   SR_CORE ("pmevcntr29_el0",    CPENC (3,3,C14,C11,5),	0),
   4234   SR_CORE ("pmevcntr30_el0",    CPENC (3,3,C14,C11,6),	0),
   4235   SR_CORE ("pmevtyper0_el0",    CPENC (3,3,C14,C12,0),	0),
   4236   SR_CORE ("pmevtyper1_el0",    CPENC (3,3,C14,C12,1),	0),
   4237   SR_CORE ("pmevtyper2_el0",    CPENC (3,3,C14,C12,2),	0),
   4238   SR_CORE ("pmevtyper3_el0",    CPENC (3,3,C14,C12,3),	0),
   4239   SR_CORE ("pmevtyper4_el0",    CPENC (3,3,C14,C12,4),	0),
   4240   SR_CORE ("pmevtyper5_el0",    CPENC (3,3,C14,C12,5),	0),
   4241   SR_CORE ("pmevtyper6_el0",    CPENC (3,3,C14,C12,6),	0),
   4242   SR_CORE ("pmevtyper7_el0",    CPENC (3,3,C14,C12,7),	0),
   4243   SR_CORE ("pmevtyper8_el0",    CPENC (3,3,C14,C13,0),	0),
   4244   SR_CORE ("pmevtyper9_el0",    CPENC (3,3,C14,C13,1),	0),
   4245   SR_CORE ("pmevtyper10_el0",   CPENC (3,3,C14,C13,2),	0),
   4246   SR_CORE ("pmevtyper11_el0",   CPENC (3,3,C14,C13,3),	0),
   4247   SR_CORE ("pmevtyper12_el0",   CPENC (3,3,C14,C13,4),	0),
   4248   SR_CORE ("pmevtyper13_el0",   CPENC (3,3,C14,C13,5),	0),
   4249   SR_CORE ("pmevtyper14_el0",   CPENC (3,3,C14,C13,6),	0),
   4250   SR_CORE ("pmevtyper15_el0",   CPENC (3,3,C14,C13,7),	0),
   4251   SR_CORE ("pmevtyper16_el0",   CPENC (3,3,C14,C14,0),	0),
   4252   SR_CORE ("pmevtyper17_el0",   CPENC (3,3,C14,C14,1),	0),
   4253   SR_CORE ("pmevtyper18_el0",   CPENC (3,3,C14,C14,2),	0),
   4254   SR_CORE ("pmevtyper19_el0",   CPENC (3,3,C14,C14,3),	0),
   4255   SR_CORE ("pmevtyper20_el0",   CPENC (3,3,C14,C14,4),	0),
   4256   SR_CORE ("pmevtyper21_el0",   CPENC (3,3,C14,C14,5),	0),
   4257   SR_CORE ("pmevtyper22_el0",   CPENC (3,3,C14,C14,6),	0),
   4258   SR_CORE ("pmevtyper23_el0",   CPENC (3,3,C14,C14,7),	0),
   4259   SR_CORE ("pmevtyper24_el0",   CPENC (3,3,C14,C15,0),	0),
   4260   SR_CORE ("pmevtyper25_el0",   CPENC (3,3,C14,C15,1),	0),
   4261   SR_CORE ("pmevtyper26_el0",   CPENC (3,3,C14,C15,2),	0),
   4262   SR_CORE ("pmevtyper27_el0",   CPENC (3,3,C14,C15,3),	0),
   4263   SR_CORE ("pmevtyper28_el0",   CPENC (3,3,C14,C15,4),	0),
   4264   SR_CORE ("pmevtyper29_el0",   CPENC (3,3,C14,C15,5),	0),
   4265   SR_CORE ("pmevtyper30_el0",   CPENC (3,3,C14,C15,6),	0),
   4266   SR_CORE ("pmccfiltr_el0",     CPENC (3,3,C14,C15,7),	0),
   4267 
   4268   SR_V8_4 ("dit",		CPEN_ (3,C2,5),		0),
   4269   SR_V8_4 ("vstcr_el2",		CPENC (3,4,C2,C6,2),	0),
   4270   SR_V8_4_A ("vsttbr_el2",	CPENC (3,4,C2,C6,0),	0),
   4271   SR_V8_4 ("cnthvs_tval_el2",	CPENC (3,4,C14,C4,0),	0),
   4272   SR_V8_4 ("cnthvs_cval_el2",	CPENC (3,4,C14,C4,2),	0),
   4273   SR_V8_4 ("cnthvs_ctl_el2",	CPENC (3,4,C14,C4,1),	0),
   4274   SR_V8_4 ("cnthps_tval_el2",	CPENC (3,4,C14,C5,0),	0),
   4275   SR_V8_4 ("cnthps_cval_el2",	CPENC (3,4,C14,C5,2),	0),
   4276   SR_V8_4 ("cnthps_ctl_el2",	CPENC (3,4,C14,C5,1),	0),
   4277   SR_V8_4 ("sder32_el2",	CPENC (3,4,C1,C3,1),	0),
   4278   SR_V8_4 ("vncr_el2",		CPENC (3,4,C2,C2,0),	0),
   4279 
   4280   SR_CORE ("mpam0_el1",		CPENC (3,0,C10,C5,1),	0),
   4281   SR_CORE ("mpam1_el1",		CPENC (3,0,C10,C5,0),	0),
   4282   SR_CORE ("mpam1_el12",	CPENC (3,5,C10,C5,0),	0),
   4283   SR_CORE ("mpam2_el2",		CPENC (3,4,C10,C5,0),	0),
   4284   SR_CORE ("mpam3_el3",		CPENC (3,6,C10,C5,0),	0),
   4285   SR_CORE ("mpamhcr_el2",	CPENC (3,4,C10,C4,0),	0),
   4286   SR_CORE ("mpamidr_el1",	CPENC (3,0,C10,C4,4),	F_REG_READ),
   4287   SR_CORE ("mpamvpm0_el2",	CPENC (3,4,C10,C6,0),	0),
   4288   SR_CORE ("mpamvpm1_el2",	CPENC (3,4,C10,C6,1),	0),
   4289   SR_CORE ("mpamvpm2_el2",	CPENC (3,4,C10,C6,2),	0),
   4290   SR_CORE ("mpamvpm3_el2",	CPENC (3,4,C10,C6,3),	0),
   4291   SR_CORE ("mpamvpm4_el2",	CPENC (3,4,C10,C6,4),	0),
   4292   SR_CORE ("mpamvpm5_el2",	CPENC (3,4,C10,C6,5),	0),
   4293   SR_CORE ("mpamvpm6_el2",	CPENC (3,4,C10,C6,6),	0),
   4294   SR_CORE ("mpamvpm7_el2",	CPENC (3,4,C10,C6,7),	0),
   4295   SR_CORE ("mpamvpmv_el2",	CPENC (3,4,C10,C4,1),	0),
   4296 
   4297   SR_V8_R ("mpuir_el1",		CPENC (3,0,C0,C0,4),	F_REG_READ),
   4298   SR_V8_R ("mpuir_el2",		CPENC (3,4,C0,C0,4),	F_REG_READ),
   4299   SR_V8_R ("prbar_el1",		CPENC (3,0,C6,C8,0),	0),
   4300   SR_V8_R ("prbar_el2",		CPENC (3,4,C6,C8,0),	0),
   4301 
   4302 #define ENC_BARLAR(x,n,lar) \
   4303   CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
   4304 
   4305 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
   4306 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
   4307 
   4308   SR_EXPAND_EL12 (PRBARn_ELx)
   4309   SR_V8_R ("prenr_el1",		CPENC (3,0,C6,C1,1),	0),
   4310   SR_V8_R ("prenr_el2",		CPENC (3,4,C6,C1,1),	0),
   4311   SR_V8_R ("prlar_el1",		CPENC (3,0,C6,C8,1),	0),
   4312   SR_V8_R ("prlar_el2",		CPENC (3,4,C6,C8,1),	0),
   4313   SR_EXPAND_EL12 (PRLARn_ELx)
   4314   SR_V8_R ("prselr_el1",	CPENC (3,0,C6,C2,1),	0),
   4315   SR_V8_R ("prselr_el2",	CPENC (3,4,C6,C2,1),	0),
   4316   SR_V8_R ("vsctlr_el2",	CPENC (3,4,C2,C0,0),	0),
   4317 
   4318   { 0, CPENC (0,0,0,0,0), 0, 0 }
   4319 };
   4320 
   4321 bfd_boolean
   4322 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
   4323 {
   4324   return (reg_flags & F_DEPRECATED) != 0;
   4325 }
   4326 
/* The CPENC values below are fairly misleading: the fields here are not
   in CPENC form but in op2:op1 form.  They are encoded by
   ins_pstatefield, which simply shifts each value by the width of the
   fields in a loop, so if you run them through CPENC only the first
   field survives and the rest are masked out to 0.  For example, with
   op2 = 3 and op1 = 2, CPENC would produce 0b110000000001000000
   (0x30040), whereas the value actually wanted is 0b011010 (0x1a).  */
/* PSTATE field names accepted by MSR (immediate).  The encodings are
   op1:op2 pairs (see the comment above), not CPENC values.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel",	  0x05,	0),
  SR_CORE ("daifset",	  0x1e,	0),
  SR_CORE ("daifclr",	  0x1f,	0),
  SR_PAN  ("pan",	  0x04, 0),
  SR_V8_2 ("uao",	  0x03, 0),
  SR_SSBS ("ssbs",	  0x19, 0),
  SR_V8_4 ("dit",	  0x1a,	0),
  SR_MEMTAG ("tco",	  0x1c,	0),
  { 0,	  CPENC (0,0,0,0,0), 0, 0 },	/* Sentinel.  */
};
   4346 
   4347 bfd_boolean
   4348 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
   4349 				 const aarch64_sys_reg *reg)
   4350 {
   4351   if (!(reg->flags & F_ARCHEXT))
   4352     return TRUE;
   4353 
   4354   return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
   4355 }
   4356 
/* Operand table for the IC (instruction cache maintenance) instruction.
   Entries flagged F_HASXT take a register operand (see
   aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
   4364 
/* Operand table for the DC (data cache maintenance) instruction.
   F_HASXT entries take a register operand; F_ARCHEXT entries need an
   architecture extension, checked in aarch64_sys_ins_reg_supported_p
   (e.g. the g*/ci g* tagged variants are gated on MEMTAG there).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
   4397 
/* Operand table for the AT (address translation) instruction.  All
   entries take a register operand (F_HASXT); the s1e1rp/s1e1wp pair
   additionally requires an extension (gated on V8_2 in
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
   4416 
/* Operand table for the TLBI (TLB invalidate) instruction.  F_HASXT
   entries take a register operand; F_ARCHEXT entries are gated on
   architecture extensions in aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable (*os) variants; their encodings are accepted under
       the ARMv8.4 feature check in aarch64_sys_ins_reg_supported_p.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range (r*) variants; also accepted under the ARMv8.4 feature check
       in aarch64_sys_ins_reg_supported_p.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
   4502 
/* Operand table for the speculation restriction instructions
   (CFP/DVP/CPP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
   4513 
   4514 bfd_boolean
   4515 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
   4516 {
   4517   return (sys_ins_reg->flags & F_HASXT) != 0;
   4518 }
   4519 
/* Return TRUE if the system instruction operand (from the IC/DC/AT/TLBI
   tables above) named REG_NAME, with encoding REG_VALUE, flags REG_FLAGS
   and required features REG_FEATURES, is available with the CPU features
   FEATURES.  Entries without F_ARCHEXT are always available; F_ARCHEXT
   entries are matched either against REG_FEATURES or against the
   hard-coded per-extension encoding lists below.  */
extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
		 const char *reg_name,
                 aarch64_insn reg_value,
                 uint32_t reg_flags,
                 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      /* Reject any operand whose name ends in "_el3".  */
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return FALSE;
    }

  /* No extension requirement: always supported.  */
  if (!(reg_flags & F_ARCHEXT))
    return TRUE;

  /* A non-empty per-entry feature set decides directly.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return TRUE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return TRUE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return TRUE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return TRUE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return TRUE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return TRUE;

  return FALSE;
}
   4637 
/* The register-number helper macros are only needed by the tables above;
   retire them here.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit number BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
   4657 
   4658 static enum err_type
   4659 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
   4660 	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
   4661 	      bfd_boolean encoding ATTRIBUTE_UNUSED,
   4662 	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   4663 	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   4664 {
   4665   int t  = BITS (insn, 4, 0);
   4666   int n  = BITS (insn, 9, 5);
   4667   int t2 = BITS (insn, 14, 10);
   4668 
   4669   if (BIT (insn, 23))
   4670     {
   4671       /* Write back enabled.  */
   4672       if ((t == n || t2 == n) && n != 31)
   4673 	return ERR_UND;
   4674     }
   4675 
   4676   if (BIT (insn, 22))
   4677     {
   4678       /* Load */
   4679       if (t == t2)
   4680 	return ERR_UND;
   4681     }
   4682 
   4683   return ERR_OK;
   4684 }
   4685 
   4686 /* Verifier for vector by element 3 operands functions where the
   4687    conditions `if sz:L == 11 then UNDEFINED` holds.  */
   4688 
   4689 static enum err_type
   4690 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
   4691 		bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
   4692 		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
   4693 		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
   4694 {
   4695   const aarch64_insn undef_pattern = 0x3;
   4696   aarch64_insn value;
   4697 
   4698   assert (inst->opcode);
   4699   assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
   4700   value = encoding ? inst->value : insn;
   4701   assert (value);
   4702 
   4703   if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
   4704     return ERR_UND;
   4705 
   4706   return ERR_OK;
   4707 }
   4708 
   4709 /* Initialize an instruction sequence insn_sequence with the instruction INST.
   4710    If INST is NULL the given insn_sequence is cleared and the sequence is left
   4711    uninitialized.  */
   4712 
   4713 void
   4714 init_insn_sequence (const struct aarch64_inst *inst,
   4715 		    aarch64_instr_sequence *insn_sequence)
   4716 {
   4717   int num_req_entries = 0;
   4718   insn_sequence->next_insn = 0;
   4719   insn_sequence->num_insns = num_req_entries;
   4720   if (insn_sequence->instr)
   4721     XDELETE (insn_sequence->instr);
   4722   insn_sequence->instr = NULL;
   4723 
   4724   if (inst)
   4725     {
   4726       insn_sequence->instr = XNEW (aarch64_inst);
   4727       memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
   4728     }
   4729 
   4730   /* Handle all the cases here.  May need to think of something smarter than
   4731      a giant if/else chain if this grows.  At that time, a lookup table may be
   4732      best.  */
   4733   if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
   4734     num_req_entries = 1;
   4735 
   4736   if (insn_sequence->current_insns)
   4737     XDELETEVEC (insn_sequence->current_insns);
   4738   insn_sequence->current_insns = NULL;
   4739 
   4740   if (num_req_entries != 0)
   4741     {
   4742       size_t size = num_req_entries * sizeof (aarch64_inst);
   4743       insn_sequence->current_insns
   4744 	= (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
   4745       memset (insn_sequence->current_insns, 0, size);
   4746     }
   4747 }
   4748 
   4749 
   4750 /*  This function verifies that the instruction INST adheres to its specified
   4751     constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   4752     returned and MISMATCH_DETAIL contains the reason why verification failed.
   4753 
   4754     The function is called both during assembly and disassembly.  If assembling
   4755     then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   4756     and will contain the PC of the current instruction w.r.t to the section.
   4757 
   4758     If ENCODING and PC=0 then you are at a start of a section.  The constraints
   4759     are verified against the given state insn_sequence which is updated as it
   4760     transitions through the verification.  */
   4761 
enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bfd_boolean encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast path: this opcode imposes no constraints and there is no open
     sequence that it could participate in.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      if (insn_sequence->instr)
	{
	  /* A previous constraint-opening instruction (e.g. MOVPRFX) was
	     never satisfied.  Report it (non-fatally) but still start the
	     new sequence below.  */
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (!encoding && pc == 0)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bfd_boolean predicated = FALSE;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = TRUE;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan the current instruction's operands, recording: how often
	     the movprfx destination register is used (and where last), the
	     largest vector element size seen, and the governing predicate
	     operand (if any).  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	   /* The SVE-compatibility check above guarantees at least one
	      vector operand was seen.  */
	   assert (max_elem_size != 0);
	   aarch64_opnd_info inst_dest = inst->operands[0];
	   /* Determine the size that should be used to compare against the
	      movprfx size.  */
	   current_elem_size
	     = opcode->constraints & C_MAX_ELEM
	       ? max_elem_size
	       : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      /* Add the new instruction to the sequence.  */
      /* NOTE(review): assumes current_insns was allocated when the sequence
	 was opened (true for C_SCAN_MOVPRFX openers, the only kind today);
	 confirm if an F_SCAN opcode without that constraint is ever added.  */
      memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
	      inst, sizeof (aarch64_inst));

      /* Check if sequence is now full.  */
      if (insn_sequence->next_insn >= insn_sequence->num_insns)
	{
	  /* Sequence is full, but we don't have anything special to do for now,
	     so clear and reset it.  */
	  init_insn_sequence (NULL, insn_sequence);
	}
    }

  return res;
}
   5035 
   5036 
   5037 /* Return true if VALUE cannot be moved into an SVE register using DUP
   5038    (with any element size, not just ESIZE) and if using DUPM would
   5039    therefore be OK.  ESIZE is the number of bytes in the immediate.  */
   5040 
   5041 bfd_boolean
   5042 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
   5043 {
   5044   int64_t svalue = uvalue;
   5045   uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
   5046 
   5047   if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
   5048     return FALSE;
   5049   if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
   5050     {
   5051       svalue = (int32_t) uvalue;
   5052       if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
   5053 	{
   5054 	  svalue = (int16_t) uvalue;
   5055 	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
   5056 	    return FALSE;
   5057 	}
   5058     }
   5059   if ((svalue & 0xff) == 0)
   5060     svalue /= 256;
   5061   return svalue < -128 || svalue >= 128;
   5062 }
   5063 
   5064 /* Include the opcode description table as well as the operand description
   5065    table.  */
   5066 #define VERIFIER(x) verify_##x
   5067 #include "aarch64-tbl.h"
   5068