/* aarch64.h — binutils "opcodes" directory, revision 1.1.1.12.  */
      1 /* AArch64 assembler/disassembler support.
      2 
      3    Copyright (C) 2009-2025 Free Software Foundation, Inc.
      4    Contributed by ARM Ltd.
      5 
      6    This file is part of GNU Binutils.
      7 
      8    This program is free software; you can redistribute it and/or modify
      9    it under the terms of the GNU General Public License as published by
     10    the Free Software Foundation; either version 3 of the license, or
     11    (at your option) any later version.
     12 
     13    This program is distributed in the hope that it will be useful,
     14    but WITHOUT ANY WARRANTY; without even the implied warranty of
     15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     16    GNU General Public License for more details.
     17 
     18    You should have received a copy of the GNU General Public License
     19    along with this program; see the file COPYING3. If not,
     20    see <http://www.gnu.org/licenses/>.  */
     21 
#ifndef OPCODE_AARCH64_H
#define OPCODE_AARCH64_H

#include "bfd.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>

#include "dis-asm.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET		0

/* Every AArch64 instruction is a single fixed-width 32-bit word.  */
typedef uint32_t aarch64_insn;
     40 
     41 /* An enum containing all known CPU features.  The values act as bit positions
     42    into aarch64_feature_set.  */
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set.  NOTE: because each enumerator's value is its bit
   position, the order of entries is part of the in-memory representation of
   aarch64_feature_set; append new features rather than reordering.  */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  /* SVE2 AES instructions.  */
  AARCH64_FEATURE_SVE2_AES,
  /* SVE2 bit-permute instructions.  */
  AARCH64_FEATURE_SVE2_BITPERM,
  /* SVE2 SM4 instructions.  */
  AARCH64_FEATURE_SVE2_SM4,
  /* SVE2 SHA3 instructions.  */
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAUTH,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* LSFE instructions.  */
  AARCH64_FEATURE_LSFE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions.  */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions.  */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Compare and branch instructions.  */
  AARCH64_FEATURE_CMPBR,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Outer Cacheable Cache Maintenance Operation.  */
  AARCH64_FEATURE_OCCMO,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* XS memory attribute.  */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout.  */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  /* F32MM instructions.  */
  AARCH64_FEATURE_F32MM,
  /* F64MM instructions.  */
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors.  */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records.  */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source.  */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events.  */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* LSUI - Unprivileged Load Store.  */
  AARCH64_FEATURE_LSUI,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* Delegated SError exceptions for EL3. */
  AARCH64_FEATURE_E3DSE,
  /* System Control Register2.  */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps.  */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address.  */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement.  */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension.  */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension.  */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension.  */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension.  */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers.  */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions.  */
  AARCH64_FEATURE_D128,
  /* Armv8.9-A/Armv9.4-A architecture Debug extension.  */
  AARCH64_FEATURE_DEBUGv8p9,
  /* Performance Monitors Extension.  */
  AARCH64_FEATURE_PMUv3p9,
  /* Performance Monitors Snapshots Extension.  */
  AARCH64_FEATURE_PMUv3_SS,
  /* Performance Monitors Instruction Counter Extension.  */
  AARCH64_FEATURE_PMUv3_ICNTR,
  /* System Performance Monitors Extension */
  AARCH64_FEATURE_SPMU,
  /* System Performance Monitors Extension version 2 */
  AARCH64_FEATURE_SPMU2,
  /* Performance Monitors Synchronous-Exception-Based Event Extension.  */
  AARCH64_FEATURE_SEBEP,
  /* SME2.1 instructions.  */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions.  */
  AARCH64_FEATURE_SVE2p1,
  /* SVE_F16F32MM instructions.  */
  AARCH64_FEATURE_SVE_F16F32MM,
  /* F8F32MM instructions.  */
  AARCH64_FEATURE_F8F32MM,
  /* F8F16MM instructions.  */
  AARCH64_FEATURE_F8F16MM,
  /* SVE_PMULL128 extension. */
  AARCH64_FEATURE_SVE_AES,
  /* SVE AES2 instructions.  */
  AARCH64_FEATURE_SVE_AES2,
  /* SSVE_AES extension. */
  AARCH64_FEATURE_SSVE_AES,
  /* RCPC3 instructions.  */
  AARCH64_FEATURE_RCPC3,
  /* Enhanced Software Step Extension. */
  AARCH64_FEATURE_STEP2,
  /* Checked Pointer Arithmetic instructions. */
  AARCH64_FEATURE_CPA,
  /* FAMINMAX instructions.  */
  AARCH64_FEATURE_FAMINMAX,
  /* FP8 instructions.  */
  AARCH64_FEATURE_FP8,
  /* LUT instructions.  */
  AARCH64_FEATURE_LUT,
  /* Branch Record Buffer Extension */
  AARCH64_FEATURE_BRBE,
  /* SME LUTv2 instructions.  */
  AARCH64_FEATURE_SME_LUTv2,
  /* FP8FMA instructions.  */
  AARCH64_FEATURE_FP8FMA,
  /* FP8DOT4 instructions.  */
  AARCH64_FEATURE_FP8DOT4,
  /* FP8DOT2 instructions.  */
  AARCH64_FEATURE_FP8DOT2,
  /* SSVE FP8FMA instructions.  */
  AARCH64_FEATURE_SSVE_FP8FMA,
  /* SSVE FP8DOT4 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT4,
  /* SSVE FP8DOT2 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT2,
  /* SME F8F32 instructions.  */
  AARCH64_FEATURE_SME_F8F32,
  /* SME F8F16 instructions.  */
  AARCH64_FEATURE_SME_F8F16,
  /* Non-widening half-precision FP16 to FP16 arithmetic for SME2.  */
  AARCH64_FEATURE_SME_F16F16,
  /* FEAT_SVE_BFSCALE.  */
  AARCH64_FEATURE_SVE_BFSCALE,
  /* SVE Z-targeting non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_SVE_B16B16,
  /* SME non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_SME_B16B16,
  /* SVE2.2.  */
  AARCH64_FEATURE_SVE2p2,
  /* SME2.2.  */
  AARCH64_FEATURE_SME2p2,
  /* Armv9.1-A processors.  */
  AARCH64_FEATURE_V9_1A,
  /* Armv9.2-A processors.  */
  AARCH64_FEATURE_V9_2A,
  /* Armv9.3-A processors.  */
  AARCH64_FEATURE_V9_3A,
  /* Armv9.4-A processors.  */
  AARCH64_FEATURE_V9_4A,
  /* Armv9.5-A processors.  */
  AARCH64_FEATURE_V9_5A,
  /* Armv9.6-A processors.  */
  AARCH64_FEATURE_V9_6A,
  /* FPRCVT instructions.  */
  AARCH64_FEATURE_FPRCVT,
  /* Point of Physical Storage.  */
  AARCH64_FEATURE_PoPS,

  /* Virtual features.  These are used to gate instructions that are enabled
     by either of two (or more) sets of command line flags.  */
  /* +sve2 or +ssve-aes */
  AARCH64_FEATURE_SVE2_SSVE_AES,
  /* +fp8fma+sve or +ssve-fp8fma  */
  AARCH64_FEATURE_FP8FMA_SVE,
  /* +fp8dot4+sve or +ssve-fp8dot4  */
  AARCH64_FEATURE_FP8DOT4_SVE,
  /* +fp8dot2+sve or +ssve-fp8dot2  */
  AARCH64_FEATURE_FP8DOT2_SVE,
  /* +sme-f16f16 or +sme-f8f16  */
  AARCH64_FEATURE_SME_F16F16_F8F16,
  /* +sve or +sme2p2 */
  AARCH64_FEATURE_SVE_SME2p2,
  /* +sve2 or +sme2 */
  AARCH64_FEATURE_SVE2_SME2,
  /* +sve2p1 or +sme */
  AARCH64_FEATURE_SVE2p1_SME,
  /* +sve2p1 or +sme2 */
  AARCH64_FEATURE_SVE2p1_SME2,
  /* +sve2p1 or +sme2p1 */
  AARCH64_FEATURE_SVE2p1_SME2p1,
  /* +sve2p2 or +sme2p2 */
  AARCH64_FEATURE_SVE2p2_SME2p2,
  /* Sentinel: total number of feature bits; must remain last.  */
  AARCH64_NUM_FEATURES
};
    340 
typedef uint64_t aarch64_feature_word;
#define AARCH64_BITS_PER_FEATURE_WORD 64

/* Expand BODY once per feature word (three words at present), with SEP
   between expansions.  The first argument passed to BODY is the word
   index; __VA_ARGS__ follows it.  */
#define AA64_REPLICATE(SEP, BODY, ...)	\
  BODY (0, __VA_ARGS__) SEP		\
  BODY (1, __VA_ARGS__) SEP		\
  BODY (2, __VA_ARGS__)

/* Some useful SEP operators for use with replication.  */
#define REP_COMMA ,
#define REP_SEMICOLON ;
#define REP_OR_OR ||
#define REP_AND_AND &&
#define REP_PLUS +

/* Not currently needed, but if an empty SEP is required define:
  #define REP_NO_SEP
  Then use REP_NO_SEP in the SEP field.  */

/* Used to generate one instance of VAL for each value of ELT (ELT is
   not otherwise used).  */
#define AA64_REPVAL(ELT, VAL) VAL
    363 
/* static_assert requires C11 (or C++11) or later.  Support older
   versions by disabling this check since compilers without this are
   pretty uncommon these days.  */
#if ((defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L)	\
     || (defined __cplusplus && __cplusplus >= 201103L))
/* Replication count times bits-per-word must cover every feature bit;
   otherwise AA64_REPLICATE needs another BODY expansion.  */
static_assert ((AA64_REPLICATE (REP_PLUS, AA64_REPVAL,
				AARCH64_BITS_PER_FEATURE_WORD))
	       >= AARCH64_NUM_FEATURES,
	       "Insufficient repetitions in AA64_REPLICATE()");
#endif
    374 
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   Yields 0 for every array index X other than the word holding BIT.  */
#define AARCH64_UINT64_BIT(X, BIT)			\
  ((X) == (BIT) / AARCH64_BITS_PER_FEATURE_WORD		\
   ? 1ULL << (BIT) % AARCH64_BITS_PER_FEATURE_WORD	\
   : 0)

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
    388 
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.
   The cumulative per-architecture masks are built from these below.  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAUTH)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM) \
					 | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, RCPC2)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, XS)      \
					 | AARCH64_FEATBIT (X, WFXT)    \
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, CSSC) \
					 | AARCH64_FEATBIT (X, SPEv1p4) \
					 | AARCH64_FEATBIT (X, SPE_CRR)	\
					 | AARCH64_FEATBIT (X, SPE_FDS) \
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, SCTLR2)	\
					 | AARCH64_FEATBIT (X, FGT2)	\
					 | AARCH64_FEATBIT (X, PFAR)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, AIE)	\
					 | AARCH64_FEATBIT (X, S1PIE)	\
					 | AARCH64_FEATBIT (X, S2PIE)	\
					 | AARCH64_FEATBIT (X, S1POE)	\
					 | AARCH64_FEATBIT (X, S2POE)	\
					 | AARCH64_FEATBIT (X, TCR2)	\
					 | AARCH64_FEATBIT (X, DEBUGv8p9) \
					 | AARCH64_FEATBIT (X, PMUv3p9)	\
					 | AARCH64_FEATBIT (X, PMUv3_SS) \
					 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
					 | AARCH64_FEATBIT (X, SPMU) \
					 | AARCH64_FEATBIT (X, SEBEP) \
					 | AARCH64_FEATBIT (X, PREDRES2) \
					)
    456 
/* Per-version feature masks for the Armv9 line.  Note that V9_1A
   through V9_4A also pull in the corresponding Armv8.x extras, since
   each Armv9.x release aligns with an Armv8.(x+5) release.  */
#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
#define AARCH64_ARCH_V9_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_1A)	\
					 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V9_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_2A)	\
					 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V9_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_3A)	\
					 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V9_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_4A)	\
					 | AARCH64_ARCH_V8_9A_FEATURES (X) \
					 | AARCH64_FEATBIT (X, SVE2p1))
#define AARCH64_ARCH_V9_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_5A)	\
					 | AARCH64_FEATBIT (X, CPA)	\
					 | AARCH64_FEATBIT (X, LUT)	\
					 | AARCH64_FEATBIT (X, FAMINMAX)\
					 | AARCH64_FEATBIT (X, E3DSE)	\
					 | AARCH64_FEATBIT (X, SPMU2)	\
					 | AARCH64_FEATBIT (X, STEP2)	\
					)
#define AARCH64_ARCH_V9_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_6A)	\
					 | AARCH64_FEATBIT (X, CMPBR)	\
					 | AARCH64_FEATBIT (X, FPRCVT)	\
					 | AARCH64_FEATBIT (X, LSUI)	\
					 | AARCH64_FEATBIT (X, OCCMO)	\
					 | AARCH64_FEATBIT (X, SVE2p2))
    484 
/* Architectures are the sum of the base and extensions.  Each version
   includes everything from the previous version plus its own extras.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8) \
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X) \
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)	\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)	\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)	\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)	\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)	\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)	\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)	\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)	\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R: based on Armv8.4 but is not an A-profile architecture and
   does not include LOR, so those bits are masked back out.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X) \
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X) \
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X) \
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X) \
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X) \
				 | AARCH64_ARCH_V9_4A_FEATURES (X))
#define AARCH64_ARCH_V9_5A(X)	(AARCH64_ARCH_V9_4A (X) \
				 | AARCH64_ARCH_V9_5A_FEATURES (X))
#define AARCH64_ARCH_V9_6A(X)	(AARCH64_ARCH_V9_5A (X) \
				 | AARCH64_ARCH_V9_6A_FEATURES (X))

/* The empty architecture: no feature bits at all.  */
#define AARCH64_ARCH_NONE(X)	0
    527 
/* CPU-specific features.  */
typedef struct {
  /* One bit per aarch64_feature_bit.  The array length equals the
     AA64_REPLICATE expansion count (the sum of one 1 per word).  */
  aarch64_feature_word flags[AA64_REPLICATE (REP_PLUS, AA64_REPVAL, 1)];
} aarch64_feature_set;
    532 
/* Test whether CPU has the single feature AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CPU_HAS_FEATURE_BODY(ELT, CPU, FEAT)	\
  ((~(CPU).flags[ELT] & AARCH64_FEATBIT (ELT, FEAT)) == 0)
#define AARCH64_CPU_HAS_FEATURE(CPU, FEAT)	\
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_FEATURE_BODY, CPU, FEAT))

/* Test whether CPU has every feature in the feature set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES_BODY(ELT, CPU, FEAT) \
  ((~(CPU).flags[ELT] & (FEAT).flags[ELT]) == 0)
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU, FEAT)	\
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_ALL_FEATURES_BODY, CPU, FEAT))

/* Test whether CPU has at least one feature in the feature set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES_BODY(ELT, CPU, FEAT)	\
  (((CPU).flags[ELT] & (FEAT).flags[ELT]) != 0)
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
  (AA64_REPLICATE (REP_OR_OR, AARCH64_CPU_HAS_ANY_FEATURES_BODY, CPU, FEAT))

/* Assign DEST the mask produced by a mask-generating macro FEAT,
   e.g. AARCH64_SET_FEATURE (set, AARCH64_ARCH_V8A).  */
#define AARCH64_SET_FEATURE_BODY(ELT, DEST, FEAT)	\
  (DEST).flags[ELT] = FEAT (ELT)
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  (AA64_REPLICATE (REP_COMMA, AARCH64_SET_FEATURE_BODY, DEST, FEAT))

/* Copy SRC to DEST with the single feature AARCH64_FEATURE_<FEAT>
   removed.  */
#define AARCH64_CLEAR_FEATURE_BODY(ELT, DEST, SRC, FEAT)	\
  (DEST).flags[ELT] = ((SRC).flags[ELT]			\
			 & ~AARCH64_FEATBIT (ELT, FEAT))
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)		\
  (AA64_REPLICATE (REP_COMMA, AARCH64_CLEAR_FEATURE_BODY, DEST, SRC, FEAT))
    558 
/* TARG = F1 | F2 (union of two feature sets).  */
#define AARCH64_MERGE_FEATURE_SETS_BODY(ELT, TARG, F1, F2)	\
  (TARG).flags[ELT] = (F1).flags[ELT] | (F2).flags[ELT];
#define AARCH64_MERGE_FEATURE_SETS(TARG, F1, F2)			\
  do									\
    {									\
      AA64_REPLICATE (REP_SEMICOLON,					\
		      AARCH64_MERGE_FEATURE_SETS_BODY, TARG, F1, F2);	\
    }									\
  while (0)

/* TARG = F1 & ~F2 (set difference of two feature sets).  */
#define AARCH64_CLEAR_FEATURES_BODY(ELT, TARG, F1, F2)	\
  (TARG).flags[ELT] = (F1).flags[ELT] &~ (F2).flags[ELT];
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)				\
  do									\
    {									\
      AA64_REPLICATE (REP_SEMICOLON,					\
		      AARCH64_CLEAR_FEATURES_BODY, TARG, F1, F2);	\
    }									\
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, 0) } }
#define AARCH64_ALL_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, -1) } }
    583 
/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE_BODY(ELT, FEAT)		\
  AARCH64_FEATBIT (ELT, FEAT)
#define AARCH64_FEATURE(FEAT)					\
  { { AA64_REPLICATE (REP_COMMA, AARCH64_FEATURE_BODY, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  */
#define AARCH64_ARCH_FEATURES_BODY(ELT, ARCH)	\
  AARCH64_ARCH_##ARCH (ELT)
#define AARCH64_ARCH_FEATURES(ARCH)		\
  { { AA64_REPLICATE (REP_COMMA, AARCH64_ARCH_FEATURES_BODY, ARCH) } }

/* Used by AARCH64_CPU_FEATURES.  Each _N variant ORs N named feature
   bits on top of the ARCH mask; _N recurses into _(N-1).  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".  */
#define AARCH64_CPU_FEATURES_BODY(ELT, ARCH, N, ...)		\
  AARCH64_OR_FEATURES_##N (ELT, ARCH, __VA_ARGS__)
#define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
  { { AA64_REPLICATE (REP_COMMA, AARCH64_CPU_FEATURES_BODY,	\
		      ARCH, N, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...".  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
    630 
/* Coarse classification of instruction operands; each aarch64_opnd
   below belongs to exactly one class.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_SVE_REGLIST,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ZA_ACCESS,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};
    650 
    651 /* Operand code that helps both parsing and coding.
    652    Keep AARCH64_OPERANDS synced.  */
    653 
    654 enum aarch64_opnd
    655 {
    656   AARCH64_OPND_NIL,	/* no operand---MUST BE FIRST!*/
    657 
    658   AARCH64_OPND_Rd,	/* Integer register as destination.  */
    659   AARCH64_OPND_Rn,	/* Integer register as source.  */
    660   AARCH64_OPND_Rm,	/* Integer register as source.  */
    661   AARCH64_OPND_Rt,	/* Integer register used in ld/st instructions.  */
    662   AARCH64_OPND_Rt2,	/* Integer register used in ld/st pair instructions.  */
    663   AARCH64_OPND_X16,	/* Integer register x16 in chkfeat instruction.  */
    664   AARCH64_OPND_Rt_LS64,	/* Integer register used in LS64 instructions.  */
    665   AARCH64_OPND_Rt_SP,	/* Integer Rt or SP used in STG instructions.  */
    666   AARCH64_OPND_Rs,	/* Integer register used in ld/st exclusive.  */
    667   AARCH64_OPND_Ra,	/* Integer register used in ddp_3src instructions.  */
    668   AARCH64_OPND_Rt_SYS,	/* Integer register used in system instructions.  */
    669 
    670   AARCH64_OPND_Rd_SP,	/* Integer Rd or SP.  */
    671   AARCH64_OPND_Rn_SP,	/* Integer Rn or SP.  */
    672   AARCH64_OPND_Rm_SP,	/* Integer Rm or SP.  */
    673   AARCH64_OPND_PAIRREG,	/* Paired register operand.  */
    674   AARCH64_OPND_PAIRREG_OR_XZR,	/* Paired register operand, optionally xzr.  */
    675   AARCH64_OPND_Rm_EXT,	/* Integer Rm extended.  */
    676   AARCH64_OPND_Rm_SFT,	/* Integer Rm shifted.  */
    677   AARCH64_OPND_Rm_LSL,	/* Integer Rm shifted (LSL-only).  */
    678 
    679   AARCH64_OPND_Fd,	/* Floating-point Fd.  */
    680   AARCH64_OPND_Fn,	/* Floating-point Fn.  */
    681   AARCH64_OPND_Fm,	/* Floating-point Fm.  */
    682   AARCH64_OPND_Fa,	/* Floating-point Fa.  */
    683   AARCH64_OPND_Ft,	/* Floating-point Ft.  */
    684   AARCH64_OPND_Ft2,	/* Floating-point Ft2.  */
    685 
    686   AARCH64_OPND_Sd,	/* AdvSIMD Scalar Sd.  */
    687   AARCH64_OPND_Sn,	/* AdvSIMD Scalar Sn.  */
    688   AARCH64_OPND_Sm,	/* AdvSIMD Scalar Sm.  */
    689 
    690   AARCH64_OPND_Va,	/* AdvSIMD Vector Va.  */
    691   AARCH64_OPND_Vd,	/* AdvSIMD Vector Vd.  */
    692   AARCH64_OPND_Vn,	/* AdvSIMD Vector Vn.  */
    693   AARCH64_OPND_Vm,	/* AdvSIMD Vector Vm.  */
    694   AARCH64_OPND_VdD1,	/* AdvSIMD <Vd>.D[1]; for FMOV only.  */
    695   AARCH64_OPND_VnD1,	/* AdvSIMD <Vn>.D[1]; for FMOV only.  */
    696   AARCH64_OPND_Ed,	/* AdvSIMD Vector Element Vd.  */
    697   AARCH64_OPND_En,	/* AdvSIMD Vector Element Vn.  */
    698   AARCH64_OPND_Em,	/* AdvSIMD Vector Element Vm.  */
    699   AARCH64_OPND_Em16,	/* AdvSIMD Vector Element Vm restricted to V0 - V15 when
    700 			   qualifier is S_H or S_2B.  */
    701   AARCH64_OPND_Em8,	/* AdvSIMD Vector Element Vm restricted to V0 - V7,
    702 			   used only with qualifier S_B.  */
    703   AARCH64_OPND_Em_INDEX1_14,  /* AdvSIMD 1-bit encoded index in Vm at [14]  */
    704   AARCH64_OPND_Em_INDEX2_13,  /* AdvSIMD 2-bit encoded index in Vm at [14:13]  */
    705   AARCH64_OPND_Em_INDEX3_12,  /* AdvSIMD 3-bit encoded index in Vm at [14:12]  */
    706   AARCH64_OPND_LVn,	/* AdvSIMD Vector register list used in e.g. TBL.  */
    707   AARCH64_OPND_LVt,	/* AdvSIMD Vector register list used in ld/st.  */
    708   AARCH64_OPND_LVt_AL,	/* AdvSIMD Vector register list for loading single
    709 			   structure to all lanes.  */
    710   AARCH64_OPND_LVn_LUT,	/* AdvSIMD Vector register list used in lut.  */
    711   AARCH64_OPND_LEt,	/* AdvSIMD Vector Element list.  */
    712 
    713   AARCH64_OPND_CRn,	/* Co-processor register in CRn field.  */
    714   AARCH64_OPND_CRm,	/* Co-processor register in CRm field.  */
    715 
    716   AARCH64_OPND_IDX,	/* AdvSIMD EXT index operand.  */
    717   AARCH64_OPND_MASK,	/* AdvSIMD EXT index operand.  */
    718   AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
    719   AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
    720   AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
    721   AARCH64_OPND_SIMD_IMM_SFT,	/* AdvSIMD modified immediate with shift.  */
    722   AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate.  */
    723   AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
    724 			   (no encoding).  */
    725   AARCH64_OPND_IMM0,	/* Immediate for #0.  */
    726   AARCH64_OPND_FPIMM0,	/* Immediate for #0.0.  */
    727   AARCH64_OPND_FPIMM,	/* Floating-point Immediate.  */
    728   AARCH64_OPND_IMMR,	/* Immediate #<immr> in e.g. BFM.  */
    729   AARCH64_OPND_IMMS,	/* Immediate #<imms> in e.g. BFM.  */
    730   AARCH64_OPND_WIDTH,	/* Immediate #<width> in e.g. BFI.  */
    731   AARCH64_OPND_IMM,	/* Immediate.  */
    732   AARCH64_OPND_IMM_2,	/* Immediate.  */
    733   AARCH64_OPND_IMMP1_2,	/* Immediate plus 1.  */
    734   AARCH64_OPND_IMMS1_2,	/* Immediate minus 1.  */
    735   AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
    736   AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
    737   AARCH64_OPND_UIMM4,	/* Unsigned 4-bit immediate in the CRm field.  */
    738   AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg.  */
    739   AARCH64_OPND_UIMM7,	/* Unsigned 7-bit immediate in the CRm:op2 fields.  */
    740   AARCH64_OPND_UIMM10,	/* Unsigned 10-bit immediate in addg/subg.  */
    741   AARCH64_OPND_BIT_NUM,	/* Immediate.  */
    742   AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
    743   AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
    744   AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
    745   AARCH64_OPND_SIMM5,	/* 5-bit signed immediate in the imm5 field.  */
    746   AARCH64_OPND_NZCV,	/* Flag bit specifier giving an alternative value for
    747 			   each condition flag.  */
    748 
    749   AARCH64_OPND_LIMM,	/* Logical Immediate.  */
    750   AARCH64_OPND_AIMM,	/* Arithmetic immediate.  */
    751   AARCH64_OPND_HALF,	/* #<imm16>{, LSL #<shift>} operand in move wide.  */
    752   AARCH64_OPND_FBITS,	/* FP #<fbits> operand in e.g. SCVTF */
    753   AARCH64_OPND_IMM_MOV,	/* Immediate operand for the MOV alias.  */
    754   AARCH64_OPND_IMM_ROT1,	/* Immediate rotate operand for FCMLA.  */
    755   AARCH64_OPND_IMM_ROT2,	/* Immediate rotate operand for indexed FCMLA.  */
    756   AARCH64_OPND_IMM_ROT3,	/* Immediate rotate operand for FCADD.  */
    757 
    758   AARCH64_OPND_COND,	/* Standard condition as the last operand.  */
    759   AARCH64_OPND_COND1,	/* Same as the above, but excluding AL and NV.  */
    760 
    761   AARCH64_OPND_ADDR_ADRP,	/* Memory address for ADRP */
    762   AARCH64_OPND_ADDR_PCREL9,	/* 9-bit PC-relative address for e.g. CB<cc>.  */
    763   AARCH64_OPND_ADDR_PCREL14,	/* 14-bit PC-relative address for e.g. TBZ.  */
    764   AARCH64_OPND_ADDR_PCREL19,	/* 19-bit PC-relative address for e.g. LDR.  */
    765   AARCH64_OPND_ADDR_PCREL21,	/* 21-bit PC-relative address for e.g. ADR.  */
    766   AARCH64_OPND_ADDR_PCREL26,	/* 26-bit PC-relative address for e.g. BL.  */
    767 
    768   AARCH64_OPND_ADDR_SIMPLE,	/* Address of ld/st exclusive.  */
    769   AARCH64_OPND_ADDR_REGOFF,	/* Address of register offset.  */
    770   AARCH64_OPND_ADDR_SIMM7,	/* Address of signed 7-bit immediate.  */
    771   AARCH64_OPND_ADDR_SIMM9,	/* Address of signed 9-bit immediate.  */
    772   AARCH64_OPND_ADDR_SIMM9_2,	/* Same as the above, but the immediate is
    773 				   negative or unaligned and there is
    774 				   no writeback allowed.  This operand code
    775 				   is only used to support the programmer-
    776 				   friendly feature of using LDR/STR as the
     777 				   mnemonic name for LDUR/STUR instructions
    778 				   wherever there is no ambiguity.  */
    779   AARCH64_OPND_ADDR_SIMM10,	/* Address of signed 10-bit immediate.  */
    780   AARCH64_OPND_ADDR_SIMM11,	/* Address with a signed 11-bit (multiple of
    781 				   16) immediate.  */
    782   AARCH64_OPND_ADDR_UIMM12,	/* Address of unsigned 12-bit immediate.  */
    783   AARCH64_OPND_ADDR_SIMM13,	/* Address with a signed 13-bit (multiple of
    784 				   16) immediate.  */
    785   AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
    786   AARCH64_OPND_ADDR_OFFSET,     /* Address with an optional 9-bit immediate.  */
    787   AARCH64_OPND_SIMD_ADDR_POST,	/* Address of ld/st multiple post-indexed.  */
    788 
    789   AARCH64_OPND_SYSREG,		/* System register operand.  */
    790   AARCH64_OPND_SYSREG128,	/* 128-bit system register operand.  */
    791   AARCH64_OPND_PSTATEFIELD,	/* PSTATE field name operand.  */
    792   AARCH64_OPND_SYSREG_AT,	/* System register <at_op> operand.  */
    793   AARCH64_OPND_SYSREG_DC,	/* System register <dc_op> operand.  */
    794   AARCH64_OPND_SYSREG_IC,	/* System register <ic_op> operand.  */
    795   AARCH64_OPND_SYSREG_TLBI,	/* System register <tlbi_op> operand.  */
    796   AARCH64_OPND_SYSREG_TLBIP,	/* System register <tlbip_op> operand.  */
    797   AARCH64_OPND_SYSREG_SR,	/* System register RCTX operand.  */
    798   AARCH64_OPND_BARRIER,		/* Barrier operand.  */
    799   AARCH64_OPND_BARRIER_DSB_NXS,	/* Barrier operand for DSB nXS variant.  */
    800   AARCH64_OPND_BARRIER_ISB,	/* Barrier operand for ISB.  */
    801   AARCH64_OPND_PRFOP,		/* Prefetch operation.  */
    802   AARCH64_OPND_RPRFMOP,		/* Range prefetch operation.  */
    803   AARCH64_OPND_BARRIER_PSB,	/* Barrier operand for PSB.  */
    804   AARCH64_OPND_BARRIER_GCSB,	/* Barrier operand for GCSB.  */
    805   AARCH64_OPND_BTI_TARGET,	/* BTI {<target>}.  */
    806   AARCH64_OPND_STSHH_POLICY,	/* STSHH {<policy>}.  */
    807   AARCH64_OPND_BRBOP,		/* BRB operation IALL or INJ in bit 5.  */
    808   AARCH64_OPND_Rt_IN_SYS_ALIASES,	/* Defaulted and omitted Rt used in SYS aliases such as brb.  */
    809   AARCH64_OPND_LSE128_Rt,	/* LSE128 <Xt1>.  */
    810   AARCH64_OPND_LSE128_Rt2,	/* LSE128 <Xt2>.  */
    811   AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
    812   AARCH64_OPND_SVE_ADDR_RI_S4x32,   /* SVE [<Xn|SP>, #<simm4>*32].  */
    813   AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
    814   AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
    815   AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
    816   AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
    817   AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
    818   AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
    819   AARCH64_OPND_SVE_ADDR_RI_U6,	    /* SVE [<Xn|SP>, #<uimm6>].  */
    820   AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
    821   AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
    822   AARCH64_OPND_SVE_ADDR_RI_U6x8,    /* SVE [<Xn|SP>, #<uimm6>*8].  */
    823   AARCH64_OPND_SVE_ADDR_RR,	    /* SVE [<Xn|SP>{, <Xm|XZR>}].  */
    824   AARCH64_OPND_SVE_ADDR_RR_LSL1,    /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #1}].  */
    825   AARCH64_OPND_SVE_ADDR_RR_LSL2,    /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #2}].  */
    826   AARCH64_OPND_SVE_ADDR_RR_LSL3,    /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #3}].  */
    827   AARCH64_OPND_SVE_ADDR_RR_LSL4,    /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #4}].  */
    828   AARCH64_OPND_SVE_ADDR_RM,	    /* SVE [<Xn|SP>, <Xm|XZR>].  */
    829   AARCH64_OPND_SVE_ADDR_RM_LSL1,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
    830   AARCH64_OPND_SVE_ADDR_RM_LSL2,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
    831   AARCH64_OPND_SVE_ADDR_RM_LSL3,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
    832   AARCH64_OPND_SVE_ADDR_RM_LSL4,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
    833   AARCH64_OPND_SVE_ADDR_RX,	    /* SVE [<Xn|SP>, <Xm>].  */
    834   AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
    835   AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
    836   AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
    837   AARCH64_OPND_SVE_ADDR_RX_LSL4,    /* SVE [<Xn|SP>, <Xm>, LSL #4].  */
    838   AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
    839   AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
    840   AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
    841   AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
    842   AARCH64_OPND_SVE_ADDR_RZ_LSL3,    /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
    843   AARCH64_OPND_SVE_ADDR_RZ_XTW_14,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
    844 				       Bit 14 controls S/U choice.  */
    845   AARCH64_OPND_SVE_ADDR_RZ_XTW_22,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
    846 				       Bit 22 controls S/U choice.  */
    847   AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
    848 				       Bit 14 controls S/U choice.  */
    849   AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
    850 				       Bit 22 controls S/U choice.  */
    851   AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
    852 				       Bit 14 controls S/U choice.  */
    853   AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
    854 				       Bit 22 controls S/U choice.  */
    855   AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
    856 				       Bit 14 controls S/U choice.  */
    857   AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
    858 				       Bit 22 controls S/U choice.  */
    859   AARCH64_OPND_SVE_ADDR_ZI_U5,	    /* SVE [Zn.<T>, #<uimm5>].  */
    860   AARCH64_OPND_SVE_ADDR_ZI_U5x2,    /* SVE [Zn.<T>, #<uimm5>*2].  */
    861   AARCH64_OPND_SVE_ADDR_ZI_U5x4,    /* SVE [Zn.<T>, #<uimm5>*4].  */
    862   AARCH64_OPND_SVE_ADDR_ZI_U5x8,    /* SVE [Zn.<T>, #<uimm5>*8].  */
    863   AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>].  */
    864   AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>].  */
    865   AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>].  */
    866   AARCH64_OPND_SVE_AIMM,	/* SVE unsigned arithmetic immediate.  */
    867   AARCH64_OPND_SVE_ASIMM,	/* SVE signed arithmetic immediate.  */
    868   AARCH64_OPND_SVE_FPIMM8,	/* SVE 8-bit floating-point immediate.  */
    869   AARCH64_OPND_SVE_I1_HALF_ONE,	/* SVE choice between 0.5 and 1.0.  */
    870   AARCH64_OPND_SVE_I1_HALF_TWO,	/* SVE choice between 0.5 and 2.0.  */
    871   AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
    872   AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
    873   AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
    874   AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
    875   AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
    876   AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
    877   AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
    878   AARCH64_OPND_SVE_PATTERN,	/* SVE vector pattern enumeration.  */
    879   AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
    880   AARCH64_OPND_SVE_PRFOP,	/* SVE prefetch operation.  */
    881   AARCH64_OPND_SVE_Pd,		/* SVE p0-p15 in Pd.  */
    882   AARCH64_OPND_SVE_PNd,		/* SVE pn0-pn15 in Pd.  */
    883   AARCH64_OPND_SVE_Pg3,		/* SVE p0-p7 in Pg.  */
    884   AARCH64_OPND_SVE_Pg4_5,	/* SVE p0-p15 in Pg, bits [8,5].  */
    885   AARCH64_OPND_SVE_Pg4_10,	/* SVE p0-p15 in Pg, bits [13,10].  */
    886   AARCH64_OPND_SVE_PNg4_10,	/* SVE pn0-pn15 in Pg, bits [13,10].  */
    887   AARCH64_OPND_SVE_Pg4_16,	/* SVE p0-p15 in Pg, bits [19,16].  */
    888   AARCH64_OPND_SVE_Pm,		/* SVE p0-p15 in Pm.  */
    889   AARCH64_OPND_SVE_Pn,		/* SVE p0-p15 in Pn.  */
    890   AARCH64_OPND_SVE_PNn,		/* SVE pn0-pn15 in Pn.  */
    891   AARCH64_OPND_SVE_Pt,		/* SVE p0-p15 in Pt.  */
    892   AARCH64_OPND_SVE_PNt,		/* SVE pn0-pn15 in Pt.  */
    893   AARCH64_OPND_SVE_Rm,		/* Integer Rm or ZR, alt. SVE position.  */
    894   AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
    895   AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
    896   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
    897   AARCH64_OPND_SVE_SHLIMM_UNPRED_22,	/* SVE 3 bit shift left unpred.  */
    898   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
    899   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
    900   AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
    901   AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
    902   AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
    903   AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
    904   AARCH64_OPND_SVE_SIMM8,	/* SVE signed 8-bit immediate.  */
    905   AARCH64_OPND_SVE_UIMM3,	/* SVE unsigned 3-bit immediate.  */
    906   AARCH64_OPND_SVE_UIMM7,	/* SVE unsigned 7-bit immediate.  */
    907   AARCH64_OPND_SVE_UIMM8,	/* SVE unsigned 8-bit immediate.  */
    908   AARCH64_OPND_SVE_UIMM8_53,	/* SVE split unsigned 8-bit immediate.  */
    909   AARCH64_OPND_SVE_UIMM4,	/* SVE unsigned 4-bit immediate.  */
    910   AARCH64_OPND_SVE_VZn,		/* Scalar SIMD&FP register in Zn field.  */
    911   AARCH64_OPND_SVE_Vd,		/* Scalar SIMD&FP register in Vd.  */
    912   AARCH64_OPND_SVE_Vm,		/* Scalar SIMD&FP register in Vm.  */
    913   AARCH64_OPND_SVE_Vn,		/* Scalar SIMD&FP register in Vn.  */
    914   AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B).  */
    915   AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H).  */
    916   AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S).  */
    917   AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D).  */
    918   AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B).  */
    919   AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H).  */
    920   AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
    921   AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D).  */
    922   AARCH64_OPND_SME_ZA_ARRAY4, /* Tile to vector, single (BHSDQ).  */
    923   AARCH64_OPND_SVE_Za_5,	/* SVE vector register in Za, bits [9,5].  */
    924   AARCH64_OPND_SVE_Za_16,	/* SVE vector register in Za, bits [20,16].  */
    925   AARCH64_OPND_SVE_Zd,		/* SVE vector register in Zd.  */
    926   AARCH64_OPND_SVE_Zm_5,	/* SVE vector register in Zm, bits [9,5].  */
    927   AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
    928   AARCH64_OPND_SVE_Zm1_23_INDEX, /* SVE bit index in Zm, bit 23.  */
    929   AARCH64_OPND_SVE_Zm2_22_INDEX, /* SVE bit index in Zm, bits [23,22].  */
    930   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
    931   AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
    932   AARCH64_OPND_SVE_Zm3_12_INDEX, /* SVE bit index in Zm, bits 12 plus bit [23,22].  */
    933   AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19.  */
    934   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
    935   AARCH64_OPND_SVE_Zm3_10_INDEX, /* z0-z7[0-15] in Zm3_INDEX plus bit 11:10.  */
    936   AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
    937   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
    938   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
    939   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
    940   AARCH64_OPND_SVE_Zn_5_INDEX,	/* Indexed SVE vector register, for DUPQ.  */
    941   AARCH64_OPND_SVE_ZnxN,	/* SVE vector register list in Zn.  */
    942   AARCH64_OPND_SVE_Zt,		/* SVE vector register in Zt.  */
    943   AARCH64_OPND_SVE_ZtxN,	/* SVE vector register list in Zt.  */
    944   AARCH64_OPND_SME_Zdnx2,	/* SVE vector register list from [4:1]*2.  */
    945   AARCH64_OPND_SME_Zdnx4,	/* SVE vector register list from [4:2]*4.  */
    946   AARCH64_OPND_SME_Zm,		/* SVE vector register list in 4-bit Zm.  */
    947   AARCH64_OPND_SME_Zm_17,	/* SVE vector register list in [20:17].  */
    948   AARCH64_OPND_SME_Zmx2,	/* SVE vector register list from [20:17]*2.  */
    949   AARCH64_OPND_SME_Zmx4,	/* SVE vector register list from [20:18]*4.  */
    950   AARCH64_OPND_SME_Znx2,	/* SVE vector register list from [9:6]*2.  */
    951   AARCH64_OPND_SME_Znx2_BIT_INDEX, /* SVE vector register list encoding a bit index from [9:6]*2.  */
    952   AARCH64_OPND_SME_Znx4,	/* SVE vector register list from [9:7]*4.  */
    953   AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23.  */
    954   AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19.  */
    955   AARCH64_OPND_SME_ZAda_1b,	/* SME <ZAda>.H, 1-bits.  */
    956   AARCH64_OPND_SME_ZAda_2b,	/* SME <ZAda>.S, 2-bits.  */
    957   AARCH64_OPND_SME_ZAda_3b,	/* SME <ZAda>.D, 3-bits.  */
    958   AARCH64_OPND_SME_ZA_HV_idx_src,	/* SME source ZA tile vector.  */
    959   AARCH64_OPND_SME_ZA_HV_idx_srcxN,	/* SME N source ZA tile vectors.  */
    960   AARCH64_OPND_SME_ZA_HV_idx_dest,	/* SME destination ZA tile vector.  */
    961   AARCH64_OPND_SME_ZA_HV_idx_destxN,	/* SME N dest ZA tile vectors.  */
    962   AARCH64_OPND_SME_Pdx2,	/* Predicate register list in [3:1].  */
    963   AARCH64_OPND_SME_PdxN,	/* Predicate register list in [3:0].  */
    964   AARCH64_OPND_SME_Pm,		/* SME scalable predicate register, bits [15:13].  */
    965   AARCH64_OPND_SME_PNd3,	/* Predicate-as-counter register, bits [3:0].  */
    966   AARCH64_OPND_SME_PNg3,	/* Predicate-as-counter register, bits [12:10].  */
    967   AARCH64_OPND_SME_PNn,		/* Predicate-as-counter register, bits [8:5].  */
    968   AARCH64_OPND_SME_PNn3_INDEX1,	/* Indexed pred-as-counter reg, bits [8:5].  */
    969   AARCH64_OPND_SME_PNn3_INDEX2,	/* Indexed pred-as-counter reg, bits [9:5].  */
    970   AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
    971   AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector.  */
    972   AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3].  */
    973   AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1].  */
    974   AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3].  */
    975   AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}].  */
    976   AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}].  */
    977   AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1].  */
    978   AARCH64_OPND_SME_ZA_array_off4,   /* SME ZA[<Wv>{, #<imm>}].  */
    979   AARCH64_OPND_SME_ADDR_RI_U4xVL,   /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
    980   AARCH64_OPND_SME_SM_ZA,           /* SME {SM | ZA}.  */
    981   AARCH64_OPND_SME_PnT_Wm_imm,      /* SME <Pn>.<T>[<Wm>, #<imm>].  */
    982   AARCH64_OPND_SME_SHRIMM4,	    /* 4-bit right shift, bits [19:16].  */
    983   AARCH64_OPND_SME_SHRIMM5,	    /* size + 5-bit right shift, bits [23:22,20:16].  */
    984   AARCH64_OPND_SME_Zm_INDEX1,	    /* Zn.T[index], bits [19:16,10].  */
    985   AARCH64_OPND_SME_Zm_INDEX2,	    /* Zn.T[index], bits [19:16,11:10].  */
    986   AARCH64_OPND_SME_Zm_INDEX2_3,	    /* Zn.T[index], bits [19:16,10,3].  */
    987   AARCH64_OPND_SME_Zm_INDEX3_1,     /* Zn.T[index], bits [19:16,10,2:1].  */
    988   AARCH64_OPND_SME_Zm_INDEX3_2,     /* Zn.T[index], bits [19:16,11:10,2].  */
    989   AARCH64_OPND_SME_Zm_INDEX3_3,     /* Zn.T[index], bits [19:16,11:10,3].  */
    990   AARCH64_OPND_SME_Zm_INDEX3_10,    /* Zn.T[index], bits [19:16,15,11:10].  */
    991   AARCH64_OPND_SME_Zm_INDEX4_1,     /* Zn.T[index], bits [19:16,11:10,2:1].  */
    992   AARCH64_OPND_SME_Zm_INDEX4_2,     /* Zn.T[index], bits [19:16,11:10,3:2].  */
    993   AARCH64_OPND_SME_Zm_INDEX4_3,     /* Zn.T[index], bits [19:16,15,11,10,3].  */
    994   AARCH64_OPND_SME_Zm_INDEX4_10,    /* Zn.T[index], bits [19:16,15,12:10].  */
    995   AARCH64_OPND_SME_Zn_INDEX1_16,    /* Zn[index], bits [9:5] and [16:16].  */
    996   AARCH64_OPND_SME_Zn_INDEX2_15,    /* Zn[index], bits [9:5] and [16:15].  */
    997   AARCH64_OPND_SME_Zn_INDEX2_16,    /* Zn[index], bits [9:5] and [17:16].  */
    998   AARCH64_OPND_SME_Zn_INDEX2_19,    /* Zn[index], bits [9:5] and [20:19].  */
    999   AARCH64_OPND_SME_Zn_INDEX3_14,    /* Zn[index], bits [9:5] and [16:14].  */
   1000   AARCH64_OPND_SME_Zn_INDEX3_15,    /* Zn[index], bits [9:5] and [17:15].  */
   1001   AARCH64_OPND_SME_Zn_INDEX4_14,    /* Zn[index], bits [9:5] and [17:14].  */
   1002   AARCH64_OPND_SVE_Zn0_INDEX,	    /* Zn[index], bits [9:5].  */
   1003   AARCH64_OPND_SVE_Zn1_17_INDEX,    /* Zn[index], bits [9:5,17].  */
   1004   AARCH64_OPND_SVE_Zn2_18_INDEX,    /* Zn[index], bits [9:5,18:17].  */
   1005   AARCH64_OPND_SVE_Zn3_22_INDEX,    /* Zn[index], bits [9:5,18:17,22].  */
   1006   AARCH64_OPND_SVE_Zd0_INDEX,	    /* Zn[index], bits [4:0].  */
   1007   AARCH64_OPND_SVE_Zd1_17_INDEX,    /* Zn[index], bits [4:0,17].  */
   1008   AARCH64_OPND_SVE_Zd2_18_INDEX,    /* Zn[index], bits [4:0,18:17].  */
   1009   AARCH64_OPND_SVE_Zd3_22_INDEX,    /* Zn[index], bits [4:0,18:17,22].  */
   1010   AARCH64_OPND_SME_VLxN_10,	/* VLx2 or VLx4, in bit 10.  */
   1011   AARCH64_OPND_SME_VLxN_13,	/* VLx2 or VLx4, in bit 13.  */
   1012   AARCH64_OPND_SME_ZT0,		/* The fixed token zt0/ZT0 (not encoded).  */
   1013   AARCH64_OPND_SME_ZT0_INDEX,	/* ZT0[<imm>], bits [14:12].  */
   1014   AARCH64_OPND_SME_ZT0_INDEX_MUL_VL,/* ZT0[<imm>], bits [13:12].  */
   1015   AARCH64_OPND_SME_ZT0_LIST,	/* { zt0/ZT0 } (not encoded).  */
   1016   AARCH64_OPND_TME_UIMM16,	/* TME unsigned 16-bit immediate.  */
   1017   AARCH64_OPND_SM3_IMM2,	/* SM3 encodes lane in bits [13, 14].  */
   1018   AARCH64_OPND_MOPS_ADDR_Rd,	/* [Rd]!, in bits [0, 4].  */
   1019   AARCH64_OPND_MOPS_ADDR_Rs,	/* [Rs]!, in bits [16, 20].  */
   1020   AARCH64_OPND_MOPS_WB_Rn,	/* Rn!, in bits [5, 9].  */
   1021   AARCH64_OPND_CSSC_SIMM8,	/* CSSC signed 8-bit immediate.  */
   1022   AARCH64_OPND_CSSC_UIMM8,	/* CSSC unsigned 8-bit immediate.  */
   1023   AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND,   /* [<Xn|SP>]{, #<imm>}.  */
   1024   AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!.  */
   1025   AARCH64_OPND_RCPC3_ADDR_POSTIND,	 /* [<Xn|SP>], #<imm>.  */
   1026   AARCH64_OPND_RCPC3_ADDR_PREIND_WB, 	 /* [<Xn|SP>, #<imm>]!.  */
   1027   AARCH64_OPND_RCPC3_ADDR_OFFSET,
   1028 };
   1029 
    1030 /* Qualifier constrains an operand.  It either specifies a variant of an
    1031    operand type or limits values available to an operand type.
    1032 
    1033    N.B. Order is important.
    1034    Keep aarch64_opnd_qualifiers (opcodes/aarch64-opc.c) synced.  */
    1035 
    1036 enum aarch64_opnd_qualifier
    1037 {
    1038   /* Indicating no further qualification on an operand.  */
    1039   AARCH64_OPND_QLF_NIL,
    1040 
    1041   /* Qualifying an operand which is a general purpose (integer) register;
    1042      indicating the operand data size or a specific register.  */
    1043   AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
    1044   AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
    1045   AARCH64_OPND_QLF_WSP,	/* WSP.  */
    1046   AARCH64_OPND_QLF_SP,	/* SP.  */
    1047 
    1048   /* Qualifying an operand which is a floating-point register, a SIMD
    1049      vector element or a SIMD vector element list; indicating operand data
    1050      size or the size of each SIMD vector element in the case of a SIMD
    1051      vector element list.
    1052      These qualifiers are also used to qualify an address operand to
    1053      indicate the size of data element a load/store instruction is
    1054      accessing.
    1055      They are also used for the immediate shift operand in e.g. SSHR.  Such
    1056      a use is only for the ease of operand encoding/decoding and qualifier
    1057      sequence matching; such a use should not be applied widely; use the value
    1058      constraint qualifiers for immediate operands wherever possible.  */
    1059   AARCH64_OPND_QLF_S_B,	/* 8-bit.  */
    1060   AARCH64_OPND_QLF_S_H,	/* 16-bit.  */
    1061   AARCH64_OPND_QLF_S_S,	/* 32-bit.  */
    1062   AARCH64_OPND_QLF_S_D,	/* 64-bit.  */
    1063   AARCH64_OPND_QLF_S_Q,	/* 128-bit.  */
    1064   /* These type qualifiers have a special meaning in that they mean 2 x 1 byte,
    1065      4 x 1 byte or 2 x 2 byte are selected by the instruction.  Other than that
    1066      they have no difference with AARCH64_OPND_QLF_S_B in encoding.  They are
    1067      here purely for syntactical reasons and are an exception from the normal
    1068      AArch64 disassembly scheme.  */
    1069   AARCH64_OPND_QLF_S_2B,
    1070   AARCH64_OPND_QLF_S_4B,
    1071   AARCH64_OPND_QLF_S_2H,
    1072 
    1073   /* Qualifying an operand which is a SIMD vector register or a SIMD vector
    1074      register list; indicating register shape.
    1075      They are also used for the immediate shift operand in e.g. SSHR.  Such
    1076      a use is only for the ease of operand encoding/decoding and qualifier
    1077      sequence matching; such a use should not be applied widely; use the value
    1078      constraint qualifiers for immediate operands wherever possible.  */
    1079   AARCH64_OPND_QLF_V_4B,
    1080   AARCH64_OPND_QLF_V_8B,
    1081   AARCH64_OPND_QLF_V_16B,
    1082   AARCH64_OPND_QLF_V_2H,
    1083   AARCH64_OPND_QLF_V_4H,
    1084   AARCH64_OPND_QLF_V_8H,
    1085   AARCH64_OPND_QLF_V_2S,
    1086   AARCH64_OPND_QLF_V_4S,
    1087   AARCH64_OPND_QLF_V_1D,
    1088   AARCH64_OPND_QLF_V_2D,
    1089   AARCH64_OPND_QLF_V_1Q,
    1090 
           /* Qualifying a predicate operand: zeroing (/Z) or merging (/M)
              predication.  NOTE(review): confirm semantics against
              opcodes/aarch64-opc.c.  */
    1091   AARCH64_OPND_QLF_P_Z,
    1092   AARCH64_OPND_QLF_P_M,
    1093 
    1094   /* Used for signed immediates that are scaled by a Tag granule,
    1095      as in stg, st2g, etc.  */
    1096   AARCH64_OPND_QLF_imm_tag,
    1097 
    1098   /* Constraint on value: imm_<lo>_<hi> restricts an immediate operand to
    1099      the inclusive range [lo, hi].  */
    1099   AARCH64_OPND_QLF_CR,		/* CRn, CRm. */
    1100   AARCH64_OPND_QLF_imm_0_7,
    1101   AARCH64_OPND_QLF_imm_0_15,
    1102   AARCH64_OPND_QLF_imm_0_31,
    1103   AARCH64_OPND_QLF_imm_0_63,
    1104   AARCH64_OPND_QLF_imm_1_32,
    1105   AARCH64_OPND_QLF_imm_1_64,
    1106 
    1107   /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
    1108      or shift-ones.  */
    1109   AARCH64_OPND_QLF_LSL,
    1110   AARCH64_OPND_QLF_MSL,
    1111 
    1112   /* Special qualifier helping retrieve qualifier information during the
    1113      decoding time (currently not in use).  */
    1114   AARCH64_OPND_QLF_RETRIEVE,
    1115 
    1116   /* Special qualifier used for indicating error in qualifier retrieval.  */
    1117   AARCH64_OPND_QLF_ERR,
    1118 } ATTRIBUTE_PACKED;
   1119 
    1120 /* Instruction class.  NOTE(review): these appear to group instructions
	    that share an encoding/operand scheme -- "asimd*" AdvSIMD vector
	    groups, "asisd*" AdvSIMD scalar groups, "sve*"/"sme*" the scalable
	    extensions; confirm against the opcode table.  */
    1122 
    1123 enum aarch64_insn_class
    1124 {
    1125   aarch64_misc,
    1126   addsub_carry,
    1127   addsub_ext,
    1128   addsub_imm,
    1129   addsub_shift,
    1130   asimdall,
    1131   asimddiff,
    1132   asimdelem,
    1133   asimdext,
    1134   asimdimm,
    1135   asimdins,
    1136   asimdmisc,
    1137   asimdperm,
    1138   asimdsame,
    1139   asimdshf,
    1140   asimdtbl,
    1141   asisddiff,
    1142   asisdelem,
    1143   asisdlse,
    1144   asisdlsep,
    1145   asisdlso,
    1146   asisdlsop,
    1147   asisdmisc,
    1148   asisdone,
    1149   asisdpair,
    1150   asisdsame,
    1151   asisdshf,
    1152   bitfield,
    1153   branch_imm,
    1154   branch_reg,
    1155   compbranch,
    1156   condbranch,
    1157   condcmp_imm,
    1158   condcmp_reg,
    1159   condsel,
    1160   cryptoaes,
    1161   cryptosha2,
    1162   cryptosha3,
    1163   dp_1src,
    1164   dp_2src,
    1165   dp_3src,
    1166   exception,
    1167   extract,
    1168   float2fix,
    1169   float2int,
    1170   floatccmp,
    1171   floatcmp,
    1172   floatdp1,
    1173   floatdp2,
    1174   floatdp3,
    1175   floatimm,
    1176   floatsel,
    1177   fprcvtfloat2int,
    1178   fprcvtint2float,
    1179   ldst_immpost,
    1180   ldst_immpre,
    1181   ldst_imm9,	/* immpost or immpre */
    1182   ldst_imm10,	/* LDRAA/LDRAB */
    1183   ldst_pos,
    1184   ldst_regoff,
    1185   ldst_unpriv,
    1186   ldst_unscaled,
    1187   ldstexcl,
    1188   ldstnapair_offs,
    1189   ldstpair_off,
    1190   ldstpair_indexed,
    1191   loadlit,
    1192   log_imm,
    1193   log_shift,
    1194   lse_atomic,
    1195   lse128_atomic,
    1196   movewide,
    1197   pcreladdr,
    1198   ic_system,
    1199   sme_fp_sd,
    1200   sme_int_sd,
    1201   sme_misc,
    1202   sme_mov,
    1203   sme_ldr,
    1204   sme_psel,
    1205   sme_shift,
    1206   sme_size_12_bh,
    1207   sme_size_12_bhs,
    1208   sme_size_12_hs,
    1209   sme_size_12_b,
    1210   sme_size_22,
    1211   sme_size_22_hsd,
    1212   sme_sz_23,
    1213   sme_str,
    1214   sme_start,
    1215   sme_stop,
    1216   sme2_mov,
    1217   sme2_movaz,
    1218   sve_cpy,
    1219   sve_index,
    1220   sve_limm,
    1221   sve_misc,
    1222   sve_movprfx,
    1223   sve_pred_zm,
    1224   sve_shift_pred,
    1225   sve_shift_unpred,
    1226   sve_size_bh,
    1227   sve_size_bhs,
    1228   sve_size_bhsd,
    1229   sve_size_hsd,
    1230   sve_size_hsd2,
    1231   sve_size_hsd3,
    1232   sve_size_sd,
    1233   sve_size_sd2,
    1234   sve_size_sd3,
    1235   sve_size_sd4,
    1236   sve_size_13,
    1237   sve_shift_tsz_hsd,
    1238   sve_shift_tsz_bhsd,
    1239   sve_size_tsz_bhs,
    1240   testbranch,
    1241   cryptosm3,
    1242   cryptosm4,
    1243   dotproduct,
    1244   bfloat16,
    1245   cssc,
    1246   gcs,
    1247   the,
    1248   sve2_urqvs,
    1249   sve_index1,
    1250   rcpc3,
    1251   lut,
    1252   last_iclass = lut	/* Must alias the final class above; update when
				   adding new classes.  */
    1253 };
   1254 
    1255 /* Opcode enumerators.  Identify individual opcode table entries,
	    including alias forms that need special assembly/disassembly
	    treatment.  */
    1256 
    1257 enum aarch64_op
    1258 {
    1259   OP_NIL,
           /* Load/store, immediate-offset forms.  NOTE(review): the _POS
              suffix presumably denotes the positive/unsigned scaled-offset
              addressing form -- confirm against the opcode table.  */
    1260   OP_STRB_POS,
    1261   OP_LDRB_POS,
    1262   OP_LDRSB_POS,
    1263   OP_STRH_POS,
    1264   OP_LDRH_POS,
    1265   OP_LDRSH_POS,
    1266   OP_STR_POS,
    1267   OP_LDR_POS,
    1268   OP_STRF_POS,
    1269   OP_LDRF_POS,
    1270   OP_LDRSW_POS,
    1271   OP_PRFM_POS,
    1272 
           /* Load/store, unscaled-offset (LDUR/STUR family) forms.  */
    1273   OP_STURB,
    1274   OP_LDURB,
    1275   OP_LDURSB,
    1276   OP_STURH,
    1277   OP_LDURH,
    1278   OP_LDURSH,
    1279   OP_STUR,
    1280   OP_LDUR,
    1281   OP_STURV,
    1282   OP_LDURV,
    1283   OP_LDURSW,
    1284   OP_PRFUM,
    1285 
           /* Load (literal): PC-relative forms.  */
    1286   OP_LDR_LIT,
    1287   OP_LDRV_LIT,
    1288   OP_LDRSW_LIT,
    1289   OP_PRFM_LIT,
    1290 
    1291   OP_ADD,
    1292   OP_B,
    1293   OP_BL,
    1294 
           /* Move wide immediate.  */
    1295   OP_MOVN,
    1296   OP_MOVZ,
    1297   OP_MOVK,
    1298 
    1299   OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
    1300   OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
    1301   OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */
    1302 
    1303   OP_MOV_V,		/* MOV alias for moving vector register.  */
    1304 
           /* Shift-by-immediate aliases.  */
    1305   OP_ASR_IMM,
    1306   OP_LSR_IMM,
    1307   OP_LSL_IMM,
    1308 
    1309   OP_BIC,
    1310 
           /* Bitfield insert/extract aliases.  */
    1311   OP_UBFX,
    1312   OP_BFXIL,
    1313   OP_SBFX,
    1314   OP_SBFIZ,
    1315   OP_BFI,
    1316   OP_BFC,		/* ARMv8.2.  */
    1317   OP_UBFIZ,
    1318   OP_UXTB,
    1319   OP_UXTH,
    1320   OP_UXTW,
    1321 
           /* Conditional aliases.  */
    1322   OP_CINC,
    1323   OP_CINV,
    1324   OP_CNEG,
    1325   OP_CSET,
    1326   OP_CSETM,
    1327 
           /* Floating-point conversions.  */
    1328   OP_FCVT,
    1329   OP_FCVTN,
    1330   OP_FCVTN2,
    1331   OP_FCVTL,
    1332   OP_FCVTL2,
    1333   OP_FCVTXN_S,		/* Scalar version.  */
    1334 
    1335   OP_ROR_IMM,
    1336 
           /* Vector lengthening aliases.  NOTE(review): presumably aliases of
              SSHLL/USHLL with a zero shift -- confirm.  */
    1337   OP_SXTL,
    1338   OP_SXTL2,
    1339   OP_UXTL,
    1340   OP_UXTL2,
    1341 
           /* SVE MOV/MOVS/NOT alias forms (P = predicate, Z = vector,
              PN = predicate-as-counter operands).  */
    1342   OP_MOV_P_P,
    1343   OP_MOV_PN_PN,
    1344   OP_MOV_Z_P_Z,
    1345   OP_MOV_Z_V,
    1346   OP_MOV_Z_Z,
    1347   OP_MOV_Z_Zi,
    1348   OP_MOVM_P_P_P,
    1349   OP_MOVS_P_P,
    1350   OP_MOVZS_P_P_P,
    1351   OP_MOVZ_P_P_P,
    1352   OP_NOTS_P_P_P_Z,
    1353   OP_NOT_P_P_P_Z,
    1354 
    1355   OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */
    1356 
    1357   OP_TOTAL_NUM,		/* Pseudo.  Keep last.  */
    1358 };
   1359 
   1360 /* Error types.  */
enum err_type
{
  /* No error.  */
  ERR_OK,
  /* Undefined instruction.  */
  ERR_UND,
  /* Unpredictable instruction.  */
  ERR_UNP,
  /* Support not yet implemented.  */
  ERR_NYI,
  /* Rejected by an opcode verifier (see aarch64_opcode::verifier).  */
  ERR_VFI,
  /* Number of enumerators; keep last.  */
  ERR_NR_ENTRIES
};
   1370 
   1371 /* Maximum number of operands an instruction can have.  */
   1372 #define AARCH64_MAX_OPND_NUM 7
   1373 /* Maximum number of qualifier sequences an instruction can have.  */
   1374 #define AARCH64_MAX_QLF_SEQ_NUM 10
   1375 /* Operand qualifier typedef  */
   1376 typedef enum aarch64_opnd_qualifier aarch64_opnd_qualifier_t;
   1377 /* Operand qualifier sequence typedef.  */
   1378 typedef aarch64_opnd_qualifier_t	\
   1379 	  aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
   1380 
   1381 /* FIXME: improve the efficiency.  */
   1382 static inline bool
   1383 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
   1384 {
   1385   int i;
   1386   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   1387     if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
   1388       return false;
   1389   return true;
   1390 }
   1391 
   1392 /*  Forward declare error reporting type.  */
   1393 typedef struct aarch64_operand_error aarch64_operand_error;
   1394 /* Forward declare instruction sequence type.  */
   1395 typedef struct aarch64_instr_sequence aarch64_instr_sequence;
   1396 /* Forward declare instruction definition.  */
   1397 typedef struct aarch64_inst aarch64_inst;
   1398 
   1399 /* This structure holds information for a particular opcode.  */
   1400 
struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (F_* bits).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks
     (C_* bits).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
   1451 
   1452 typedef struct aarch64_opcode aarch64_opcode;
   1453 
   1454 /* Table describing all the AArch64 opcodes.  */
   1455 extern const aarch64_opcode aarch64_opcode_table[];
   1456 
   1457 /* Opcode flags.  */
   1458 #define F_ALIAS (1 << 0)
   1459 #define F_HAS_ALIAS (1 << 1)
   1460 /* Disassembly preference priority 1-3 (the larger the higher).  If nothing
   1461    is specified, it is the priority 0 by default, i.e. the lowest priority.  */
   1462 #define F_P1 (1 << 2)
   1463 #define F_P2 (2 << 2)
   1464 #define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
   1466 #define F_COND (1 << 4)
   1467 /* Instruction has the field of 'sf'.  */
   1468 #define F_SF (1 << 5)
   1469 /* Instruction has the field of 'size:Q'.  */
   1470 #define F_SIZEQ (1 << 6)
   1471 /* Floating-point instruction has the field of 'type'.  */
   1472 #define F_FPTYPE (1 << 7)
   1473 /* AdvSIMD scalar instruction has the field of 'size'.  */
   1474 #define F_SSIZE (1 << 8)
   1475 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
   1476 #define F_T (1 << 9)
   1477 /* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
   1478 #define F_GPRSIZE_IN_Q (1 << 10)
   1479 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
   1480 #define F_LDS_SIZE (1 << 11)
   1481 /* Optional operand; assume maximum of 1 operand can be optional.  */
   1482 #define F_OPD0_OPT (1 << 12)
   1483 #define F_OPD1_OPT (2 << 12)
   1484 #define F_OPD2_OPT (3 << 12)
   1485 #define F_OPD3_OPT (4 << 12)
   1486 #define F_OPD4_OPT (5 << 12)
   1487 /* Default value for the optional operand when omitted from the assembly.  */
   1488 #define F_DEFAULT(X) (((X) & 0x1f) << 15)
   1489 /* Instruction that is an alias of another instruction needs to be
   1490    encoded/decoded by converting it to/from the real form, followed by
   1491    the encoding/decoding according to the rules of the real opcode.
   1492    This compares to the direct coding using the alias's information.
   1493    N.B. this flag requires F_ALIAS to be used together.  */
   1494 #define F_CONV (1 << 20)
   1495 /* Use together with F_ALIAS to indicate an alias opcode is a programmer
   1496    friendly pseudo instruction available only in the assembly code (thus will
   1497    not show up in the disassembly).  */
   1498 #define F_PSEUDO (1 << 21)
   1499 /* Instruction has miscellaneous encoding/decoding rules.  */
   1500 #define F_MISC (1 << 22)
   1501 /* Instruction has the field of 'N'; used in conjunction with F_SF.  */
   1502 #define F_N (1 << 23)
   1503 /* Opcode dependent field.  */
   1504 #define F_OD(X) (((X) & 0x7) << 24)
   1505 /* Instruction has the field of 'sz'.  */
   1506 #define F_LSE_SZ (1 << 27)
   1507 /* Require an exact qualifier match, even for NIL qualifiers.  */
   1508 #define F_STRICT (1ULL << 28)
   1509 /* This system instruction is used to read system registers.  */
   1510 #define F_SYS_READ (1ULL << 29)
   1511 /* This system instruction is used to write system registers.  */
   1512 #define F_SYS_WRITE (1ULL << 30)
   1513 /* This instruction has an extra constraint on it that imposes a requirement on
   1514    subsequent instructions.  */
   1515 #define F_SCAN (1ULL << 31)
   1516 /* Instruction takes a pair of optional operands.  If we specify the Nth operand
   1517    to be optional, then we also implicitly specify (N+1)th operand to also be
   1518    optional.  */
   1519 #define F_OPD_PAIR_OPT (1ULL << 32)
   1520 /* This instruction does not allow the full range of values that the
   1521    width of fields in the assembler instruction would theoretically
   1522    allow.  This impacts the constraints on assembly but yields no
   1523    impact on disassembly.  */
   1524 #define F_OPD_NARROW (1ULL << 33)
   1525 /* For the instruction with size[22:23] field.  */
   1526 #define F_OPD_SIZE (1ULL << 34)
   1527 /* RCPC3 instruction has the field of 'size'.  */
   1528 #define F_RCPC3_SIZE (1ULL << 35)
/* This instruction mandatorily needs VGx2 or VGx4 in the operand passed to
   the assembler.  */
   1531 #define F_VG_REQ (1ULL << 36)
   1532 
   1533 /* 4-bit flag field to indicate subclass of instructions.
   1534    Note the overlap between the set of subclass flags in each logical category
   1535    (F_LDST_*, F_ARITH_*, F_BRANCH_* etc.);  The usage of flags as
   1536    iclass-specific enums is intentional.  */
   1537 #define F_SUBCLASS (15ULL << 37)
   1538 
   1539 #define F_LDST_LOAD (1ULL << 37)
   1540 #define F_LDST_STORE (2ULL << 37)
   1541 /* Subclasses to denote add, sub and mov insns.  */
   1542 #define F_ARITH_ADD (1ULL << 37)
   1543 #define F_ARITH_SUB (2ULL << 37)
   1544 #define F_ARITH_MOV (3ULL << 37)
   1545 /* Subclasses to denote call and ret insns.  */
   1546 #define F_BRANCH_CALL (1ULL << 37)
   1547 #define F_BRANCH_RET (2ULL << 37)
   1548 /* Subclass to denote that only tag update is involved.  */
   1549 #define F_DP_TAG_ONLY (1ULL << 37)
   1550 
   1551 #define F_SUBCLASS_OTHER (F_SUBCLASS)
   1552 
   1553 /* For LSFE instructions with size[30:31] field.  */
   1554 #define F_LSFE_SZ (1ULL << 41)
   1555 
/* When parsing immediate values, register names should not be misinterpreted
   as symbols.  However, for backwards compatibility we need to permit some
   newer register names within older instructions.  These flags specify which
   register names are invalid as immediate values, and are required for all
   instructions with immediate operands (and are otherwise ignored).  */
   1561 #define F_INVALID_IMM_SYMS (3ULL << 42)
   1562 
   1563 /* Any GP or SIMD register except WSP/SP.  */
   1564 #define F_INVALID_IMM_SYMS_1 (1ULL << 42)
   1565 
   1566 /* As above, plus WSP/SP, and Z and P registers.  */
   1567 #define F_INVALID_IMM_SYMS_2 (2ULL << 42)
   1568 
   1569 /* As above, plus PN registers.  */
   1570 #define F_INVALID_IMM_SYMS_3 (3ULL << 42)
   1571 
   1572 /* Next bit is 44.  */
   1573 
   1574 /* Instruction constraints.  */
   1575 /* This instruction has a predication constraint on the instruction at PC+4.  */
   1576 #define C_SCAN_MOVPRFX (1U << 0)
   1577 /* This instruction's operation width is determined by the operand with the
   1578    largest element size.  */
   1579 #define C_MAX_ELEM (1U << 1)
   1580 #define C_SCAN_MOPS_P (1U << 2)
   1581 #define C_SCAN_MOPS_M (2U << 2)
   1582 #define C_SCAN_MOPS_E (3U << 2)
   1583 #define C_SCAN_MOPS_PME (3U << 2)
   1584 /* Next bit is 4.  */
   1585 
   1586 static inline bool
   1587 alias_opcode_p (const aarch64_opcode *opcode)
   1588 {
   1589   return (opcode->flags & F_ALIAS) != 0;
   1590 }
   1591 
   1592 static inline bool
   1593 opcode_has_alias (const aarch64_opcode *opcode)
   1594 {
   1595   return (opcode->flags & F_HAS_ALIAS) != 0;
   1596 }
   1597 
   1598 /* Priority for disassembling preference.  */
   1599 static inline int
   1600 opcode_priority (const aarch64_opcode *opcode)
   1601 {
   1602   return (opcode->flags >> 2) & 0x3;
   1603 }
   1604 
   1605 static inline bool
   1606 pseudo_opcode_p (const aarch64_opcode *opcode)
   1607 {
   1608   return (opcode->flags & F_PSEUDO) != 0lu;
   1609 }
   1610 
   1611 /* Whether the opcode has the specific subclass flag.
   1612    N.B. The overlap between F_LDST_*, F_ARITH_*, and F_BRANCH_* etc. subclass
   1613    flags means that the callers of this function have the responsibility of
   1614    checking for the flags appropriate for the specific iclass.  */
   1615 static inline bool
   1616 aarch64_opcode_subclass_p (const aarch64_opcode *opcode, uint64_t flag)
   1617 {
   1618   return ((opcode->flags & F_SUBCLASS) == flag);
   1619 }
   1620 
   1621 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case
   1622    by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
   1623    [IDX, IDX + 1].  */
   1624 static inline bool
   1625 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
   1626 {
   1627   if (opcode->flags & F_OPD_PAIR_OPT)
   1628     return (((opcode->flags >> 12) & 0x7) == idx
   1629 	    || ((opcode->flags >> 12) & 0x7) == idx + 1);
   1630   return ((opcode->flags >> 12) & 0x7) == idx + 1;
   1631 }
   1632 
   1633 static inline aarch64_insn
   1634 get_optional_operand_default_value (const aarch64_opcode *opcode)
   1635 {
   1636   return (opcode->flags >> 15) & 0x1f;
   1637 }
   1638 
   1639 static inline unsigned int
   1640 get_opcode_dependent_value (const aarch64_opcode *opcode)
   1641 {
   1642   return (opcode->flags >> 24) & 0x7;
   1643 }
   1644 
   1645 static inline bool
   1646 get_opcode_dependent_vg_status (const aarch64_opcode *opcode)
   1647 {
   1648   return (opcode->flags >> 36) & 0x1;
   1649 }
   1650 
   1651 static inline bool
   1652 opcode_has_special_coder (const aarch64_opcode *opcode)
   1653 {
   1654   return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
   1655 	  | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
   1656 	  | F_OPD_SIZE | F_RCPC3_SIZE | F_LSFE_SZ )) != 0;
   1657 }
   1658 
/* Generic name/encoding pair; used for the operand modifier, barrier option,
   prefetch operation and hint option tables declared below.  */
struct aarch64_name_value_pair
{
  const char *  name;
  aarch64_insn	value;
};
   1665 
   1666 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
   1667 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
   1668 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
   1669 extern const struct aarch64_name_value_pair aarch64_prfops [32];
   1670 extern const struct aarch64_name_value_pair aarch64_hint_options [];
   1671 
   1672 #define AARCH64_MAX_SYSREG_NAME_LEN 32
   1673 
/* An entry in a system-register table (aarch64_sys_regs,
   aarch64_pstatefields).  */
typedef struct
{
  const char *  name;
  aarch64_insn	value;
  uint32_t	flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
   1684 
   1685 extern const aarch64_sys_reg aarch64_sys_regs [];
   1686 extern const aarch64_sys_reg aarch64_pstatefields [];
   1687 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
   1688 extern bool aarch64_sys_reg_128bit_p (const uint32_t);
   1689 extern bool aarch64_sys_reg_alias_p (const uint32_t);
   1690 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
   1691 					     const aarch64_sys_reg *);
   1692 
   1693 typedef struct
   1694 {
   1695   const char *name;
   1696   uint32_t value;
   1697   uint32_t flags ;
   1698 
   1699   /* A set of features, all of which are required for this system instruction to be
   1700      available.  */
   1701   aarch64_feature_set features;
   1702 } aarch64_sys_ins_reg;
   1703 
   1704 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
   1705 extern bool
   1706 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
   1707 				 const char *reg_name,
   1708 				 uint32_t, const aarch64_feature_set *);
   1709 
   1710 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
   1711 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
   1712 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
   1713 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
   1714 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
   1715 
   1716 /* Shift/extending operator kinds.
   1717    N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  /* Shift operators.  */
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  /* Extend operators (see aarch64_extend_operator_p below).  */
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  /* Multipliers (MUL, MUL VL).  */
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
   1737 
   1738 bool
   1739 aarch64_extend_operator_p (enum aarch64_modifier_kind);
   1740 
   1741 enum aarch64_modifier_kind
   1742 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
   1743 /* Condition.  */
   1744 
typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;
   1752 
   1753 extern const aarch64_cond aarch64_conds[16];
   1754 
   1755 const aarch64_cond* get_cond_from_value (aarch64_insn value);
   1756 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
   1757 
   1758 /* Information about a reference to part of ZA.  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The index into the tile or ZA: a 32-bit base register plus an
     immediate offset (or range of offsets).  */
  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size (e.g. for VGx2/VGx4), or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
   1786 
   1787 /* Information about a list of registers.  */
struct aarch64_reglist
{
  /* The number of the first register in the list.  */
  unsigned first_regno : 8;
  /* The number of registers in the list.  */
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if it is a list of reg elements.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
   1799 
   1800 /* Structure representing an operand.  */
   1801 
struct aarch64_opnd_info
{
  /* Operand code, selecting which member of the union below is active.  */
  enum aarch64_opnd type;
  /* Qualifier refining TYPE, e.g. the register width/arrangement.  */
  aarch64_opnd_qualifier_t qualifier;
  /* Position of this operand in the instruction's operand list.  */
  int idx;

  union
    {
      struct
	{
	  unsigned regno;
	} reg;
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
      struct aarch64_indexed_za indexed_za;

      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};
   1889 
   1890 typedef struct aarch64_opnd_info aarch64_opnd_info;
   1891 
   1892 /* Structure representing an instruction.
   1893 
   1894    It is used during both the assembling and disassembling.  The assembler
   1895    fills an aarch64_inst after a successful parsing and then passes it to the
   1896    encoding routine to do the encoding.  During the disassembling, the
   1897    disassembler calls the decoding routine to decode a binary instruction; on a
   1898    successful return, such a structure will be filled with information of the
   1899    instruction; then the disassembler uses the information to print out the
   1900    instruction.  */
   1901 
struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditionally-executed instruction, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information, parallel to the operand codes in
     OPCODE->operands.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
   1916 
   1917 /* Defining the HINT #imm values for the aarch64_hint_options.  */
   1918 #define HINT_OPD_CSYNC	0x11
   1919 #define HINT_OPD_DSYNC	0x13
   1920 #define HINT_OPD_C	0x22
   1921 #define HINT_OPD_J	0x24
   1922 #define HINT_OPD_JC	0x26
   1923 #define HINT_OPD_KEEP	0x30
   1924 #define HINT_OPD_STRM	0x31
   1925 #define HINT_OPD_NULL	0x00
   1926 
   1927 
   1928 /* Diagnosis related declaration and interface.  */
   1930 
   1931 /* Operand error kind enumerators.
   1932 
   1933    AARCH64_OPDE_RECOVERABLE
   1934      Less severe error found during the parsing, very possibly because that
   1935      GAS has picked up a wrong instruction template for the parsing.
   1936 
   1937    AARCH64_OPDE_A_SHOULD_FOLLOW_B
   1938      The instruction forms (or is expected to form) part of a sequence,
   1939      but the preceding instruction in the sequence wasn't the expected one.
   1940      The message refers to two strings: the name of the current instruction,
   1941      followed by the name of the expected preceding instruction.
   1942 
   1943    AARCH64_OPDE_EXPECTED_A_AFTER_B
   1944      Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
   1945      so that the current instruction is assumed to be the incorrect one:
   1946      "since the previous instruction was B, the current one should be A".
   1947 
   1948    AARCH64_OPDE_SYNTAX_ERROR
   1949      General syntax error; it can be either a user error, or simply because
   1950      that GAS is trying a wrong instruction template.
   1951 
   1952    AARCH64_OPDE_FATAL_SYNTAX_ERROR
   1953      Definitely a user syntax error.
   1954 
   1955    AARCH64_OPDE_INVALID_VARIANT
   1956      No syntax error, but the operands are not a valid combination, e.g.
   1957      FMOV D0,S0
   1958 
   1959    The following errors are only reported against an asm string that is
   1960    syntactically valid and that has valid operand qualifiers.
   1961 
   1962    AARCH64_OPDE_INVALID_VG_SIZE
   1963      Error about a "VGx<n>" modifier in a ZA index not having the
   1964      correct <n>.  This error effectively forms a pair with
   1965      AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
   1966      of vectors that an instruction operates on.  However, the "VGx<n>"
   1967      modifier is optional, whereas a register list always has a known
   1968      and explicit length.  It therefore seems better to place more
   1969      importance on the register list length when selecting an opcode table
   1970      entry.  This in turn means that having an incorrect register length
   1971      should be more severe than having an incorrect "VGx<n>".
   1972 
   1973    AARCH64_OPDE_REG_LIST_LENGTH
   1974      Error about a register list operand having an unexpected number of
   1975      registers.  This error is low severity because there might be another
   1976      opcode entry that supports the given number of registers.
   1977 
   1978    AARCH64_OPDE_REG_LIST_STRIDE
   1979      Error about a register list operand having the correct number
   1980      (and type) of registers, but an unexpected stride.  This error is
   1981      more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
   1982      that the length is known to be correct.  However, it is lower than
   1983      many other errors, since some instructions have forms that share
   1984      the same number of registers but have different strides.
   1985 
   1986    AARCH64_OPDE_UNTIED_IMMS
   1987      The asm failed to use the same immediate for a destination operand
   1988      and a tied source operand.
   1989 
   1990    AARCH64_OPDE_UNTIED_OPERAND
   1991      The asm failed to use the same register for a destination operand
   1992      and a tied source operand.
   1993 
   1994    AARCH64_OPDE_OUT_OF_RANGE
   1995      Error about some immediate value out of a valid range.
   1996 
   1997    AARCH64_OPDE_UNALIGNED
   1998      Error about some immediate value not properly aligned (i.e. not being a
   1999      multiple times of a certain value).
   2000 
   2001    AARCH64_OPDE_OTHER_ERROR
   2002      Error of the highest severity and used for any severe issue that does not
   2003      fall into any of the above categories.
   2004 
   2005    AARCH64_OPDE_INVALID_REGNO
   2006      A register was syntactically valid and had the right type, but it was
   2007      outside the range supported by the associated operand field.  This is
   2008      a high severity error because there are currently no instructions that
   2009      would accept the operands that precede the erroneous one (if any) and
   2010      yet still accept a wider range of registers.
   2011 
   2012    AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   2013    AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
   2014    AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
   2015    only libopcodes has the information about the valid variants of each
   2016    instruction.
   2017 
   2018    The enumerators have an increasing severity.  This is helpful when there are
   2019    multiple instruction templates available for a given mnemonic name (e.g.
   2020    FMOV); this mechanism will help choose the most suitable template from which
   2021    the generated diagnostics can most closely describe the issues, if any.
   2022 
   2023    This enum needs to be kept up-to-date with operand_mismatch_kind_names
   2024    in tc-aarch64.c.  */
   2025 
/* N.B. the enumerators are listed in increasing order of severity; see the
   large comment above for the meaning of each value.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
   2045 
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  /* The kind of error; see aarch64_operand_error_kind above.  */
  enum aarch64_operand_error_kind kind;
  /* Index of the operand the error applies to.  NOTE(review): presumably a
     sentinel value is used when the error is not operand-specific -- confirm
     against the producers in GAS/libopcodes.  */
  int index;
  /* Human-readable error message, if any.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* True if the error does not make the containing match fatal; exact
     handling is up to the consumer (GAS).  */
  bool non_fatal;
};
   2059 
   2060 /* AArch64 sequence structure used to track instructions with F_SCAN
   2061    dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened; NUM_ALLOCATED_INSNS entries are available.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
   2072 
   2073 /* Encoding entrypoint.  */
   2074 
   2075 extern bool
   2076 aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
   2077 		       aarch64_insn *, aarch64_opnd_qualifier_t *,
   2078 		       aarch64_operand_error *, aarch64_instr_sequence *);
   2079 
   2080 extern const aarch64_opcode *
   2081 aarch64_replace_opcode (struct aarch64_inst *,
   2082 			const aarch64_opcode *);
   2083 
   2084 /* Given the opcode enumerator OP, return the pointer to the corresponding
   2085    opcode entry.  */
   2086 
   2087 extern const aarch64_opcode *
   2088 aarch64_get_opcode (enum aarch64_op);
   2089 
   2090 /* An instance of this structure is passed to aarch64_print_operand, and
   2091    the callback within this structure is used to apply styling to the
   2092    disassembler output.  This structure encapsulates the callback and a
   2093    state pointer.  */
   2094 
struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS (printf-like, hence the va_list) with STYLE applied to the
     string.  STYLER is a pointer back to this object so that the callback
     can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
   2112 
/* Generate the string representation of an operand.  The leading
   char*/size_t pair is presumably the output buffer and its size, and
   the final argument supplies the styling callback declared above
   (struct aarch64_styler) — NOTE(review): confirm the remaining
   parameter meanings against the definition in aarch64-opc.c.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);
   2120 
/* Miscellaneous interface.  */

/* Find an operand within an operand list; presumably returns its index,
   or a negative value when absent — verify against the implementation.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

/* Look up the expected qualifier within a qualifier sequence list.  */
extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

/* Whether the opcode writes one of its source operand registers
   (destructive operand form), judging by the name — confirm in
   aarch64-opc.c.  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

/* Number of operands taken by the given opcode.  */
extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Whether the operand refers to the stack pointer.  */
extern bool
aarch64_stack_pointer_p (const aarch64_opnd_info *);

/* Whether the operand refers to the zero register (int, not bool,
   in the original interface — kept as-is for compatibility).  */
extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode a 32-bit instruction word into an
   aarch64_inst, reporting any problem through the error pointer.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

/* Initialise an instruction-sequence tracking object from an
   instruction.  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

/* Map an operand enumerator to its operand class.  */
extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

/* Human-readable name of an operand enumerator.  */
extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

/* Longer description string for an operand enumerator.  */
extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* Whether the immediate/size pair is representable by the SVE DUPM/MOV
   immediate form, judging by the name — confirm in aarch64-opc.c.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Whether a CPU with the given feature set supports the instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);

/* Compute the data size of a load/store from its operands —
   NOTE(review): units (bits vs bytes) defined by the implementation.  */
extern int
calc_ldst_datasize (const aarch64_opnd_info *opnds);
   2171 
#ifdef DEBUG_AARCH64
/* Nonzero enables the verbose trace output produced by the macros
   below.  */
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Emit the printf-style trace message M (prefixed with the calling
   function's name) when debug_dump is set.  Wrapped in do { } while (0)
   so that each macro expands to exactly one statement: the previous
   bare `{ ... }' (debug) and `;' (release) expansions broke when an
   invocation followed by a semicolon appeared in an unbraced
   if/else.  */
#define DEBUG_TRACE(M, ...)					\
  do								\
    {								\
      if (debug_dump)						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);\
    }								\
  while (0)

/* As DEBUG_TRACE, but additionally gated on the condition C.  */
#define DEBUG_TRACE_IF(C, M, ...)				\
  do								\
    {								\
      if (debug_dump && (C))					\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);\
    }								\
  while (0)
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) do {} while (0)
#define DEBUG_TRACE_IF(C, M, ...) do {} while (0)
#endif /* DEBUG_AARCH64 */
   2193 
/* Name tables used when printing operands.  Array sizes match the
   range of the corresponding encoded field (e.g. a 5-bit SVE predicate
   pattern -> 32 entries, a 4-bit prefetch operation -> 16 entries) —
   NOTE(review): confirm index source fields in aarch64-opc.c.  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
extern const char *const aarch64_brbop_array[2];
   2199 
   2200 #ifdef __cplusplus
   2201 }
   2202 #endif
   2203 
   2204 #endif /* OPCODE_AARCH64_H */
   2205