Home | History | Annotate | Line # | Download | only in opcode
aarch64.h revision 1.1.1.10
      1 /* AArch64 assembler/disassembler support.
      2 
      3    Copyright (C) 2009-2024 Free Software Foundation, Inc.
      4    Contributed by ARM Ltd.
      5 
      6    This file is part of GNU Binutils.
      7 
      8    This program is free software; you can redistribute it and/or modify
      9    it under the terms of the GNU General Public License as published by
     10    the Free Software Foundation; either version 3 of the license, or
     11    (at your option) any later version.
     12 
     13    This program is distributed in the hope that it will be useful,
     14    but WITHOUT ANY WARRANTY; without even the implied warranty of
     15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     16    GNU General Public License for more details.
     17 
     18    You should have received a copy of the GNU General Public License
     19    along with this program; see the file COPYING3. If not,
     20    see <http://www.gnu.org/licenses/>.  */
     21 
     22 #ifndef OPCODE_AARCH64_H
     23 #define OPCODE_AARCH64_H
     24 
     25 #include "bfd.h"
     26 #include <stdint.h>
     27 #include <assert.h>
     28 #include <stdlib.h>
     29 
     30 #include "dis-asm.h"
     31 
     32 #ifdef __cplusplus
     33 extern "C" {
     34 #endif
     35 
/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET		0

/* A single AArch64 instruction word; all AArch64 insns are 32 bits wide.  */
typedef uint32_t aarch64_insn;
     40 
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set, so the enumerator order determines each feature's
   bit number; reordering entries changes every subsequent feature's bit.  */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  /* SVE2 AES instructions.  */
  AARCH64_FEATURE_SVE2_AES,
  /* SVE2 bit-permute instructions.  */
  AARCH64_FEATURE_SVE2_BITPERM,
  /* SVE2 SM4 instructions.  */
  AARCH64_FEATURE_SVE2_SM4,
  /* SVE2 SHA3 instructions.  */
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAUTH,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions.  */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions.  */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* XS memory attribute.  */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout.  */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  /* F32 Matrix Multiply instructions.  */
  AARCH64_FEATURE_F32MM,
  /* F64 Matrix Multiply instructions.  */
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors.  */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records.  */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source.  */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events.  */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* System Control Register2.  */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps.  */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address.  */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement.  */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension.  */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension.  */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension.  */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension.  */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers.  */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions.  */
  AARCH64_FEATURE_D128,
  /* Armv8.9-A/Armv9.4-A architecture Debug extension.  */
  AARCH64_FEATURE_DEBUGv8p9,
  /* Performance Monitors Extension.  */
  AARCH64_FEATURE_PMUv3p9,
  /* Performance Monitors Snapshots Extension.  */
  AARCH64_FEATURE_PMUv3_SS,
  /* Performance Monitors Instruction Counter Extension.  */
  AARCH64_FEATURE_PMUv3_ICNTR,
  /* System Performance Monitors Extension.  */
  AARCH64_FEATURE_SPMU,
  /* Performance Monitors Synchronous-Exception-Based Event Extension.  */
  AARCH64_FEATURE_SEBEP,
  /* SVE2.1 and SME2.1 non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_B16B16,
  /* SME2.1 instructions.  */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions.  */
  AARCH64_FEATURE_SVE2p1,
  /* RCPC3 instructions.  */
  AARCH64_FEATURE_RCPC3,
  /* Checked Pointer Arithmetic instructions. */
  AARCH64_FEATURE_CPA,
  /* FAMINMAX instructions.  */
  AARCH64_FEATURE_FAMINMAX,
  /* FP8 instructions.  */
  AARCH64_FEATURE_FP8,
  /* Number of feature bits; must remain the last enumerator.  */
  AARCH64_NUM_FEATURES
};
    241 
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A 64-bit mask in which feature bit BIT is set and all other bits are
   clear, as seen from word X of the feature array: the mask is non-zero
   only in the word that actually holds BIT (word BIT / 64), where the
   bit appears at position BIT % 64.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  (((X) == ((BIT) / 64)) ? (1ULL << ((BIT) % 64)) : 0ULL)

/* The word-X mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
    253 
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.
   Each macro takes the feature-array word index X and ORs together the
   per-word masks of its member features (see AARCH64_FEATBIT).  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAUTH)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM) \
					 | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, RCPC2)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, XS)      \
					 | AARCH64_FEATBIT (X, WFXT)    \
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, SPEv1p4) \
					 | AARCH64_FEATBIT (X, SPE_CRR)	\
					 | AARCH64_FEATBIT (X, SPE_FDS) \
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, SCTLR2)	\
					 | AARCH64_FEATBIT (X, FGT2)	\
					 | AARCH64_FEATBIT (X, PFAR)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, AIE)	\
					 | AARCH64_FEATBIT (X, S1PIE)	\
					 | AARCH64_FEATBIT (X, S2PIE)	\
					 | AARCH64_FEATBIT (X, S1POE)	\
					 | AARCH64_FEATBIT (X, S2POE)	\
					 | AARCH64_FEATBIT (X, TCR2)	\
					 | AARCH64_FEATBIT (X, DEBUGv8p9) \
					 | AARCH64_FEATBIT (X, PMUv3p9)	\
					 | AARCH64_FEATBIT (X, PMUv3_SS) \
					 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
					 | AARCH64_FEATBIT (X, SPMU) \
					 | AARCH64_FEATBIT (X, SEBEP) \
					 | AARCH64_FEATBIT (X, PREDRES2) \
					)

#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
/* Armv9.1-A to Armv9.4-A add no features of their own beyond those of
   the corresponding Armv8.x-A update.  */
#define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
#define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
#define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
#define AARCH64_ARCH_V9_4A_FEATURES(X)	AARCH64_ARCH_V8_9A_FEATURES (X)
    329 
/* Architectures are the sum of the base and extensions.  Each Armv8.x-A
   macro includes the previous version's full feature set plus that
   version's own *_FEATURES additions.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8) \
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X) \
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)	\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)	\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)	\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)	\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)	\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)	\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)	\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)	\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R: based on the Armv8.4-A feature set, plus the V8R bit, minus
   the A-profile marker (V8A) and LOR.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

/* Armv9.0-A builds on Armv8.5-A; later Armv9.x versions chain in the
   same way as the Armv8.x series above.  */
#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X) \
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X) \
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X) \
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X) \
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X) \
				 | AARCH64_ARCH_V9_4A_FEATURES (X))

/* The empty architecture: no feature bits set in any word.  */
#define AARCH64_ARCH_NONE(X)	0
    368 
    369 /* CPU-specific features.  */
    370 typedef struct {
    371   uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
    372 } aarch64_feature_set;
    373 
/* Feature-set operations.  NOTE(review): these macros hard-code the two
   words flags[0] and flags[1]; they need updating if AARCH64_NUM_FEATURES
   ever exceeds 128.  */

/* Nonzero if CPU implements the single feature AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)	\
  ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0		\
   && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)

/* Nonzero if CPU implements every feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)	\
  ((~(CPU).flags[0] & (FEAT).flags[0]) == 0	\
   && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)

/* Nonzero if CPU implements at least one feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
  (((CPU).flags[0] & (FEAT).flags[0]) != 0	\
   || ((CPU).flags[1] & (FEAT).flags[1]) != 0)

/* Overwrite DEST with the feature set produced by the word-mask macro
   FEAT (e.g. one of the AARCH64_ARCH_* macros above).  */
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  ((DEST).flags[0] = FEAT (0),		\
   (DEST).flags[1] = FEAT (1))

/* DEST = SRC with the single feature AARCH64_FEATURE_<FEAT> cleared.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)		\
  ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \
   (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))

/* TARG = union of feature sets F1 and F2.  */
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] | (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] | (F2).flags[1];	\
    }							\
  while (0)

/* TARG = features in F1 that are not in F2 (set difference).  */
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1];	\
    }							\
  while (0)
    409 
/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { 0, 0 } }
#define AARCH64_ALL_FEATURES { { -1, -1 } }

/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE(FEAT) \
  { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  ARCH names the suffix of an AARCH64_ARCH_* macro, e.g.
   AARCH64_ARCH_FEATURES (V8_2A).  */
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }
    425 
/* Used by AARCH64_CPU_FEATURES.  AARCH64_OR_FEATURES_<N> ORs the word-X
   masks of N named features on top of the AARCH64_ARCH_<ARCH> mask; each
   level peels one feature off the argument list and recurses.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".
   N must match the number of variadic arguments (at most 9).  */
#define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
  { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),		\
      AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...".  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
    455 
/* Broad classification of instruction operands; each aarch64_opnd value
   below belongs to exactly one of these classes.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,		/* No operand.  */
  AARCH64_OPND_CLASS_INT_REG,		/* Integer register.  */
  AARCH64_OPND_CLASS_MODIFIED_REG,	/* Extended/shifted register.  */
  AARCH64_OPND_CLASS_FP_REG,		/* Floating-point register.  */
  AARCH64_OPND_CLASS_SIMD_REG,		/* AdvSIMD vector register.  */
  AARCH64_OPND_CLASS_SIMD_ELEMENT,	/* AdvSIMD vector element.  */
  AARCH64_OPND_CLASS_SISD_REG,		/* AdvSIMD scalar register.  */
  AARCH64_OPND_CLASS_SIMD_REGLIST,	/* AdvSIMD vector register list.  */
  AARCH64_OPND_CLASS_SVE_REG,		/* SVE vector register.  */
  AARCH64_OPND_CLASS_SVE_REGLIST,	/* SVE vector register list.  */
  AARCH64_OPND_CLASS_PRED_REG,		/* SVE predicate register.  */
  AARCH64_OPND_CLASS_ZA_ACCESS,		/* SME ZA array access.  */
  AARCH64_OPND_CLASS_ADDRESS,		/* Memory address.  */
  AARCH64_OPND_CLASS_IMMEDIATE,		/* Immediate value.  */
  AARCH64_OPND_CLASS_SYSTEM,		/* System operand (sysreg etc.).  */
  AARCH64_OPND_CLASS_COND,		/* Condition code.  */
};
    475 
    476 /* Operand code that helps both parsing and coding.
    477    Keep AARCH64_OPERANDS synced.  */
    478 
    479 enum aarch64_opnd
    480 {
    481   AARCH64_OPND_NIL,	/* no operand---MUST BE FIRST!*/
    482 
    483   AARCH64_OPND_Rd,	/* Integer register as destination.  */
    484   AARCH64_OPND_Rn,	/* Integer register as source.  */
    485   AARCH64_OPND_Rm,	/* Integer register as source.  */
    486   AARCH64_OPND_Rt,	/* Integer register used in ld/st instructions.  */
    487   AARCH64_OPND_Rt2,	/* Integer register used in ld/st pair instructions.  */
    488   AARCH64_OPND_X16,	/* Integer register x16 in chkfeat instruction.  */
    489   AARCH64_OPND_Rt_LS64,	/* Integer register used in LS64 instructions.  */
    490   AARCH64_OPND_Rt_SP,	/* Integer Rt or SP used in STG instructions.  */
    491   AARCH64_OPND_Rs,	/* Integer register used in ld/st exclusive.  */
    492   AARCH64_OPND_Ra,	/* Integer register used in ddp_3src instructions.  */
    493   AARCH64_OPND_Rt_SYS,	/* Integer register used in system instructions.  */
    494 
    495   AARCH64_OPND_Rd_SP,	/* Integer Rd or SP.  */
    496   AARCH64_OPND_Rn_SP,	/* Integer Rn or SP.  */
    497   AARCH64_OPND_Rm_SP,	/* Integer Rm or SP.  */
    498   AARCH64_OPND_PAIRREG,	/* Paired register operand.  */
    499   AARCH64_OPND_PAIRREG_OR_XZR,	/* Paired register operand, optionally xzr.  */
    500   AARCH64_OPND_Rm_EXT,	/* Integer Rm extended.  */
    501   AARCH64_OPND_Rm_SFT,	/* Integer Rm shifted.  */
    502   AARCH64_OPND_Rm_LSL,	/* Integer Rm shifted (LSL-only).  */
    503 
    504   AARCH64_OPND_Fd,	/* Floating-point Fd.  */
    505   AARCH64_OPND_Fn,	/* Floating-point Fn.  */
    506   AARCH64_OPND_Fm,	/* Floating-point Fm.  */
    507   AARCH64_OPND_Fa,	/* Floating-point Fa.  */
    508   AARCH64_OPND_Ft,	/* Floating-point Ft.  */
    509   AARCH64_OPND_Ft2,	/* Floating-point Ft2.  */
    510 
    511   AARCH64_OPND_Sd,	/* AdvSIMD Scalar Sd.  */
    512   AARCH64_OPND_Sn,	/* AdvSIMD Scalar Sn.  */
    513   AARCH64_OPND_Sm,	/* AdvSIMD Scalar Sm.  */
    514 
    515   AARCH64_OPND_Va,	/* AdvSIMD Vector Va.  */
    516   AARCH64_OPND_Vd,	/* AdvSIMD Vector Vd.  */
    517   AARCH64_OPND_Vn,	/* AdvSIMD Vector Vn.  */
    518   AARCH64_OPND_Vm,	/* AdvSIMD Vector Vm.  */
    519   AARCH64_OPND_VdD1,	/* AdvSIMD <Vd>.D[1]; for FMOV only.  */
    520   AARCH64_OPND_VnD1,	/* AdvSIMD <Vn>.D[1]; for FMOV only.  */
    521   AARCH64_OPND_Ed,	/* AdvSIMD Vector Element Vd.  */
    522   AARCH64_OPND_En,	/* AdvSIMD Vector Element Vn.  */
    523   AARCH64_OPND_Em,	/* AdvSIMD Vector Element Vm.  */
    524   AARCH64_OPND_Em16,	/* AdvSIMD Vector Element Vm restricted to V0 - V15 when
    525 			   qualifier is S_H.  */
    526   AARCH64_OPND_LVn,	/* AdvSIMD Vector register list used in e.g. TBL.  */
    527   AARCH64_OPND_LVt,	/* AdvSIMD Vector register list used in ld/st.  */
    528   AARCH64_OPND_LVt_AL,	/* AdvSIMD Vector register list for loading single
    529 			   structure to all lanes.  */
    530   AARCH64_OPND_LEt,	/* AdvSIMD Vector Element list.  */
    531 
    532   AARCH64_OPND_CRn,	/* Co-processor register in CRn field.  */
    533   AARCH64_OPND_CRm,	/* Co-processor register in CRm field.  */
    534 
    535   AARCH64_OPND_IDX,	/* AdvSIMD EXT index operand.  */
    536   AARCH64_OPND_MASK,	/* AdvSIMD EXT index operand.  */
    537   AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
    538   AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
    539   AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
    540   AARCH64_OPND_SIMD_IMM_SFT,	/* AdvSIMD modified immediate with shift.  */
    541   AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate.  */
    542   AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
    543 			   (no encoding).  */
    544   AARCH64_OPND_IMM0,	/* Immediate for #0.  */
    545   AARCH64_OPND_FPIMM0,	/* Immediate for #0.0.  */
    546   AARCH64_OPND_FPIMM,	/* Floating-point Immediate.  */
    547   AARCH64_OPND_IMMR,	/* Immediate #<immr> in e.g. BFM.  */
    548   AARCH64_OPND_IMMS,	/* Immediate #<imms> in e.g. BFM.  */
    549   AARCH64_OPND_WIDTH,	/* Immediate #<width> in e.g. BFI.  */
    550   AARCH64_OPND_IMM,	/* Immediate.  */
    551   AARCH64_OPND_IMM_2,	/* Immediate.  */
    552   AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
    553   AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
    554   AARCH64_OPND_UIMM4,	/* Unsigned 4-bit immediate in the CRm field.  */
    555   AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg.  */
    556   AARCH64_OPND_UIMM7,	/* Unsigned 7-bit immediate in the CRm:op2 fields.  */
    557   AARCH64_OPND_UIMM10,	/* Unsigned 10-bit immediate in addg/subg.  */
    558   AARCH64_OPND_BIT_NUM,	/* Immediate.  */
    559   AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
    560   AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
    561   AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
    562   AARCH64_OPND_SIMM5,	/* 5-bit signed immediate in the imm5 field.  */
    563   AARCH64_OPND_NZCV,	/* Flag bit specifier giving an alternative value for
    564 			   each condition flag.  */
    565 
    566   AARCH64_OPND_LIMM,	/* Logical Immediate.  */
    567   AARCH64_OPND_AIMM,	/* Arithmetic immediate.  */
    568   AARCH64_OPND_HALF,	/* #<imm16>{, LSL #<shift>} operand in move wide.  */
    569   AARCH64_OPND_FBITS,	/* FP #<fbits> operand in e.g. SCVTF */
    570   AARCH64_OPND_IMM_MOV,	/* Immediate operand for the MOV alias.  */
    571   AARCH64_OPND_IMM_ROT1,	/* Immediate rotate operand for FCMLA.  */
    572   AARCH64_OPND_IMM_ROT2,	/* Immediate rotate operand for indexed FCMLA.  */
    573   AARCH64_OPND_IMM_ROT3,	/* Immediate rotate operand for FCADD.  */
    574 
    575   AARCH64_OPND_COND,	/* Standard condition as the last operand.  */
    576   AARCH64_OPND_COND1,	/* Same as the above, but excluding AL and NV.  */
    577 
    578   AARCH64_OPND_ADDR_ADRP,	/* Memory address for ADRP */
    579   AARCH64_OPND_ADDR_PCREL14,	/* 14-bit PC-relative address for e.g. TBZ.  */
    580   AARCH64_OPND_ADDR_PCREL19,	/* 19-bit PC-relative address for e.g. LDR.  */
    581   AARCH64_OPND_ADDR_PCREL21,	/* 21-bit PC-relative address for e.g. ADR.  */
    582   AARCH64_OPND_ADDR_PCREL26,	/* 26-bit PC-relative address for e.g. BL.  */
    583 
    584   AARCH64_OPND_ADDR_SIMPLE,	/* Address of ld/st exclusive.  */
    585   AARCH64_OPND_ADDR_REGOFF,	/* Address of register offset.  */
    586   AARCH64_OPND_ADDR_SIMM7,	/* Address of signed 7-bit immediate.  */
    587   AARCH64_OPND_ADDR_SIMM9,	/* Address of signed 9-bit immediate.  */
    588   AARCH64_OPND_ADDR_SIMM9_2,	/* Same as the above, but the immediate is
    589 				   negative or unaligned and there is
    590 				   no writeback allowed.  This operand code
    591 				   is only used to support the programmer-
    592 				   friendly feature of using LDR/STR as the
    593 				   the mnemonic name for LDUR/STUR instructions
    594 				   wherever there is no ambiguity.  */
    595   AARCH64_OPND_ADDR_SIMM10,	/* Address of signed 10-bit immediate.  */
    596   AARCH64_OPND_ADDR_SIMM11,	/* Address with a signed 11-bit (multiple of
    597 				   16) immediate.  */
    598   AARCH64_OPND_ADDR_UIMM12,	/* Address of unsigned 12-bit immediate.  */
    599   AARCH64_OPND_ADDR_SIMM13,	/* Address with a signed 13-bit (multiple of
    600 				   16) immediate.  */
    601   AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
    602   AARCH64_OPND_ADDR_OFFSET,     /* Address with an optional 9-bit immediate.  */
    603   AARCH64_OPND_SIMD_ADDR_POST,	/* Address of ld/st multiple post-indexed.  */
    604 
    605   AARCH64_OPND_SYSREG,		/* System register operand.  */
    606   AARCH64_OPND_SYSREG128,	/* 128-bit system register operand.  */
    607   AARCH64_OPND_PSTATEFIELD,	/* PSTATE field name operand.  */
    608   AARCH64_OPND_SYSREG_AT,	/* System register <at_op> operand.  */
    609   AARCH64_OPND_SYSREG_DC,	/* System register <dc_op> operand.  */
    610   AARCH64_OPND_SYSREG_IC,	/* System register <ic_op> operand.  */
    611   AARCH64_OPND_SYSREG_TLBI,	/* System register <tlbi_op> operand.  */
    612   AARCH64_OPND_SYSREG_TLBIP,	/* System register <tlbip_op> operand.  */
    613   AARCH64_OPND_SYSREG_SR,	/* System register RCTX operand.  */
    614   AARCH64_OPND_BARRIER,		/* Barrier operand.  */
    615   AARCH64_OPND_BARRIER_DSB_NXS,	/* Barrier operand for DSB nXS variant.  */
    616   AARCH64_OPND_BARRIER_ISB,	/* Barrier operand for ISB.  */
    617   AARCH64_OPND_PRFOP,		/* Prefetch operation.  */
    618   AARCH64_OPND_RPRFMOP,		/* Range prefetch operation.  */
    619   AARCH64_OPND_BARRIER_PSB,	/* Barrier operand for PSB.  */
    620   AARCH64_OPND_BARRIER_GCSB,	/* Barrier operand for GCSB.  */
    621   AARCH64_OPND_BTI_TARGET,	/* BTI {<target>}.  */
    622   AARCH64_OPND_LSE128_Rt,	/* LSE128 <Xt1>.  */
    623   AARCH64_OPND_LSE128_Rt2,	/* LSE128 <Xt2>.  */
    624   AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
    625   AARCH64_OPND_SVE_ADDR_RI_S4x32,   /* SVE [<Xn|SP>, #<simm4>*32].  */
    626   AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
    627   AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
    628   AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
    629   AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
    630   AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
    631   AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
    632   AARCH64_OPND_SVE_ADDR_RI_U6,	    /* SVE [<Xn|SP>, #<uimm6>].  */
    633   AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
    634   AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
    635   AARCH64_OPND_SVE_ADDR_RI_U6x8,    /* SVE [<Xn|SP>, #<uimm6>*8].  */
    636   AARCH64_OPND_SVE_ADDR_R,	    /* SVE [<Xn|SP>].  */
    637   AARCH64_OPND_SVE_ADDR_RR,	    /* SVE [<Xn|SP>, <Xm|XZR>].  */
    638   AARCH64_OPND_SVE_ADDR_RR_LSL1,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
    639   AARCH64_OPND_SVE_ADDR_RR_LSL2,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
    640   AARCH64_OPND_SVE_ADDR_RR_LSL3,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
    641   AARCH64_OPND_SVE_ADDR_RR_LSL4,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
    642   AARCH64_OPND_SVE_ADDR_RX,	    /* SVE [<Xn|SP>, <Xm>].  */
    643   AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
    644   AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
    645   AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
    646   AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
    647   AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
    648   AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
    649   AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
    650   AARCH64_OPND_SVE_ADDR_RZ_LSL3,    /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
    651   AARCH64_OPND_SVE_ADDR_RZ_XTW_14,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
    652 				       Bit 14 controls S/U choice.  */
    653   AARCH64_OPND_SVE_ADDR_RZ_XTW_22,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
    654 				       Bit 22 controls S/U choice.  */
    655   AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
    656 				       Bit 14 controls S/U choice.  */
    657   AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
    658 				       Bit 22 controls S/U choice.  */
    659   AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
    660 				       Bit 14 controls S/U choice.  */
    661   AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
    662 				       Bit 22 controls S/U choice.  */
    663   AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
    664 				       Bit 14 controls S/U choice.  */
    665   AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
    666 				       Bit 22 controls S/U choice.  */
    667   AARCH64_OPND_SVE_ADDR_ZI_U5,	    /* SVE [Zn.<T>, #<uimm5>].  */
    668   AARCH64_OPND_SVE_ADDR_ZI_U5x2,    /* SVE [Zn.<T>, #<uimm5>*2].  */
    669   AARCH64_OPND_SVE_ADDR_ZI_U5x4,    /* SVE [Zn.<T>, #<uimm5>*4].  */
    670   AARCH64_OPND_SVE_ADDR_ZI_U5x8,    /* SVE [Zn.<T>, #<uimm5>*8].  */
    671   AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>].  */
    672   AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>].  */
    673   AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>].  */
    674   AARCH64_OPND_SVE_AIMM,	/* SVE unsigned arithmetic immediate.  */
    675   AARCH64_OPND_SVE_ASIMM,	/* SVE signed arithmetic immediate.  */
    676   AARCH64_OPND_SVE_FPIMM8,	/* SVE 8-bit floating-point immediate.  */
    677   AARCH64_OPND_SVE_I1_HALF_ONE,	/* SVE choice between 0.5 and 1.0.  */
    678   AARCH64_OPND_SVE_I1_HALF_TWO,	/* SVE choice between 0.5 and 2.0.  */
    679   AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
    680   AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
    681   AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
    682   AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
    683   AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
    684   AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
    685   AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
    686   AARCH64_OPND_SVE_PATTERN,	/* SVE vector pattern enumeration.  */
    687   AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
    688   AARCH64_OPND_SVE_PRFOP,	/* SVE prefetch operation.  */
    689   AARCH64_OPND_SVE_Pd,		/* SVE p0-p15 in Pd.  */
    690   AARCH64_OPND_SVE_PNd,		/* SVE pn0-pn15 in Pd.  */
    691   AARCH64_OPND_SVE_Pg3,		/* SVE p0-p7 in Pg.  */
    692   AARCH64_OPND_SVE_Pg4_5,	/* SVE p0-p15 in Pg, bits [8,5].  */
    693   AARCH64_OPND_SVE_Pg4_10,	/* SVE p0-p15 in Pg, bits [13,10].  */
    694   AARCH64_OPND_SVE_PNg4_10,	/* SVE pn0-pn15 in Pg, bits [13,10].  */
    695   AARCH64_OPND_SVE_Pg4_16,	/* SVE p0-p15 in Pg, bits [19,16].  */
    696   AARCH64_OPND_SVE_Pm,		/* SVE p0-p15 in Pm.  */
    697   AARCH64_OPND_SVE_Pn,		/* SVE p0-p15 in Pn.  */
    698   AARCH64_OPND_SVE_PNn,		/* SVE pn0-pn15 in Pn.  */
    699   AARCH64_OPND_SVE_Pt,		/* SVE p0-p15 in Pt.  */
    700   AARCH64_OPND_SVE_PNt,		/* SVE pn0-pn15 in Pt.  */
    701   AARCH64_OPND_SVE_Rm,		/* Integer Rm or ZR, alt. SVE position.  */
    702   AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
    703   AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
    704   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
    705   AARCH64_OPND_SVE_SHLIMM_UNPRED_22,	/* SVE 3 bit shift left unpred.  */
    706   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
    707   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
    708   AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
    709   AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
    710   AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
    711   AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
    712   AARCH64_OPND_SVE_SIMM8,	/* SVE signed 8-bit immediate.  */
    713   AARCH64_OPND_SVE_UIMM3,	/* SVE unsigned 3-bit immediate.  */
    714   AARCH64_OPND_SVE_UIMM7,	/* SVE unsigned 7-bit immediate.  */
    715   AARCH64_OPND_SVE_UIMM8,	/* SVE unsigned 8-bit immediate.  */
    716   AARCH64_OPND_SVE_UIMM8_53,	/* SVE split unsigned 8-bit immediate.  */
    717   AARCH64_OPND_SVE_VZn,		/* Scalar SIMD&FP register in Zn field.  */
    718   AARCH64_OPND_SVE_Vd,		/* Scalar SIMD&FP register in Vd.  */
    719   AARCH64_OPND_SVE_Vm,		/* Scalar SIMD&FP register in Vm.  */
    720   AARCH64_OPND_SVE_Vn,		/* Scalar SIMD&FP register in Vn.  */
    721   AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B).  */
    722   AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H).  */
    723   AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S).  */
    724   AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D).  */
    725   AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B).  */
    726   AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H).  */
    727   AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
    728   AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D).  */
    729   AARCH64_OPND_SVE_Za_5,	/* SVE vector register in Za, bits [9,5].  */
    730   AARCH64_OPND_SVE_Za_16,	/* SVE vector register in Za, bits [20,16].  */
    731   AARCH64_OPND_SVE_Zd,		/* SVE vector register in Zd.  */
    732   AARCH64_OPND_SVE_Zm_5,	/* SVE vector register in Zm, bits [9,5].  */
    733   AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
    734   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
    735   AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
    736   AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19.  */
    737   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
    738   AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
    739   AARCH64_OPND_SVE_Zm_imm4,     /* SVE vector register with 4bit index.  */
    740   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
    741   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
    742   AARCH64_OPND_SVE_Zn_5_INDEX,	/* Indexed SVE vector register, for DUPQ.  */
    743   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
    744   AARCH64_OPND_SVE_ZnxN,	/* SVE vector register list in Zn.  */
    745   AARCH64_OPND_SVE_Zt,		/* SVE vector register in Zt.  */
    746   AARCH64_OPND_SVE_ZtxN,	/* SVE vector register list in Zt.  */
    747   AARCH64_OPND_SME_Zdnx2,	/* SVE vector register list from [4:1]*2.  */
    748   AARCH64_OPND_SME_Zdnx4,	/* SVE vector register list from [4:2]*4.  */
    749   AARCH64_OPND_SME_Zm,		/* SVE vector register list in 4-bit Zm.  */
    750   AARCH64_OPND_SME_Zmx2,	/* SVE vector register list from [20:17]*2.  */
    751   AARCH64_OPND_SME_Zmx4,	/* SVE vector register list from [20:18]*4.  */
    752   AARCH64_OPND_SME_Znx2,	/* SVE vector register list from [9:6]*2.  */
    753   AARCH64_OPND_SME_Znx4,	/* SVE vector register list from [9:7]*4.  */
    754   AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23.  */
    755   AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19.  */
    756   AARCH64_OPND_SME_ZAda_2b,	/* SME <ZAda>.S, 2-bits.  */
    757   AARCH64_OPND_SME_ZAda_3b,	/* SME <ZAda>.D, 3-bits.  */
    758   AARCH64_OPND_SME_ZA_HV_idx_src,	/* SME source ZA tile vector.  */
    759   AARCH64_OPND_SME_ZA_HV_idx_srcxN,	/* SME N source ZA tile vectors.  */
    760   AARCH64_OPND_SME_ZA_HV_idx_dest,	/* SME destination ZA tile vector.  */
    761   AARCH64_OPND_SME_ZA_HV_idx_destxN,	/* SME N dest ZA tile vectors.  */
    762   AARCH64_OPND_SME_Pdx2,	/* Predicate register list in [3:1].  */
    763   AARCH64_OPND_SME_PdxN,	/* Predicate register list in [3:0].  */
    764   AARCH64_OPND_SME_Pm,		/* SME scalable predicate register, bits [15:13].  */
    765   AARCH64_OPND_SME_PNd3,	/* Predicate-as-counter register, bits [3:0].  */
    766   AARCH64_OPND_SME_PNg3,	/* Predicate-as-counter register, bits [12:10].  */
    767   AARCH64_OPND_SME_PNn,		/* Predicate-as-counter register, bits [8:5].  */
    768   AARCH64_OPND_SME_PNn3_INDEX1,	/* Indexed pred-as-counter reg, bits [8:5].  */
    769   AARCH64_OPND_SME_PNn3_INDEX2,	/* Indexed pred-as-counter reg, bits [9:5].  */
    770   AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
    771   AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector.  */
    772   AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3].  */
    773   AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1].  */
    774   AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3].  */
    775   AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}].  */
    776   AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}].  */
    777   AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1].  */
    778   AARCH64_OPND_SME_ZA_array_off4,   /* SME ZA[<Wv>{, #<imm>}].  */
    779   AARCH64_OPND_SME_ADDR_RI_U4xVL,   /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
    780   AARCH64_OPND_SME_SM_ZA,           /* SME {SM | ZA}.  */
    781   AARCH64_OPND_SME_PnT_Wm_imm,      /* SME <Pn>.<T>[<Wm>, #<imm>].  */
    782   AARCH64_OPND_SME_SHRIMM4,	    /* 4-bit right shift, bits [19:16].  */
    783   AARCH64_OPND_SME_SHRIMM5,	    /* size + 5-bit right shift, bits [23:22,20:16].  */
    784   AARCH64_OPND_SME_Zm_INDEX1,	    /* Zn.T[index], bits [19:16,10].  */
    785   AARCH64_OPND_SME_Zm_INDEX2,	    /* Zn.T[index], bits [19:16,11:10].  */
    786   AARCH64_OPND_SME_Zm_INDEX3_1,     /* Zn.T[index], bits [19:16,10,2:1].  */
    787   AARCH64_OPND_SME_Zm_INDEX3_2,     /* Zn.T[index], bits [19:16,11:10,2].  */
    788   AARCH64_OPND_SME_Zm_INDEX3_10,    /* Zn.T[index], bits [19:16,15,11:10].  */
    789   AARCH64_OPND_SME_Zm_INDEX4_1,     /* Zn.T[index], bits [19:16,11:10,2:1].  */
    790   AARCH64_OPND_SME_Zm_INDEX4_10,    /* Zn.T[index], bits [19:16,15,12:10].  */
    791   AARCH64_OPND_SME_Zn_INDEX1_16,    /* Zn[index], bits [9:5] and [16:16].  */
    792   AARCH64_OPND_SME_Zn_INDEX2_15,    /* Zn[index], bits [9:5] and [16:15].  */
    793   AARCH64_OPND_SME_Zn_INDEX2_16,    /* Zn[index], bits [9:5] and [17:16].  */
    794   AARCH64_OPND_SME_Zn_INDEX3_14,    /* Zn[index], bits [9:5] and [16:14].  */
    795   AARCH64_OPND_SME_Zn_INDEX3_15,    /* Zn[index], bits [9:5] and [17:15].  */
    796   AARCH64_OPND_SME_Zn_INDEX4_14,    /* Zn[index], bits [9:5] and [17:14].  */
    797   AARCH64_OPND_SME_VLxN_10,	/* VLx2 or VLx4, in bit 10.  */
    798   AARCH64_OPND_SME_VLxN_13,	/* VLx2 or VLx4, in bit 13.  */
    799   AARCH64_OPND_SME_ZT0,		/* The fixed token zt0/ZT0 (not encoded).  */
    800   AARCH64_OPND_SME_ZT0_INDEX,	/* ZT0[<imm>], bits [14:12].  */
    801   AARCH64_OPND_SME_ZT0_LIST,	/* { zt0/ZT0 } (not encoded).  */
    802   AARCH64_OPND_TME_UIMM16,	/* TME unsigned 16-bit immediate.  */
    803   AARCH64_OPND_SM3_IMM2,	/* SM3 encodes lane in bits [13, 14].  */
    804   AARCH64_OPND_MOPS_ADDR_Rd,	/* [Rd]!, in bits [0, 4].  */
    805   AARCH64_OPND_MOPS_ADDR_Rs,	/* [Rs]!, in bits [16, 20].  */
    806   AARCH64_OPND_MOPS_WB_Rn,	/* Rn!, in bits [5, 9].  */
    807   AARCH64_OPND_CSSC_SIMM8,	/* CSSC signed 8-bit immediate.  */
    808   AARCH64_OPND_CSSC_UIMM8,	/* CSSC unsigned 8-bit immediate.  */
  AARCH64_OPND_SME_Zt2,		/* Double SVE vector register list.  */
  AARCH64_OPND_SME_Zt3,		/* Triple SVE vector register list.  */
  AARCH64_OPND_SME_Zt4,		/* Quad SVE vector register list.  */
    812   AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND,   /* [<Xn|SP>]{, #<imm>}.  */
    813   AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!.  */
    814   AARCH64_OPND_RCPC3_ADDR_POSTIND,	 /* [<Xn|SP>], #<imm>.  */
    815   AARCH64_OPND_RCPC3_ADDR_PREIND_WB, 	 /* [<Xn|SP>, #<imm>]!.  */
    816   AARCH64_OPND_RCPC3_ADDR_OFFSET
    817 };
    818 
    819 /* Qualifier constrains an operand.  It either specifies a variant of an
    820    operand type or limits values available to an operand type.
    821 
    822    N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */
    823 
enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception to the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* Qualifying a predicate operand.  NOTE(review): by their naming these
     appear to correspond to the /Z (zeroing) and /M (merging) predicate
     forms — confirm at the operand coder use sites.  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.   */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm. */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,

  /* Special qualifier used for indicating error in qualifier retrieval.  */
  AARCH64_OPND_QLF_ERR,
};
    906 
    907 /* Instruction class.  */
    909 
enum aarch64_insn_class
{
  /* NOTE(review): these class names mirror the A64 encoding-group naming
     used by the architecture manual (e.g. "asimd" = AdvSIMD vector,
     "asisd" = AdvSIMD scalar, "ldst" = load/store) — confirm against the
     opcode table and the per-class coders before relying on a reading.
     Each opcode table entry records one of these in its `iclass' field.  */
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  sme2_movaz,
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
  the,
  sve2_urqvs,
  sve_index1,
  rcpc3
};
   1032 
   1033 /* Opcode enumerators.  */
   1034 
enum aarch64_op
{
  /* Identifies a specific instruction or alias (e.g. OP_MOVN <=> MOVN) that
     needs bespoke encode/decode handling; the value is recorded in the
     `op' field of struct aarch64_opcode.  OP_NIL means "no special id".  */
  OP_NIL,
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  /* STUR/LDUR family (unscaled-offset load/store mnemonics).  */
  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  /* PC-relative literal loads.  */
  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  /* SVE/SME predicate and vector MOV/NOT aliases.  */
  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo: number of enumerators, keep last.  */
};
   1137 
   1138 /* Error types.  */
enum err_type
{
  ERR_OK,	/* No error; the instruction is acceptable.  */
  ERR_UND,	/* Undefined (by naming — confirm at use sites).  */
  ERR_UNP,	/* Unpredictable (by naming — confirm at use sites).  */
  ERR_NYI,	/* Not yet implemented (by naming — confirm at use sites).  */
  ERR_VFI,	/* Verifier failure; returned by aarch64_opcode.verifier.  */
  ERR_NR_ENTRIES	/* Number of entries — keep last.  */
};
   1148 
/* Maximum number of operands an instruction can have.  */
#define AARCH64_MAX_OPND_NUM 7
/* Maximum number of qualifier sequences an instruction can have.  */
#define AARCH64_MAX_QLF_SEQ_NUM 10
/* Operand qualifier typedef; a single byte, optimized for the size of the
   opcode table.  */
typedef unsigned char aarch64_opnd_qualifier_t;
/* Operand qualifier sequence typedef: one qualifier per operand slot.  */
typedef aarch64_opnd_qualifier_t	\
	  aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
   1158 
   1159 /* FIXME: improve the efficiency.  */
   1160 static inline bool
   1161 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
   1162 {
   1163   int i;
   1164   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   1165     if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
   1166       return false;
   1167   return true;
   1168 }
   1169 
/* Forward declaration of the operand-error reporting type.  */
typedef struct aarch64_operand_error aarch64_operand_error;
/* Forward declaration of the instruction-sequence type.  */
typedef struct aarch64_instr_sequence aarch64_instr_sequence;
/* Forward declaration of the decoded-instruction type.  */
typedef struct aarch64_inst aarch64_inst;
   1176 
   1177 /* This structure holds information for a particular opcode.  */
   1178 
struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier (OP_NIL if none).  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (see the F_*
     macros later in this file).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks (see
     the C_* macros later in this file).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
   1229 
   1230 typedef struct aarch64_opcode aarch64_opcode;
   1231 
   1232 /* Table describing all the AArch64 opcodes.  */
   1233 extern const aarch64_opcode aarch64_opcode_table[];
   1234 
/* Opcode flags (stored in the `flags' field of struct aarch64_opcode).  */
#define F_ALIAS (1 << 0)
#define F_HAS_ALIAS (1 << 1)
/* Disassembly preference priority 1-3 (the larger the higher).  If nothing
   is specified, it is the priority 0 by default, i.e. the lowest priority.  */
#define F_P1 (1 << 2)
#define F_P2 (2 << 2)
#define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
#define F_COND (1 << 4)
/* Instruction has the field of 'sf'.  */
#define F_SF (1 << 5)
/* Instruction has the field of 'size:Q'.  */
#define F_SIZEQ (1 << 6)
/* Floating-point instruction has the field of 'type'.  */
#define F_FPTYPE (1 << 7)
/* AdvSIMD scalar instruction has the field of 'size'.  */
#define F_SSIZE (1 << 8)
/* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
#define F_T (1 << 9)
/* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
#define F_GPRSIZE_IN_Q (1 << 10)
/* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
#define F_LDS_SIZE (1 << 11)
/* Optional operand; assume maximum of 1 operand can be optional.
   The 3-bit field at bit 12 holds the 1-based index of the optional
   operand (see optional_operand_p).  */
#define F_OPD0_OPT (1 << 12)
#define F_OPD1_OPT (2 << 12)
#define F_OPD2_OPT (3 << 12)
#define F_OPD3_OPT (4 << 12)
#define F_OPD4_OPT (5 << 12)
/* Default value for the optional operand when omitted from the assembly.  */
#define F_DEFAULT(X) (((X) & 0x1f) << 15)
/* Instruction that is an alias of another instruction needs to be
   encoded/decoded by converting it to/from the real form, followed by
   the encoding/decoding according to the rules of the real opcode.
   This compares to the direct coding using the alias's information.
   N.B. this flag requires F_ALIAS to be used together.  */
#define F_CONV (1 << 20)
/* Use together with F_ALIAS to indicate an alias opcode is a programmer
   friendly pseudo instruction available only in the assembly code (thus will
   not show up in the disassembly).  */
#define F_PSEUDO (1 << 21)
/* Instruction has miscellaneous encoding/decoding rules.  */
#define F_MISC (1 << 22)
/* Instruction has the field of 'N'; used in conjunction with F_SF.  */
#define F_N (1 << 23)
/* Opcode dependent field.  */
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'.  */
#define F_LSE_SZ (1 << 27)
/* Require an exact qualifier match, even for NIL qualifiers.  */
#define F_STRICT (1ULL << 28)
/* This system instruction is used to read system registers.  */
#define F_SYS_READ (1ULL << 29)
/* This system instruction is used to write system registers.  */
#define F_SYS_WRITE (1ULL << 30)
/* This instruction has an extra constraint on it that imposes a requirement on
   subsequent instructions.  */
#define F_SCAN (1ULL << 31)
/* Instruction takes a pair of optional operands.  If we specify the Nth operand
   to be optional, then we also implicitly specify (N+1)th operand to also be
   optional.  */
#define F_OPD_PAIR_OPT (1ULL << 32)
/* This instruction does not allow the full range of values that the
   width of fields in the assembler instruction would theoretically
   allow.  This impacts the constraints on assembly but yields no
   impact on disassembly.  */
#define F_OPD_NARROW (1ULL << 33)
/* For the instruction with size[22:23] field.  */
#define F_OPD_SIZE (1ULL << 34)
/* RCPC3 instruction has the field of 'size'.  */
#define F_RCPC3_SIZE (1ULL << 35)
/* Next bit is 36.  */
   1308 
/* Instruction constraints (stored in the `constraints' field of
   struct aarch64_opcode).  */
/* This instruction has a predication constraint on the instruction at PC+4.  */
#define C_SCAN_MOVPRFX (1U << 0)
/* This instruction's operation width is determined by the operand with the
   largest element size.  */
#define C_MAX_ELEM (1U << 1)
/* MOPS scan markers in the 2-bit field at bits [3:2]; P/M/E presumably
   denote the MOPS prologue/main/epilogue forms (confirm at use sites),
   and C_SCAN_MOPS_PME is the mask covering the whole field.  */
#define C_SCAN_MOPS_P (1U << 2)
#define C_SCAN_MOPS_M (2U << 2)
#define C_SCAN_MOPS_E (3U << 2)
#define C_SCAN_MOPS_PME (3U << 2)
/* Next bit is 4.  */
   1320 
   1321 static inline bool
   1322 alias_opcode_p (const aarch64_opcode *opcode)
   1323 {
   1324   return (opcode->flags & F_ALIAS) != 0;
   1325 }
   1326 
   1327 static inline bool
   1328 opcode_has_alias (const aarch64_opcode *opcode)
   1329 {
   1330   return (opcode->flags & F_HAS_ALIAS) != 0;
   1331 }
   1332 
   1333 /* Priority for disassembling preference.  */
   1334 static inline int
   1335 opcode_priority (const aarch64_opcode *opcode)
   1336 {
   1337   return (opcode->flags >> 2) & 0x3;
   1338 }
   1339 
   1340 static inline bool
   1341 pseudo_opcode_p (const aarch64_opcode *opcode)
   1342 {
   1343   return (opcode->flags & F_PSEUDO) != 0lu;
   1344 }
   1345 
   1346 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case
   1347    by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
   1348    [IDX, IDX + 1].  */
   1349 static inline bool
   1350 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
   1351 {
   1352   if (opcode->flags & F_OPD_PAIR_OPT)
   1353     return (((opcode->flags >> 12) & 0x7) == idx
   1354 	    || ((opcode->flags >> 12) & 0x7) == idx + 1);
   1355   return ((opcode->flags >> 12) & 0x7) == idx + 1;
   1356 }
   1357 
   1358 static inline aarch64_insn
   1359 get_optional_operand_default_value (const aarch64_opcode *opcode)
   1360 {
   1361   return (opcode->flags >> 15) & 0x1f;
   1362 }
   1363 
   1364 static inline unsigned int
   1365 get_opcode_dependent_value (const aarch64_opcode *opcode)
   1366 {
   1367   return (opcode->flags >> 24) & 0x7;
   1368 }
   1369 
   1370 static inline bool
   1371 opcode_has_special_coder (const aarch64_opcode *opcode)
   1372 {
   1373   return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
   1374 	  | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
   1375 	  | F_OPD_SIZE | F_RCPC3_SIZE)) != 0;
   1376 }
   1377 
struct aarch64_name_value_pair
{
  const char *  name;	/* Textual name.  */
  aarch64_insn	value;	/* Associated value.  */
};
   1384 
/* Lookup tables mapping operand-modifier, barrier, prefetch and hint
   names to their instruction encodings.  */
extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
extern const struct aarch64_name_value_pair aarch64_prfops [32];
extern const struct aarch64_name_value_pair aarch64_hint_options [];
   1390 
#define AARCH64_MAX_SYSREG_NAME_LEN 32

/* An entry in the system register tables: textual name, encoding and
   per-register flags.  */
typedef struct
{
  const char *  name;
  aarch64_insn	value;
  uint32_t	flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;

extern const aarch64_sys_reg aarch64_sys_regs [];
extern const aarch64_sys_reg aarch64_pstatefields [];
/* Predicates on a system register's encoding.  */
extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
extern bool aarch64_sys_reg_128bit_p (const uint32_t);
extern bool aarch64_sys_reg_alias_p (const uint32_t);
/* Return whether the given PSTATE field is available under the given
   feature set.  */
extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
					     const aarch64_sys_reg *);
   1411 
   1412 typedef struct
   1413 {
   1414   const char *name;
   1415   uint32_t value;
   1416   uint32_t flags ;
   1417 
   1418   /* A set of features, all of which are required for this system instruction to be
   1419      available.  */
   1420   aarch64_feature_set features;
   1421 } aarch64_sys_ins_reg;
   1422 
   1423 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
   1424 extern bool
   1425 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
   1426 				 const char *reg_name,
   1427 				 uint32_t, const aarch64_feature_set *);
   1428 
   1429 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
   1430 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
   1431 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
   1432 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
   1433 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
   1434 
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  AARCH64_MOD_MSL,
  /* Shift operators.  */
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  /* Zero-extend operators.  */
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  /* Sign-extend operators.  */
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  /* Immediate multipliers; MUL_VL presumably scales by the vector
     length -- confirm against aarch64_operand_modifiers.  */
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
   1456 
/* Return true if the given modifier KIND is an extend operator (as
   opposed to a shift).  */
bool
aarch64_extend_operator_p (enum aarch64_modifier_kind);

/* Map an aarch64_operand_modifiers table entry back to its kind.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
/* Condition.  */

typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;

extern const aarch64_cond aarch64_conds[16];

/* Look up the condition with the given 4-bit encoding VALUE.  */
const aarch64_cond* get_cond_from_value (aarch64_insn value);
/* Return the logically inverted condition of COND.  */
const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
   1476 
/* Information about a reference to part of ZA.  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  struct
  {
    /* The 32-bit index register (presumably a W register -- confirm
       against the operand parsers).  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
   1505 
/* Information about a list of registers.  */
struct aarch64_reglist
{
  unsigned first_regno : 8;
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if the list is of register elements (i.e. each entry carries a
     lane index).  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
   1518 
/* Structure representing an operand.  */

struct aarch64_opnd_info
{
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;
  /* Position of this operand within the instruction's operand list.  */
  int idx;

  /* The variant part below is selected by TYPE's operand class.  */
  union
    {
      struct
	{
	  unsigned regno;
	} reg;
      /* A register with a lane/element index, e.g. Vn.S[2].  */
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
      struct aarch64_indexed_za indexed_za;

      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};

typedef struct aarch64_opnd_info aarch64_opnd_info;
   1610 
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
   1635 
/* Defining the HINT #imm values for the aarch64_hint_options.  */
#define HINT_OPD_CSYNC	0x11
#define HINT_OPD_DSYNC	0x13
/* NOTE(review): C/J/JC look like BTI target-operand encodings -- confirm
   against aarch64_hint_options.  */
#define HINT_OPD_C	0x22
#define HINT_OPD_J	0x24
#define HINT_OPD_JC	0x26
/* Used when the HINT instruction takes no operand.  */
#define HINT_OPD_NULL	0x00
   1643 
   1644 
   1645 /* Diagnosis related declaration and interface.  */
   1647 
   1648 /* Operand error kind enumerators.
   1649 
   1650    AARCH64_OPDE_RECOVERABLE
   1651      Less severe error found during the parsing, very possibly because that
   1652      GAS has picked up a wrong instruction template for the parsing.
   1653 
   1654    AARCH64_OPDE_A_SHOULD_FOLLOW_B
   1655      The instruction forms (or is expected to form) part of a sequence,
   1656      but the preceding instruction in the sequence wasn't the expected one.
   1657      The message refers to two strings: the name of the current instruction,
   1658      followed by the name of the expected preceding instruction.
   1659 
   1660    AARCH64_OPDE_EXPECTED_A_AFTER_B
   1661      Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
   1662      so that the current instruction is assumed to be the incorrect one:
   1663      "since the previous instruction was B, the current one should be A".
   1664 
   1665    AARCH64_OPDE_SYNTAX_ERROR
   1666      General syntax error; it can be either a user error, or simply because
   1667      that GAS is trying a wrong instruction template.
   1668 
   1669    AARCH64_OPDE_FATAL_SYNTAX_ERROR
   1670      Definitely a user syntax error.
   1671 
   1672    AARCH64_OPDE_INVALID_VARIANT
   1673      No syntax error, but the operands are not a valid combination, e.g.
   1674      FMOV D0,S0
   1675 
   1676    The following errors are only reported against an asm string that is
   1677    syntactically valid and that has valid operand qualifiers.
   1678 
   1679    AARCH64_OPDE_INVALID_VG_SIZE
   1680      Error about a "VGx<n>" modifier in a ZA index not having the
   1681      correct <n>.  This error effectively forms a pair with
   1682      AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
   1683      of vectors that an instruction operates on.  However, the "VGx<n>"
   1684      modifier is optional, whereas a register list always has a known
   1685      and explicit length.  It therefore seems better to place more
   1686      importance on the register list length when selecting an opcode table
   1687      entry.  This in turn means that having an incorrect register length
   1688      should be more severe than having an incorrect "VGx<n>".
   1689 
   1690    AARCH64_OPDE_REG_LIST_LENGTH
   1691      Error about a register list operand having an unexpected number of
   1692      registers.  This error is low severity because there might be another
   1693      opcode entry that supports the given number of registers.
   1694 
   1695    AARCH64_OPDE_REG_LIST_STRIDE
   1696      Error about a register list operand having the correct number
   1697      (and type) of registers, but an unexpected stride.  This error is
   1698      more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
   1699      that the length is known to be correct.  However, it is lower than
   1700      many other errors, since some instructions have forms that share
   1701      the same number of registers but have different strides.
   1702 
   1703    AARCH64_OPDE_UNTIED_IMMS
   1704      The asm failed to use the same immediate for a destination operand
   1705      and a tied source operand.
   1706 
   1707    AARCH64_OPDE_UNTIED_OPERAND
   1708      The asm failed to use the same register for a destination operand
   1709      and a tied source operand.
   1710 
   1711    AARCH64_OPDE_OUT_OF_RANGE
   1712      Error about some immediate value out of a valid range.
   1713 
   1714    AARCH64_OPDE_UNALIGNED
   1715      Error about some immediate value not properly aligned (i.e. not being a
   1716      multiple times of a certain value).
   1717 
   1718    AARCH64_OPDE_OTHER_ERROR
   1719      Error of the highest severity and used for any severe issue that does not
   1720      fall into any of the above categories.
   1721 
   1722    AARCH64_OPDE_INVALID_REGNO
   1723      A register was syntactically valid and had the right type, but it was
   1724      outside the range supported by the associated operand field.  This is
   1725      a high severity error because there are currently no instructions that
   1726      would accept the operands that precede the erroneous one (if any) and
   1727      yet still accept a wider range of registers.
   1728 
   AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
   1731    AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
   1732    only libopcodes has the information about the valid variants of each
   1733    instruction.
   1734 
   1735    The enumerators have an increasing severity.  This is helpful when there are
   1736    multiple instruction templates available for a given mnemonic name (e.g.
   1737    FMOV); this mechanism will help choose the most suitable template from which
   1738    the generated diagnostics can most closely describe the issues, if any.
   1739 
   1740    This enum needs to be kept up-to-date with operand_mismatch_kind_names
   1741    in tc-aarch64.c.  */
   1742 
/* Operand error kinds, listed in increasing order of severity; see the
   large comment above for the meaning of each enumerator.  Keep in sync
   with operand_mismatch_kind_names in gas/config/tc-aarch64.c.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,		/* No error.  */
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
   1762 
/* A single operand-related diagnostic.
   N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  enum aarch64_operand_error_kind kind;
  /* Index of the operand in error -- NOTE(review): confirm the sentinel
     used when no specific operand applies.  */
  int index;
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  bool non_fatal;
};
   1776 
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence (capacity of
     the INSTR array).  */
  int num_allocated_insns;
};
   1789 
/* Encoding entrypoint.  */

/* Encode an aarch64_inst against an opcode entry, producing the binary
   instruction word; returns false (with the error structure filled in)
   on failure.  */
extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
		       aarch64_insn *, aarch64_opnd_qualifier_t *,
		       aarch64_operand_error *, aarch64_instr_sequence *);

/* Replace the opcode of an instruction, returning the new entry.  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
			const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */

extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
   1806 
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS with STYLE applied to the string.  STYLER is a pointer back
     to this object so that the callback can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  Opaque to libopcodes.  */
  void *state;
};
   1829 
/* Generate the string representation of an operand.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);

/* Miscellaneous interface.  */

/* Return the position of an operand type within an operand list --
   NOTE(review): confirm the not-found convention in aarch64-opc.c.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

/* Return whether the opcode overwrites one of its source operands.  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Predicates on a decoded operand: stack pointer / zero register.  */
extern int
aarch64_stack_pointer_p (const aarch64_opnd_info *);

extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode a binary instruction word into an
   aarch64_inst.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

/* Open a new instruction sequence (see struct aarch64_instr_sequence).  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* Return whether the 64-bit immediate is valid for SVE DUPM/MOV.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Return whether the given feature set supports the instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);

/* Calculate the transfer size of a load/store from its operands.  */
extern int
calc_ldst_datasize (const aarch64_opnd_info *opnds);
   1888 
/* Debug tracing, compiled in only when DEBUG_AARCH64 is defined.  */
#ifdef DEBUG_AARCH64
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* NOTE(review): these expand to a bare brace block rather than the usual
   do { ... } while (0), so `if (c) DEBUG_TRACE (...); else ...' will not
   parse.  Left as-is to avoid changing behavior at existing call sites.  */
#define DEBUG_TRACE(M, ...)					\
  {								\
    if (debug_dump)						\
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
  }

#define DEBUG_TRACE_IF(C, M, ...)				\
  {								\
    if (debug_dump && (C))					\
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
  }
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) ;
#define DEBUG_TRACE_IF(C, M, ...) ;
#endif /* DEBUG_AARCH64 */
   1910 
/* Textual-name tables, indexed by encoding value (array sizes match the
   corresponding field widths).  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
   1915 
   1916 #ifdef __cplusplus
   1917 }
   1918 #endif
   1919 
   1920 #endif /* OPCODE_AARCH64_H */
   1921