/* AArch64 assembler/disassembler support.

   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the license, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
21
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29
30 #include "dis-asm.h"
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET		0

/* An AArch64 instruction word; all encodings are 32 bits wide.  */
typedef uint32_t aarch64_insn;
40
41 /* An enum containing all known CPU features. The values act as bit positions
42 into aarch64_feature_set. */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  /* SVE2 AES instructions.  */
  AARCH64_FEATURE_SVE2_AES,
  /* SVE2 bit-permute instructions.  */
  AARCH64_FEATURE_SVE2_BITPERM,
  /* SVE2 SM4 instructions.  */
  AARCH64_FEATURE_SVE2_SM4,
  /* SVE2 SHA3 instructions.  */
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAC,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions.  */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions.  */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* XS memory attribute.  */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout.  */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  /* F32 matrix multiply instructions.  */
  AARCH64_FEATURE_F32MM,
  /* F64 matrix multiply instructions.  */
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors.  */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records.  */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source.  */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events.  */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* System Control Register2.  */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps.  */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address.  */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement.  */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension.  */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension.  */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension.  */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension.  */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers.  */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions.  */
  AARCH64_FEATURE_D128,
  /* Armv8.9-A/Armv9.4-A architecture Debug extension.  */
  AARCH64_FEATURE_DEBUGv8p9,
  /* Performance Monitors Extension.  */
  AARCH64_FEATURE_PMUv3p9,
  /* Performance Monitors Snapshots Extension.  */
  AARCH64_FEATURE_PMUv3_SS,
  /* Performance Monitors Instruction Counter Extension.  */
  AARCH64_FEATURE_PMUv3_ICNTR,
  /* Performance Monitors Synchronous-Exception-Based Event Extension.  */
  AARCH64_FEATURE_SEBEP,
  /* SVE2.1 and SME2.1 non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_B16B16,
  /* SME2.1 instructions.  */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions.  */
  AARCH64_FEATURE_SVE2p1,
  /* RCPC3 instructions.  */
  AARCH64_FEATURE_RCPC3,
  /* Keep this last: the number of feature bits, used to size
     aarch64_feature_set::flags.  */
  AARCH64_NUM_FEATURES
};
233
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   Yields 0 when BIT does not live in 64-bit word X of the set.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  ((BIT) / 64 != (X) ? 0 : 1ULL << ((BIT) % 64))

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
245
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.
   Each macro takes the feature-set word index X (see AARCH64_FEATBIT).  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAC)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM)	\
					 | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, RCPC2)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, XS)	\
					 | AARCH64_FEATBIT (X, WFXT)	\
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, SPEv1p4) \
					 | AARCH64_FEATBIT (X, SPE_CRR)	\
					 | AARCH64_FEATBIT (X, SPE_FDS) \
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, SCTLR2)	\
					 | AARCH64_FEATBIT (X, FGT2)	\
					 | AARCH64_FEATBIT (X, PFAR)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, AIE)	\
					 | AARCH64_FEATBIT (X, S1PIE)	\
					 | AARCH64_FEATBIT (X, S2PIE)	\
					 | AARCH64_FEATBIT (X, S1POE)	\
					 | AARCH64_FEATBIT (X, S2POE)	\
					 | AARCH64_FEATBIT (X, TCR2)	\
					 | AARCH64_FEATBIT (X, DEBUGv8p9) \
					 | AARCH64_FEATBIT (X, PMUv3p9)	\
					 | AARCH64_FEATBIT (X, PMUv3_SS) \
					 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
					 | AARCH64_FEATBIT (X, SEBEP)	\
					 | AARCH64_FEATBIT (X, PREDRES2) \
					 )

#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
/* Armv9.N-A adds no new features beyond the corresponding
   Armv8.(N+5)-A feature set.  */
#define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
#define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
#define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
#define AARCH64_ARCH_V9_4A_FEATURES(X)	AARCH64_ARCH_V8_9A_FEATURES (X)
320
/* Architectures are the sum of the base and extensions.  Each vN.M
   macro below builds on the previous version's full mask, so a later
   architecture implies all features of the earlier ones.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8)	\
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X)		\
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)		\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)		\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)		\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)		\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)		\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)		\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)		\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R: the v8.4 feature set plus the V8R bit, minus the A-profile
   base feature (V8A) and LOR, which it does not provide.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

/* Armv9-A builds on Armv8.5-A, not on v8.9-A.  */
#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X)		\
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X)		\
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X)		\
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X)		\
				 | AARCH64_ARCH_V9_4A_FEATURES (X))

/* No architecture at all (empty feature mask).  */
#define AARCH64_ARCH_NONE(X)	0
359
/* CPU-specific features.  A bitmask with one bit per
   aarch64_feature_bit value, packed into 64-bit words (currently two
   words, since AARCH64_NUM_FEATURES <= 128).  */
typedef struct {
  uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
} aarch64_feature_set;
364
/* NOTE: the macros below hard-code flags[0] and flags[1]; they rely on
   aarch64_feature_set containing exactly two 64-bit words.  If
   AARCH64_NUM_FEATURES ever exceeds 128, all of them must be updated.  */

/* True if CPU implements AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)		\
  ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0	\
   && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)

/* True if CPU implements every feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)	\
  ((~(CPU).flags[0] & (FEAT).flags[0]) == 0	\
   && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)

/* True if CPU implements at least one feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
  (((CPU).flags[0] & (FEAT).flags[0]) != 0	\
   || ((CPU).flags[1] & (FEAT).flags[1]) != 0)

/* Assign DEST the mask produced by macro FEAT, which must accept the
   word index as its single argument (e.g. an AARCH64_ARCH_* macro).  */
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  ((DEST).flags[0] = FEAT (0), \
   (DEST).flags[1] = FEAT (1))

/* DEST = SRC with feature AARCH64_FEATURE_<FEAT> removed.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT) \
  ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \
   (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))

/* TARG = union of feature sets F1 and F2.  */
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)	\
  do						\
    {						\
      (TARG).flags[0] = (F1).flags[0] | (F2).flags[0]; \
      (TARG).flags[1] = (F1).flags[1] | (F2).flags[1]; \
    }						\
  while (0)

/* TARG = feature set F1 with every feature in F2 removed.  */
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)	\
  do						\
    {						\
      (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0]; \
      (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1]; \
    }						\
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { 0, 0 } }
#define AARCH64_ALL_FEATURES { { -1, -1 } }

/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE(FEAT) \
  { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }
410
/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  */
#define AARCH64_ARCH_FEATURES(ARCH)	\
  { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }

/* Used by AARCH64_CPU_FEATURES.  Each level of the ladder ORs in one
   feature bit and recurses on the remainder, bottoming out at _1 with
   the architecture mask; at most 9 extra features are supported.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...)	\
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".
   N must match the number of variadic arguments (1 <= N <= 9).  */
#define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
  { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),		\
      AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...".  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
446
/* Broad classes of instruction operand; each aarch64_opnd code below
   belongs to one of these.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,		/* No operand.  */
  AARCH64_OPND_CLASS_INT_REG,		/* Integer register.  */
  AARCH64_OPND_CLASS_MODIFIED_REG,	/* Extended/shifted register.  */
  AARCH64_OPND_CLASS_FP_REG,		/* Floating-point register.  */
  AARCH64_OPND_CLASS_SIMD_REG,		/* AdvSIMD vector register.  */
  AARCH64_OPND_CLASS_SIMD_ELEMENT,	/* AdvSIMD vector element.  */
  AARCH64_OPND_CLASS_SISD_REG,		/* AdvSIMD scalar register.  */
  AARCH64_OPND_CLASS_SIMD_REGLIST,	/* AdvSIMD vector register list.  */
  AARCH64_OPND_CLASS_SVE_REG,		/* SVE vector register.  */
  AARCH64_OPND_CLASS_SVE_REGLIST,	/* SVE vector register list.  */
  AARCH64_OPND_CLASS_PRED_REG,		/* SVE predicate register.  */
  AARCH64_OPND_CLASS_ZA_ACCESS,		/* SME ZA array access.  */
  AARCH64_OPND_CLASS_ADDRESS,		/* Memory address.  */
  AARCH64_OPND_CLASS_IMMEDIATE,		/* Immediate value.  */
  AARCH64_OPND_CLASS_SYSTEM,		/* System operand (sysreg, barrier, ...).  */
  AARCH64_OPND_CLASS_COND,		/* Condition code.  */
};
466
467 /* Operand code that helps both parsing and coding.
468 Keep AARCH64_OPERANDS synced. */
469
470 enum aarch64_opnd
471 {
472 AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!*/
473
474 AARCH64_OPND_Rd, /* Integer register as destination. */
475 AARCH64_OPND_Rn, /* Integer register as source. */
476 AARCH64_OPND_Rm, /* Integer register as source. */
477 AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */
478 AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */
479 AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */
480 AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */
481 AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */
482 AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */
483 AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */
484 AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */
485
486 AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
487 AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
488 AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
489 AARCH64_OPND_PAIRREG, /* Paired register operand. */
490 AARCH64_OPND_PAIRREG_OR_XZR, /* Paired register operand, optionally xzr. */
491 AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
492 AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
493
494 AARCH64_OPND_Fd, /* Floating-point Fd. */
495 AARCH64_OPND_Fn, /* Floating-point Fn. */
496 AARCH64_OPND_Fm, /* Floating-point Fm. */
497 AARCH64_OPND_Fa, /* Floating-point Fa. */
498 AARCH64_OPND_Ft, /* Floating-point Ft. */
499 AARCH64_OPND_Ft2, /* Floating-point Ft2. */
500
501 AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */
502 AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
503 AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
504
505 AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
506 AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
507 AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
508 AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
509 AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */
510 AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */
511 AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
512 AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
513 AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
514 AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
515 qualifier is S_H. */
516 AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
517 AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
518 AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
519 structure to all lanes. */
520 AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */
521
522 AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
523 AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
524
525 AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
526 AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
527 AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
528 AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
529 AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
530 AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */
531 AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate. */
532 AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
533 (no encoding). */
534 AARCH64_OPND_IMM0, /* Immediate for #0. */
535 AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */
536 AARCH64_OPND_FPIMM, /* Floating-point Immediate. */
537 AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */
538 AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
539 AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
540 AARCH64_OPND_IMM, /* Immediate. */
541 AARCH64_OPND_IMM_2, /* Immediate. */
542 AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
543 AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
544 AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
545 AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg. */
546 AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */
547 AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */
548 AARCH64_OPND_BIT_NUM, /* Immediate. */
549 AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */
550 AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
551 AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */
552 AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
553 AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
554 each condition flag. */
555
556 AARCH64_OPND_LIMM, /* Logical Immediate. */
557 AARCH64_OPND_AIMM, /* Arithmetic immediate. */
558 AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
559 AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
560 AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
561 AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
562 AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
563 AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
564
565 AARCH64_OPND_COND, /* Standard condition as the last operand. */
566 AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
567
568 AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
569 AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */
570 AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */
571 AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */
572 AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */
573
574 AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */
575 AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */
576 AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */
577 AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */
578 AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
579 negative or unaligned and there is
580 no writeback allowed. This operand code
581 is only used to support the programmer-
582 friendly feature of using LDR/STR as the
583 the mnemonic name for LDUR/STUR instructions
584 wherever there is no ambiguity. */
585 AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
586 AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
587 16) immediate. */
588 AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
589 AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
590 16) immediate. */
591 AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
592 AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
593 AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
594
595 AARCH64_OPND_SYSREG, /* System register operand. */
596 AARCH64_OPND_SYSREG128, /* 128-bit system register operand. */
597 AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */
598 AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */
599 AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */
600 AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */
601 AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */
602 AARCH64_OPND_SYSREG_TLBIP, /* System register <tlbip_op> operand. */
603 AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */
604 AARCH64_OPND_BARRIER, /* Barrier operand. */
605 AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */
606 AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
607 AARCH64_OPND_PRFOP, /* Prefetch operation. */
608 AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */
609 AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
610 AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */
611 AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
612 AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */
613 AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */
614 AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
615 AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
616 AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
617 AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
618 AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
619 AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
620 AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
621 AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
622 AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
623 AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
624 AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
625 AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
626 AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>]. */
627 AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */
628 AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
629 AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
630 AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
631 AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */
632 AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
633 AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
634 AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
635 AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
636 AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */
637 AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
638 AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
639 AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
640 AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
641 AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
642 Bit 14 controls S/U choice. */
643 AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
644 Bit 22 controls S/U choice. */
645 AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
646 Bit 14 controls S/U choice. */
647 AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
648 Bit 22 controls S/U choice. */
649 AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
650 Bit 14 controls S/U choice. */
651 AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
652 Bit 22 controls S/U choice. */
653 AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
654 Bit 14 controls S/U choice. */
655 AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
656 Bit 22 controls S/U choice. */
657 AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
658 AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
659 AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
660 AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
661 AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */
662 AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */
663 AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */
664 AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
665 AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
666 AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
667 AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
668 AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
669 AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
670 AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
671 AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
672 AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */
673 AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
674 AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
675 AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
676 AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
677 AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
678 AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
679 AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
680 AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */
681 AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
682 AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
683 AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
684 AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */
685 AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
686 AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
687 AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
688 AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */
689 AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
690 AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */
691 AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
692 AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
693 AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
694 AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
695 AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */
696 AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
697 AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
698 AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */
699 AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
700 AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
701 AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
702 AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
703 AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
704 AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
705 AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
706 AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
707 AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
708 AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
709 AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
710 AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
711 AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B). */
712 AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H). */
713 AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S). */
714 AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D). */
715 AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B). */
716 AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H). */
717 AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
718 AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D). */
719 AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
720 AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
721 AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
722 AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
723 AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
724 AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
725 AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */
726 AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */
727 AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
728 AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */
729 AARCH64_OPND_SVE_Zm_imm4, /* SVE vector register with 4bit index. */
730 AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
731 AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
732 AARCH64_OPND_SVE_Zn_5_INDEX, /* Indexed SVE vector register, for DUPQ. */
733 AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
734 AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
735 AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
736 AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
737 AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
738 AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
739 AARCH64_OPND_SME_Zm, /* SVE vector register list in 4-bit Zm. */
740 AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
741 AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
742 AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */
743 AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */
744 AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */
745 AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */
746 AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */
747 AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */
748 AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */
749 AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */
750 AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */
751 AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */
752 AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */
753 AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */
754 AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */
755 AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */
756 AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */
757 AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */
758 AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */
759 AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */
760 AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */
761 AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */
762 AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */
763 AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */
764 AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */
765 AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */
766 AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */
767 AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */
768 AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */
769 AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */
770 AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */
771 AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */
772 AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */
773 AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */
774 AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */
775 AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */
776 AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */
777 AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */
778 AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */
779 AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */
780 AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */
781 AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */
782 AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */
783 AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */
784 AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */
785 AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */
786 AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */
787 AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */
788 AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */
789 AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */
790 AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */
791 AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */
792 AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */
793 AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
794 AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */
795 AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */
796 AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */
797 AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */
798 AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */
799 AARCH64_OPND_SME_Zt2, /* Double SVE vector register list. */
800 AARCH64_OPND_SME_Zt3, /* Triple SVE vector register list. */
801 AARCH64_OPND_SME_Zt4, /* Quad SVE vector register list. */
802 AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND, /* [<Xn|SP>]{, #<imm>}. */
803 AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!. */
804 AARCH64_OPND_RCPC3_ADDR_POSTIND, /* [<Xn|SP>], #<imm>. */
805 AARCH64_OPND_RCPC3_ADDR_PREIND_WB, /* [<Xn|SP>, #<imm>]!. */
806 AARCH64_OPND_RCPC3_ADDR_OFFSET
807 };
808
/* Qualifier constrains an operand.  It either specifies a variant of an
   operand type or limits values available to an operand type.

   N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */

enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,	/* Byte (B).  */
  AARCH64_OPND_QLF_S_H,	/* Half-word (H).  */
  AARCH64_OPND_QLF_S_S,	/* Word (S).  */
  AARCH64_OPND_QLF_S_D,	/* Double-word (D).  */
  AARCH64_OPND_QLF_S_Q,	/* Quad-word (Q).  */
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception from the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* SVE predicate qualifiers; from the P_Z/P_M names these are presumably
     the /z (zeroing) and /m (merging) predication suffixes -- confirm
     against aarch64_opnd_qualifiers.  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,	/* CRn, CRm.  */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,
};
893
894 /* Instruction class. */
896
/* One of these classifies each entry in aarch64_opcode_table (the iclass
   field of struct aarch64_opcode).  The names appear to mirror the A64
   encoding-group mnemonics of the architecture reference (asimd* = AdvSIMD
   vector, asisd* = AdvSIMD scalar, sve_*/sme_* = SVE/SME groups) -- see
   the opcode table for the exact grouping.  */
enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  sme2_movaz,
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
  the,
  sve2_urqvs,
  sve_index1,
  rcpc3
};
1019
1020 /* Opcode enumerators. */
1021
/* Values for the op field of aarch64_opcode, distinguishing table entries
   (typically aliases such as the MOV forms) that need bespoke treatment
   during encoding/decoding; OP_NIL presumably marks entries needing no
   distinct identity.  */
enum aarch64_op
{
  OP_NIL,
  /* Loads/stores; _POS entries pair with the ldst_pos iclass forms.  */
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  /* SVE/SME predicate and vector MOV aliases; the suffixes name the
     operand kinds involved (P = predicate, Z = SVE vector, V = SIMD&FP
     register, Zi = indexed SVE vector).  */
  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo.  */
};
1124
/* Error types returned by instruction verifiers (see the verifier callback
   in struct aarch64_opcode).  NOTE(review): the short names presumably
   stand for OK / undefined / unpredictable / not-yet-implemented /
   verifier failure -- confirm against the verifiers' uses.  */
enum err_type
{
  ERR_OK,
  ERR_UND,
  ERR_UNP,
  ERR_NYI,
  ERR_VFI,
  ERR_NR_ENTRIES	/* Number of entries; keep this last.  */
};
1135
/* Maximum number of operands an instruction can have.  */
#define AARCH64_MAX_OPND_NUM 7
/* Maximum number of qualifier sequences an instruction can have.  */
#define AARCH64_MAX_QLF_SEQ_NUM 10
/* Operand qualifier typedef; optimized for the size.  */
typedef unsigned char aarch64_opnd_qualifier_t;
/* Operand qualifier sequence typedef: one qualifier per operand,
   AARCH64_MAX_OPND_NUM entries in total.  */
typedef aarch64_opnd_qualifier_t \
	aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1145
1146 /* FIXME: improve the efficiency. */
1147 static inline bool
1148 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1149 {
1150 int i;
1151 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1152 if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1153 return false;
1154 return true;
1155 }
1156
/* Forward declarations, needed e.g. by the verifier callback declared in
   struct aarch64_opcode below.  */
/* Forward declare error reporting type.  */
typedef struct aarch64_operand_error aarch64_operand_error;
/* Forward declare instruction sequence type.  */
typedef struct aarch64_instr_sequence aarch64_instr_sequence;
/* Forward declare instruction definition.  */
typedef struct aarch64_inst aarch64_inst;
1163
/* This structure holds information for a particular opcode.  */

struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (see the F_*
     macros below).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks (see
     the C_* macros below).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
1216
typedef struct aarch64_opcode aarch64_opcode;

/* Table describing all the AArch64 opcodes.  */
extern const aarch64_opcode aarch64_opcode_table[];
1221
/* Opcode flags, stored in the flags field of aarch64_opcode.  */
#define F_ALIAS (1 << 0)
#define F_HAS_ALIAS (1 << 1)
/* Disassembly preference priority 1-3 (the larger the higher).  If nothing
   is specified, it is the priority 0 by default, i.e. the lowest priority.
   The priority lives in bits [3:2]; see opcode_priority.  */
#define F_P1 (1 << 2)
#define F_P2 (2 << 2)
#define F_P3 (3 << 2)
/* Flag an instruction that is truly conditional executed, e.g. b.cond.  */
#define F_COND (1 << 4)
/* Instruction has the field of 'sf'.  */
#define F_SF (1 << 5)
/* Instruction has the field of 'size:Q'.  */
#define F_SIZEQ (1 << 6)
/* Floating-point instruction has the field of 'type'.  */
#define F_FPTYPE (1 << 7)
/* AdvSIMD scalar instruction has the field of 'size'.  */
#define F_SSIZE (1 << 8)
/* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
#define F_T (1 << 9)
/* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
#define F_GPRSIZE_IN_Q (1 << 10)
/* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
#define F_LDS_SIZE (1 << 11)
/* Optional operand; assume maximum of 1 operand can be optional.
   Bits [14:12] hold the index of the optional operand plus one;
   see optional_operand_p.  */
#define F_OPD0_OPT (1 << 12)
#define F_OPD1_OPT (2 << 12)
#define F_OPD2_OPT (3 << 12)
#define F_OPD3_OPT (4 << 12)
#define F_OPD4_OPT (5 << 12)
/* Default value for the optional operand when omitted from the assembly.  */
#define F_DEFAULT(X) (((X) & 0x1f) << 15)
/* Instruction that is an alias of another instruction needs to be
   encoded/decoded by converting it to/from the real form, followed by
   the encoding/decoding according to the rules of the real opcode.
   This compares to the direct coding using the alias's information.
   N.B. this flag requires F_ALIAS to be used together.  */
#define F_CONV (1 << 20)
/* Use together with F_ALIAS to indicate an alias opcode is a programmer
   friendly pseudo instruction available only in the assembly code (thus will
   not show up in the disassembly).  */
#define F_PSEUDO (1 << 21)
/* Instruction has miscellaneous encoding/decoding rules.  */
#define F_MISC (1 << 22)
/* Instruction has the field of 'N'; used in conjunction with F_SF.  */
#define F_N (1 << 23)
/* Opcode dependent field, stored in bits [26:24]; see
   get_opcode_dependent_value.  */
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'.  */
#define F_LSE_SZ (1 << 27)
/* Require an exact qualifier match, even for NIL qualifiers.  */
#define F_STRICT (1ULL << 28)
/* This system instruction is used to read system registers.  */
#define F_SYS_READ (1ULL << 29)
/* This system instruction is used to write system registers.  */
#define F_SYS_WRITE (1ULL << 30)
/* This instruction has an extra constraint on it that imposes a requirement on
   subsequent instructions.  */
#define F_SCAN (1ULL << 31)
/* Instruction takes a pair of optional operands.  If we specify the Nth operand
   to be optional, then we also implicitly specify (N+1)th operand to also be
   optional.  */
#define F_OPD_PAIR_OPT (1ULL << 32)
/* This instruction does not allow the full range of values that the
   width of fields in the assembler instruction would theoretically
   allow.  This impacts the constraints on assembly but yields no
   impact on disassembly.  */
#define F_OPD_NARROW (1ULL << 33)
/* For the instruction with size[22:23] field.  */
#define F_OPD_SIZE (1ULL << 34)
/* RCPC3 instruction has the field of 'size'.  */
#define F_RCPC3_SIZE (1ULL << 35)
/* Next bit is 36.  */
1295
/* Instruction constraints, stored in the constraints field of
   aarch64_opcode.  */
/* This instruction has a predication constraint on the instruction at PC+4.  */
#define C_SCAN_MOVPRFX (1U << 0)
/* This instruction's operation width is determined by the operand with the
   largest element size.  */
#define C_MAX_ELEM (1U << 1)
/* MOPS scan constraints; the code lives in bits [3:2] and C_SCAN_MOPS_PME
   is the mask covering all three values.  NOTE(review): P/M/E presumably
   name the prologue/main/epilogue parts of a MOPS sequence -- confirm
   against the verifier.  */
#define C_SCAN_MOPS_P (1U << 2)
#define C_SCAN_MOPS_M (2U << 2)
#define C_SCAN_MOPS_E (3U << 2)
#define C_SCAN_MOPS_PME (3U << 2)
/* Next bit is 4.  */
1307
1308 static inline bool
1309 alias_opcode_p (const aarch64_opcode *opcode)
1310 {
1311 return (opcode->flags & F_ALIAS) != 0;
1312 }
1313
1314 static inline bool
1315 opcode_has_alias (const aarch64_opcode *opcode)
1316 {
1317 return (opcode->flags & F_HAS_ALIAS) != 0;
1318 }
1319
1320 /* Priority for disassembling preference. */
1321 static inline int
1322 opcode_priority (const aarch64_opcode *opcode)
1323 {
1324 return (opcode->flags >> 2) & 0x3;
1325 }
1326
1327 static inline bool
1328 pseudo_opcode_p (const aarch64_opcode *opcode)
1329 {
1330 return (opcode->flags & F_PSEUDO) != 0lu;
1331 }
1332
1333 /* Deal with two possible scenarios: If F_OPD_PAIR_OPT not set, as is the case
1334 by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
1335 [IDX, IDX + 1]. */
1336 static inline bool
1337 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1338 {
1339 if (opcode->flags & F_OPD_PAIR_OPT)
1340 return (((opcode->flags >> 12) & 0x7) == idx
1341 || ((opcode->flags >> 12) & 0x7) == idx + 1);
1342 return ((opcode->flags >> 12) & 0x7) == idx + 1;
1343 }
1344
1345 static inline aarch64_insn
1346 get_optional_operand_default_value (const aarch64_opcode *opcode)
1347 {
1348 return (opcode->flags >> 15) & 0x1f;
1349 }
1350
1351 static inline unsigned int
1352 get_opcode_dependent_value (const aarch64_opcode *opcode)
1353 {
1354 return (opcode->flags >> 24) & 0x7;
1355 }
1356
1357 static inline bool
1358 opcode_has_special_coder (const aarch64_opcode *opcode)
1359 {
1360 return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1361 | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
1362 | F_OPD_SIZE | F_RCPC3_SIZE)) != 0;
1363 }
1364
/* A simple name/value pair, used for the tables of named operands below
   (operand modifiers, barrier options, prefetch and hint options).  */
struct aarch64_name_value_pair
{
  const char * name;
  aarch64_insn	value;
};
1371
/* Tables of named operands, each an array of name/value pairs.  */
extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
extern const struct aarch64_name_value_pair aarch64_prfops [32];
extern const struct aarch64_name_value_pair aarch64_hint_options [];

/* Maximum length of a system register name.  */
#define AARCH64_MAX_SYSREG_NAME_LEN 32
1379
/* An entry in the system register tables below: a register NAME with its
   encoded VALUE and lookup FLAGS.  */
typedef struct
{
  const char * name;
  aarch64_insn value;
  uint32_t flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
1390
/* System register and PSTATE field tables, plus query predicates.
   NOTE(review): the uint32_t parameter of the predicates is presumably the
   register's flags word -- confirm against their definitions.  */
extern const aarch64_sys_reg aarch64_sys_regs [];
extern const aarch64_sys_reg aarch64_pstatefields [];
extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
extern bool aarch64_sys_reg_128bit_p (const uint32_t);
extern bool aarch64_sys_reg_alias_p (const uint32_t);
extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
					     const aarch64_sys_reg *);
1398
/* An entry in the system-instruction operand tables below (IC, DC, AT,
   TLBI and SR).  */
typedef struct
{
  const char *name;
  uint32_t value;
  uint32_t flags ;

  /* A set of features, all of which are required for this system instruction to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_ins_reg;
1409
/* Whether the system-instruction operand takes an Xt register operand.  */
extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
/* Whether the named system-instruction operand is supported for the given
   feature sets.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
				 const char *reg_name,
				 uint32_t, const aarch64_feature_set *);

/* Operand tables for the IC, DC, AT, TLBI and SR system instructions.  */
extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1421
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  AARCH64_MOD_MSL,
  /* Shift operators.  */
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  /* Register-extend operators; see aarch64_extend_operator_p.  */
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  /* Multipliers, e.g. the "MUL VL" in SVE addressing forms such as
     [<Xn|SP>{, #<imm>, MUL VL}].  */
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
1443
/* Whether the given modifier kind is a register-extend operator.  */
bool
aarch64_extend_operator_p (enum aarch64_modifier_kind);

/* Look up the modifier kind for an entry of aarch64_operand_modifiers.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
/* Condition.  */

typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;

/* The 16 condition codes, indexed by their encoding.  */
extern const aarch64_cond aarch64_conds[16];

const aarch64_cond* get_cond_from_value (aarch64_insn value);
const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1463
/* Information about a reference to part of ZA, e.g. an SME tile vector
   access such as <ZAn><HV>.D[<Wv>{, <imm>}] or an index into the ZA
   array.  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The [<Wv>, #<imm>...] index expression.  */
  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1492
/* Information about a list of registers.  */
struct aarch64_reglist
{
  unsigned first_regno : 8;
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if it is a list of reg element.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1505
/* Structure representing an operand.  */

struct aarch64_opnd_info
{
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;
  /* Position of this operand within the instruction's operand list.  */
  int idx;

  /* Operand-kind-specific data; which member is active depends on TYPE.  */
  union
    {
      struct
	{
	  unsigned regno;
	} reg;
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      /* Anonymous union (C11/GNU extension): the offset is either
		 an immediate or a register; is_reg says which.  */
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */
      struct aarch64_indexed_za indexed_za;

      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};

typedef struct aarch64_opnd_info aarch64_opnd_info;
1597
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditional-executed instructions, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1622
/* Defining the HINT #imm values for the aarch64_hint_options.
   NOTE(review): CSYNC/DSYNC and C/J/JC are presumably the PSB/TSB and BTI
   hint immediates -- confirm against aarch64_hint_options.  */
#define HINT_OPD_CSYNC	0x11
#define HINT_OPD_DSYNC	0x13
#define HINT_OPD_C	0x22
#define HINT_OPD_J	0x24
#define HINT_OPD_JC	0x26
#define HINT_OPD_NULL	0x00
1630
1631
1632 /* Diagnosis related declaration and interface. */
1634
1635 /* Operand error kind enumerators.
1636
1637 AARCH64_OPDE_RECOVERABLE
1638 Less severe error found during the parsing, very possibly because
1639 GAS has picked up a wrong instruction template for the parsing.
1640
1641 AARCH64_OPDE_A_SHOULD_FOLLOW_B
1642 The instruction forms (or is expected to form) part of a sequence,
1643 but the preceding instruction in the sequence wasn't the expected one.
1644 The message refers to two strings: the name of the current instruction,
1645 followed by the name of the expected preceding instruction.
1646
1647 AARCH64_OPDE_EXPECTED_A_AFTER_B
1648 Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1649 so that the current instruction is assumed to be the incorrect one:
1650 "since the previous instruction was B, the current one should be A".
1651
1652 AARCH64_OPDE_SYNTAX_ERROR
1653 General syntax error; it can be either a user error, or simply because
1654 GAS is trying a wrong instruction template.
1655
1656 AARCH64_OPDE_FATAL_SYNTAX_ERROR
1657 Definitely a user syntax error.
1658
1659 AARCH64_OPDE_INVALID_VARIANT
1660 No syntax error, but the operands are not a valid combination, e.g.
1661 FMOV D0,S0
1662
1663 The following errors are only reported against an asm string that is
1664 syntactically valid and that has valid operand qualifiers.
1665
1666 AARCH64_OPDE_INVALID_VG_SIZE
1667 Error about a "VGx<n>" modifier in a ZA index not having the
1668 correct <n>. This error effectively forms a pair with
1669 AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
1670 of vectors that an instruction operates on. However, the "VGx<n>"
1671 modifier is optional, whereas a register list always has a known
1672 and explicit length. It therefore seems better to place more
1673 importance on the register list length when selecting an opcode table
1674 entry. This in turn means that having an incorrect register length
1675 should be more severe than having an incorrect "VGx<n>".
1676
1677 AARCH64_OPDE_REG_LIST_LENGTH
1678 Error about a register list operand having an unexpected number of
1679 registers. This error is low severity because there might be another
1680 opcode entry that supports the given number of registers.
1681
1682 AARCH64_OPDE_REG_LIST_STRIDE
1683 Error about a register list operand having the correct number
1684 (and type) of registers, but an unexpected stride. This error is
1685 more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
1686 that the length is known to be correct. However, it is lower than
1687 many other errors, since some instructions have forms that share
1688 the same number of registers but have different strides.
1689
1690 AARCH64_OPDE_UNTIED_IMMS
1691 The asm failed to use the same immediate for a destination operand
1692 and a tied source operand.
1693
1694 AARCH64_OPDE_UNTIED_OPERAND
1695 The asm failed to use the same register for a destination operand
1696 and a tied source operand.
1697
1698 AARCH64_OPDE_OUT_OF_RANGE
1699 Error about some immediate value out of a valid range.
1700
   AARCH64_OPDE_UNALIGNED
     Error about some immediate value not properly aligned (i.e. not being a
     multiple of a certain value).
1704
1705 AARCH64_OPDE_OTHER_ERROR
1706 Error of the highest severity and used for any severe issue that does not
1707 fall into any of the above categories.
1708
1709 AARCH64_OPDE_INVALID_REGNO
1710 A register was syntactically valid and had the right type, but it was
1711 outside the range supported by the associated operand field. This is
1712 a high severity error because there are currently no instructions that
1713 would accept the operands that precede the erroneous one (if any) and
1714 yet still accept a wider range of registers.
1715
   AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
1718 AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
1719 only libopcodes has the information about the valid variants of each
1720 instruction.
1721
1722 The enumerators have an increasing severity. This is helpful when there are
1723 multiple instruction templates available for a given mnemonic name (e.g.
1724 FMOV); this mechanism will help choose the most suitable template from which
1725 the generated diagnostics can most closely describe the issues, if any.
1726
1727 This enum needs to be kept up-to-date with operand_mismatch_kind_names
1728 in tc-aarch64.c. */
1729
enum aarch64_operand_error_kind
{
  /* No error recorded.  */
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  /* The enumerators from here on are only reported against an asm string
     that is syntactically valid and that has valid operand qualifiers
     (see the comment above).  */
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
1749
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  /* The kind of diagnostic; AARCH64_OPDE_NIL when no error is recorded.  */
  enum aarch64_operand_error_kind kind;
  /* Index of the operand the error applies to.  NOTE(review): presumably
     0-based with a sentinel when no single operand is at fault -- confirm
     at the GAS/libopcodes call sites.  */
  int index;
  /* Diagnostic message text.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* Whether the error is non-fatal.  NOTE(review): appears to let GAS
     continue with the chosen template -- confirm exact meaning in GAS.  */
  bool non_fatal;
};
1763
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  A sequence is opened
   by the instruction carrying the dependency and then accumulates the
   instructions that follow it.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
1776
/* Encoding entrypoint.  */

/* Encode the given instruction into a binary instruction word, written
   through the aarch64_insn pointer; presumably returns TRUE on success
   and fills in the aarch64_operand_error on failure.  NOTE(review): the
   roles of the qualifier-list and sequence arguments are defined in
   libopcodes -- confirm there.  */
extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
		       aarch64_insn *, aarch64_opnd_qualifier_t *,
		       aarch64_operand_error *, aarch64_instr_sequence *);

/* Replace the opcode entry of the instruction, returning the opcode now
   in effect.  NOTE(review): confirm exact contract in libopcodes.  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
			const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */
extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
1793
1794 /* An instance of this structure is passed to aarch64_print_operand, and
1795 the callback within this structure is used to apply styling to the
1796 disassembler output. This structure encapsulates the callback and a
1797 state pointer. */
1798
struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS with STYLE applied to the string.  STYLER is a pointer back
     to this object so that the callback can access the state member.
     FMT and ARGS are a printf-style format string and its va_list.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
1816
/* Generate the string representation of an operand, with disassembler
   styling applied through STYLER (see struct aarch64_styler above).
   NOTE(review): the roles of the unnamed buffer/offset arguments are
   defined in libopcodes -- confirm there.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);
1824
/* Miscellaneous interface.  */

/* Return the position of an operand kind within an operand list.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

/* Return the expected qualifier for an operand given a qualifier
   sequence list.  */
extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

/* Presumably returns TRUE if the opcode writes to one of its source
   operands; confirm in libopcodes.  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

/* Return the number of operands the opcode takes.  */
extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Return non-zero if the operand refers to the stack pointer.  */
extern int
aarch64_stack_pointer_p (const aarch64_opnd_info *);

/* Return non-zero if the operand refers to the zero register.  */
extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode a binary instruction word into an
   aarch64_inst, reporting failures via the aarch64_operand_error.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

/* (Re)initialise an instruction sequence (see aarch64_instr_sequence).  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

/* Return the operand class of an operand kind.  */
extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

/* Return the name of an operand kind.  */
extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

/* Return a textual description of an operand kind.  */
extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* Presumably tests whether an immediate is expressible as an SVE
   DUPM/MOV alias; confirm exact semantics in libopcodes.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Return TRUE if the given CPU feature set supports the instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);

/* Calculate the data size of a load/store from its operands.  */
extern int
calc_ldst_datasize (const aarch64_opnd_info *opnds);
1875
#ifdef DEBUG_AARCH64
/* Nonzero to enable verbose trace output.  */
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Print a printf-style trace message M when debug_dump is set.  The
   expansion is wrapped in do { } while (0) so the macro behaves as a
   single statement; the previous bare { } form broke when the macro was
   used as the body of an unbraced if/else (the trailing semicolon after
   the braces terminated the if).  */
#define DEBUG_TRACE(M, ...)					\
  do								\
    {								\
      if (debug_dump)						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \
    }								\
  while (0)

/* As DEBUG_TRACE, but only prints when condition C also holds.  */
#define DEBUG_TRACE_IF(C, M, ...)				\
  do								\
    {								\
      if (debug_dump && (C))					\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \
    }								\
  while (0)
#else  /* !DEBUG_AARCH64 */
/* No-op forms; do { } while (0) rather than a bare ';' so that
   "DEBUG_TRACE (...);" remains a single valid statement in all
   contexts, including unbraced if/else bodies.  */
#define DEBUG_TRACE(M, ...) do { } while (0)
#define DEBUG_TRACE_IF(C, M, ...) do { } while (0)
#endif /* DEBUG_AARCH64 */
1897
/* Operand-name lookup tables.  NOTE(review): the sizes suggest they are
   indexed by a 5-, 4-, 6- and 1-bit encoded field value respectively --
   confirm in libopcodes before relying on this.  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
1902
1903 #ifdef __cplusplus
1904 }
1905 #endif
1906
1907 #endif /* OPCODE_AARCH64_H */
1908