/* AArch64 assembler/disassembler support.

   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the license, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29
30 #include "dis-asm.h"
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET 0
38
/* An AArch64 instruction encoding: a fixed-width 32-bit value.  */
typedef uint32_t aarch64_insn;
40
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set.  The order of the enumerators is part of the
   ABI between the assembler and this header: do not reorder.  */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  AARCH64_FEATURE_SVE2_AES,
  AARCH64_FEATURE_SVE2_BITPERM,
  AARCH64_FEATURE_SVE2_SM4,
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAUTH,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions.  */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions.  */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* XS memory attribute.  */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout.  */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  AARCH64_FEATURE_F32MM,
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors.  */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records.  */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source.  */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events.  */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* Delegated SError exceptions for EL3.  */
  AARCH64_FEATURE_E3DSE,
  /* System Control Register2.  */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps.  */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address.  */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement.  */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension.  */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension.  */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension.  */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension.  */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers.  */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions.  */
  AARCH64_FEATURE_D128,
  /* Armv8.9-A/Armv9.4-A architecture Debug extension.  */
  AARCH64_FEATURE_DEBUGv8p9,
  /* Performance Monitors Extension.  */
  AARCH64_FEATURE_PMUv3p9,
  /* Performance Monitors Snapshots Extension.  */
  AARCH64_FEATURE_PMUv3_SS,
  /* Performance Monitors Instruction Counter Extension.  */
  AARCH64_FEATURE_PMUv3_ICNTR,
  /* System Performance Monitors Extension.  */
  AARCH64_FEATURE_SPMU,
  /* System Performance Monitors Extension version 2.  */
  AARCH64_FEATURE_SPMU2,
  /* Performance Monitors Synchronous-Exception-Based Event Extension.  */
  AARCH64_FEATURE_SEBEP,
  /* SVE2.1 and SME2.1 non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_B16B16,
  /* SME2.1 instructions.  */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions.  */
  AARCH64_FEATURE_SVE2p1,
  /* RCPC3 instructions.  */
  AARCH64_FEATURE_RCPC3,
  /* Enhanced Software Step Extension.  */
  AARCH64_FEATURE_STEP2,
  /* Checked Pointer Arithmetic instructions.  */
  AARCH64_FEATURE_CPA,
  /* FAMINMAX instructions.  */
  AARCH64_FEATURE_FAMINMAX,
  /* FP8 instructions.  */
  AARCH64_FEATURE_FP8,
  /* LUT instructions.  */
  AARCH64_FEATURE_LUT,
  /* Branch Record Buffer Extension.  */
  AARCH64_FEATURE_BRBE,
  /* SME LUTv2 instructions.  */
  AARCH64_FEATURE_SME_LUTv2,
  /* FP8FMA instructions.  */
  AARCH64_FEATURE_FP8FMA,
  /* FP8DOT4 instructions.  */
  AARCH64_FEATURE_FP8DOT4,
  /* FP8DOT2 instructions.  */
  AARCH64_FEATURE_FP8DOT2,
  /* SSVE FP8FMA instructions.  */
  AARCH64_FEATURE_SSVE_FP8FMA,
  /* SSVE FP8DOT4 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT4,
  /* SSVE FP8DOT2 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT2,
  /* SME F8F32 instructions.  */
  AARCH64_FEATURE_SME_F8F32,
  /* SME F8F16 instructions.  */
  AARCH64_FEATURE_SME_F8F16,

  /* Virtual features.  These are used to gate instructions that are enabled
     by either of two (or more) sets of command line flags.  */
  /* +fp8fma+sve or +ssve-fp8fma */
  AARCH64_FEATURE_FP8FMA_SVE,
  /* +fp8dot4+sve or +ssve-fp8dot4 */
  AARCH64_FEATURE_FP8DOT4_SVE,
  /* +fp8dot2+sve or +ssve-fp8dot2 */
  AARCH64_FEATURE_FP8DOT2_SVE,
  /* +sme-f16f16 or +sme-f8f16 */
  AARCH64_FEATURE_SME_F16F16_F8F16,
  /* Armv9.5-A processors.  */
  AARCH64_FEATURE_V9_5A,
  /* Total number of feature bits; must be the last enumerator.  */
  AARCH64_NUM_FEATURES
};
282
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   Returns 0 when BIT does not live in 64-bit word X of the set.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  ((X) == (BIT) / 64 ? 1ULL << (BIT) % 64 : 0)

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
294
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAUTH)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM)	\
					 | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, RCPC2)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, XS)	\
					 | AARCH64_FEATBIT (X, WFXT)	\
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, CSSC)	\
					 | AARCH64_FEATBIT (X, SPEv1p4)	\
					 | AARCH64_FEATBIT (X, SPE_CRR)	\
					 | AARCH64_FEATBIT (X, SPE_FDS)	\
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, SCTLR2)	\
					 | AARCH64_FEATBIT (X, FGT2)	\
					 | AARCH64_FEATBIT (X, PFAR)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, AIE)	\
					 | AARCH64_FEATBIT (X, S1PIE)	\
					 | AARCH64_FEATBIT (X, S2PIE)	\
					 | AARCH64_FEATBIT (X, S1POE)	\
					 | AARCH64_FEATBIT (X, S2POE)	\
					 | AARCH64_FEATBIT (X, TCR2)	\
					 | AARCH64_FEATBIT (X, DEBUGv8p9) \
					 | AARCH64_FEATBIT (X, PMUv3p9)	\
					 | AARCH64_FEATBIT (X, PMUv3_SS) \
					 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
					 | AARCH64_FEATBIT (X, SPMU)	\
					 | AARCH64_FEATBIT (X, SEBEP)	\
					 | AARCH64_FEATBIT (X, PREDRES2))

#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
#define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
#define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
#define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
#define AARCH64_ARCH_V9_4A_FEATURES(X)	(AARCH64_ARCH_V8_9A_FEATURES (X) \
					 | AARCH64_FEATBIT (X, SVE2p1))
#define AARCH64_ARCH_V9_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_5A)	\
					 | AARCH64_FEATBIT (X, CPA)	\
					 | AARCH64_FEATBIT (X, LUT)	\
					 | AARCH64_FEATBIT (X, FAMINMAX) \
					 | AARCH64_FEATBIT (X, E3DSE)	\
					 | AARCH64_FEATBIT (X, SPMU2)	\
					 | AARCH64_FEATBIT (X, STEP2))
380
/* Architectures are the sum of the base and extensions.  Each cumulative
   AARCH64_ARCH_* mask builds on its predecessor.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8)	\
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X)		\
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)		\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)		\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)		\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)		\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)		\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)		\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)		\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R is based on v8.4 but is not a v8-A profile and drops LOR.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X)		\
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X)		\
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X)		\
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X)		\
				 | AARCH64_ARCH_V9_4A_FEATURES (X))
#define AARCH64_ARCH_V9_5A(X)	(AARCH64_ARCH_V9_4A (X)		\
				 | AARCH64_ARCH_V9_5A_FEATURES (X))

/* An architecture mask with no feature bits set.  */
#define AARCH64_ARCH_NONE(X)	0
421
422 /* CPU-specific features. */
423 typedef struct {
424 uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
425 } aarch64_feature_set;
426
/* Nonzero if feature set CPU includes the single feature FEAT.
   Note: these macros hard-code a two-word feature set (flags[0], flags[1]),
   matching the current value of AARCH64_NUM_FEATURES.  */
#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)			\
  ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0		\
   && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)

/* Nonzero if CPU includes every feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)			\
  ((~(CPU).flags[0] & (FEAT).flags[0]) == 0			\
   && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)

/* Nonzero if CPU includes at least one feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)			\
  (((CPU).flags[0] & (FEAT).flags[0]) != 0			\
   || ((CPU).flags[1] & (FEAT).flags[1]) != 0)

/* Set DEST to the feature set produced by macro FEAT (e.g. an
   AARCH64_ARCH_* mask macro taking the word index as argument).  */
#define AARCH64_SET_FEATURE(DEST, FEAT)				\
  ((DEST).flags[0] = FEAT (0),					\
   (DEST).flags[1] = FEAT (1))

/* Set DEST to SRC with the single feature FEAT removed.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)			\
  ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \
   (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))

/* Set TARG to the union of feature sets F1 and F2.  */
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)			\
  do								\
    {								\
      (TARG).flags[0] = (F1).flags[0] | (F2).flags[0];		\
      (TARG).flags[1] = (F1).flags[1] | (F2).flags[1];		\
    }								\
  while (0)

/* Set TARG to the features in F1 that are not in F2.  */
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)			\
  do								\
    {								\
      (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0];		\
      (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1];		\
    }								\
  while (0)
462
/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { 0, 0 } }
#define AARCH64_ALL_FEATURES { { -1, -1 } }

/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE(FEAT) \
  { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  */
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }

/* Used by AARCH64_CPU_FEATURES.  Each _N variant folds N extra feature
   bits into the mask for architecture ARCH.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".  */
#define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
  { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),		\
      AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...".  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
508
/* Operand classes.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_SVE_REGLIST,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ZA_ACCESS,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};
528
529 /* Operand code that helps both parsing and coding.
530 Keep AARCH64_OPERANDS synced. */
531
532 enum aarch64_opnd
533 {
534 AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!*/
535
536 AARCH64_OPND_Rd, /* Integer register as destination. */
537 AARCH64_OPND_Rn, /* Integer register as source. */
538 AARCH64_OPND_Rm, /* Integer register as source. */
539 AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */
540 AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */
541 AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */
542 AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */
543 AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */
544 AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */
545 AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */
546 AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */
547
548 AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
549 AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
550 AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
551 AARCH64_OPND_PAIRREG, /* Paired register operand. */
552 AARCH64_OPND_PAIRREG_OR_XZR, /* Paired register operand, optionally xzr. */
553 AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
554 AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
555 AARCH64_OPND_Rm_LSL, /* Integer Rm shifted (LSL-only). */
556
557 AARCH64_OPND_Fd, /* Floating-point Fd. */
558 AARCH64_OPND_Fn, /* Floating-point Fn. */
559 AARCH64_OPND_Fm, /* Floating-point Fm. */
560 AARCH64_OPND_Fa, /* Floating-point Fa. */
561 AARCH64_OPND_Ft, /* Floating-point Ft. */
562 AARCH64_OPND_Ft2, /* Floating-point Ft2. */
563
564 AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */
565 AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
566 AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
567
568 AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
569 AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
570 AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
571 AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
572 AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */
573 AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */
574 AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
575 AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
576 AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
577 AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
578 qualifier is S_H or S_2B. */
579 AARCH64_OPND_Em8, /* AdvSIMD Vector Element Vm restricted to V0 - V7,
580 used only with qualifier S_B. */
581 AARCH64_OPND_Em_INDEX1_14, /* AdvSIMD 1-bit encoded index in Vm at [14] */
582 AARCH64_OPND_Em_INDEX2_13, /* AdvSIMD 2-bit encoded index in Vm at [14:13] */
583 AARCH64_OPND_Em_INDEX3_12, /* AdvSIMD 3-bit encoded index in Vm at [14:12] */
584 AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
585 AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
586 AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
587 structure to all lanes. */
588 AARCH64_OPND_LVn_LUT, /* AdvSIMD Vector register list used in lut. */
589 AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */
590
591 AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
592 AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
593
594 AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
595 AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
596 AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
597 AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
598 AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
599 AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */
600 AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate. */
601 AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
602 (no encoding). */
603 AARCH64_OPND_IMM0, /* Immediate for #0. */
604 AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */
605 AARCH64_OPND_FPIMM, /* Floating-point Immediate. */
606 AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */
607 AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
608 AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
609 AARCH64_OPND_IMM, /* Immediate. */
610 AARCH64_OPND_IMM_2, /* Immediate. */
611 AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
612 AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
613 AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
614 AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg. */
615 AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */
616 AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */
617 AARCH64_OPND_BIT_NUM, /* Immediate. */
618 AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */
619 AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
620 AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */
621 AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
622 AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
623 each condition flag. */
624
625 AARCH64_OPND_LIMM, /* Logical Immediate. */
626 AARCH64_OPND_AIMM, /* Arithmetic immediate. */
627 AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
628 AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
629 AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
630 AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
631 AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
632 AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
633
634 AARCH64_OPND_COND, /* Standard condition as the last operand. */
635 AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
636
637 AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
638 AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */
639 AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */
640 AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */
641 AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */
642
643 AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */
644 AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */
645 AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */
646 AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */
647 AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
648 negative or unaligned and there is
649 no writeback allowed. This operand code
650 is only used to support the programmer-
651 friendly feature of using LDR/STR as the
652 the mnemonic name for LDUR/STUR instructions
653 wherever there is no ambiguity. */
654 AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
655 AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
656 16) immediate. */
657 AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
658 AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
659 16) immediate. */
660 AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
661 AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
662 AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
663
664 AARCH64_OPND_SYSREG, /* System register operand. */
665 AARCH64_OPND_SYSREG128, /* 128-bit system register operand. */
666 AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */
667 AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */
668 AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */
669 AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */
670 AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */
671 AARCH64_OPND_SYSREG_TLBIP, /* System register <tlbip_op> operand. */
672 AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */
673 AARCH64_OPND_BARRIER, /* Barrier operand. */
674 AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */
675 AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
676 AARCH64_OPND_PRFOP, /* Prefetch operation. */
677 AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */
678 AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
679 AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */
680 AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
681 AARCH64_OPND_BRBOP, /* BRB operation IALL or INJ in bit 5. */
682 AARCH64_OPND_Rt_IN_SYS_ALIASES, /* Defaulted and omitted Rt used in SYS aliases such as brb. */
683 AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */
684 AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */
685 AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
686 AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
687 AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
688 AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
689 AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
690 AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
691 AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
692 AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
693 AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
694 AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
695 AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
696 AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
697 AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>]. */
698 AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */
699 AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
700 AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
701 AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
702 AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */
703 AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
704 AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
705 AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
706 AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
707 AARCH64_OPND_SVE_ADDR_RX_LSL4, /* SVE [<Xn|SP>, <Xm>, LSL #4]. */
708 AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */
709 AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
710 AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
711 AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
712 AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
713 AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
714 Bit 14 controls S/U choice. */
715 AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
716 Bit 22 controls S/U choice. */
717 AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
718 Bit 14 controls S/U choice. */
719 AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
720 Bit 22 controls S/U choice. */
721 AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
722 Bit 14 controls S/U choice. */
723 AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
724 Bit 22 controls S/U choice. */
725 AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
726 Bit 14 controls S/U choice. */
727 AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
728 Bit 22 controls S/U choice. */
729 AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
730 AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
731 AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
732 AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
733 AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */
734 AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */
735 AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */
736 AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
737 AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
738 AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
739 AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
740 AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
741 AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
742 AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
743 AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
744 AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */
745 AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
746 AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
747 AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
748 AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
749 AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
750 AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
751 AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
752 AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */
753 AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
754 AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
755 AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
756 AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */
757 AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
758 AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
759 AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
760 AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */
761 AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
762 AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */
763 AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
764 AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
765 AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
766 AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
767 AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */
768 AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
769 AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
770 AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */
771 AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
772 AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
773 AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
774 AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
775 AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
776 AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
777 AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
778 AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
779 AARCH64_OPND_SVE_UIMM4, /* SVE unsigned 4-bit immediate. */
780 AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
781 AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
782 AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
783 AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
784 AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B). */
785 AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H). */
786 AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S). */
787 AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D). */
788 AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B). */
789 AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H). */
790 AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
791 AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D). */
792 AARCH64_OPND_SME_ZA_ARRAY4, /* Tile to vector, single (BHSDQ). */
793 AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
794 AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
795 AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
796 AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
797 AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
798 AARCH64_OPND_SVE_Zm1_23_INDEX, /* SVE bit index in Zm, bit 23. */
799 AARCH64_OPND_SVE_Zm2_22_INDEX, /* SVE bit index in Zm, bits [23,22]. */
800 AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
801 AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */
802 AARCH64_OPND_SVE_Zm3_12_INDEX, /* SVE bit index in Zm, bits 12 plus bit [23,22]. */
803 AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */
804 AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
805 AARCH64_OPND_SVE_Zm3_10_INDEX, /* z0-z7[0-15] in Zm3_INDEX plus bit 11:10. */
806 AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */
807 AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
808 AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
809 AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
810 AARCH64_OPND_SVE_Zn_5_INDEX, /* Indexed SVE vector register, for DUPQ. */
811 AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
812 AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
813 AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
814 AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
815 AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
816 AARCH64_OPND_SME_Zdnx4_STRIDED, /* SVE vector register list from [4:2]*4. */
817 AARCH64_OPND_SME_Zm, /* SVE vector register list in 4-bit Zm. */
818 AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
819 AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
820 AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */
821 AARCH64_OPND_SME_Znx2_BIT_INDEX, /* SVE vector register list encoding a bit index from [9:6]*2. */
822 AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */
823 AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */
824 AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */
825 AARCH64_OPND_SME_ZAda_1b, /* SME <ZAda>.H, 1-bits. */
826 AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */
827 AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */
828 AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */
829 AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */
830 AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */
831 AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */
832 AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */
833 AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */
834 AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */
835 AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */
836 AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */
837 AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */
838 AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */
839 AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */
840 AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */
841 AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */
842 AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */
843 AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */
844 AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */
845 AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */
846 AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */
847 AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */
848 AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */
849 AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */
850 AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */
851 AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */
852 AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */
853 AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */
854 AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */
855 AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */
856 AARCH64_OPND_SME_Zm_INDEX2_3, /* Zn.T[index], bits [19:16,10,3]. */
857 AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */
858 AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */
859 AARCH64_OPND_SME_Zm_INDEX3_3, /* Zn.T[index], bits [19:16,11:10,3]. */
860 AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */
861 AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */
862 AARCH64_OPND_SME_Zm_INDEX4_2, /* Zn.T[index], bits [19:16,11:10,3:2]. */
863 AARCH64_OPND_SME_Zm_INDEX4_3, /* Zn.T[index], bits [19:16,15,11,10,3]. */
864 AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */
865 AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */
866 AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */
867 AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */
868 AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */
869 AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */
870 AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */
871 AARCH64_OPND_SVE_Zn0_INDEX, /* Zn[index], bits [9:5]. */
872 AARCH64_OPND_SVE_Zn1_17_INDEX, /* Zn[index], bits [9:5,17]. */
873 AARCH64_OPND_SVE_Zn2_18_INDEX, /* Zn[index], bits [9:5,18:17]. */
874 AARCH64_OPND_SVE_Zn3_22_INDEX, /* Zn[index], bits [9:5,18:17,22]. */
875 AARCH64_OPND_SVE_Zd0_INDEX, /* Zn[index], bits [4:0]. */
876 AARCH64_OPND_SVE_Zd1_17_INDEX, /* Zn[index], bits [4:0,17]. */
877 AARCH64_OPND_SVE_Zd2_18_INDEX, /* Zn[index], bits [4:0,18:17]. */
878 AARCH64_OPND_SVE_Zd3_22_INDEX, /* Zn[index], bits [4:0,18:17,22]. */
879 AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */
880 AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */
881 AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */
882 AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */
883 AARCH64_OPND_SME_ZT0_INDEX2_12, /* ZT0[<imm>], bits [13:12]. */
884 AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */
885 AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */
886 AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
887 AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */
888 AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */
889 AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */
890 AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */
891 AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */
892 AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND, /* [<Xn|SP>]{, #<imm>}. */
893 AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!. */
894 AARCH64_OPND_RCPC3_ADDR_POSTIND, /* [<Xn|SP>], #<imm>. */
895 AARCH64_OPND_RCPC3_ADDR_PREIND_WB, /* [<Xn|SP>, #<imm>]!. */
896 AARCH64_OPND_RCPC3_ADDR_OFFSET,
897 };
898
899 /* Qualifier constrains an operand. It either specifies a variant of an
900 operand type or limits values available to an operand type.
901
902 N.B. Order is important.
903 Keep aarch64_opnd_qualifiers (opcodes/aarch64-opc.c) synced. */
904
enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 2 x 1 byte,
     4 x 1 byte or 2 x 2 byte are selected by the instruction.  Other than that
     they have no difference with AARCH64_OPND_QLF_S_B in encoding.  They are
     here purely for syntactical reasons and are an exception to the normal
     AArch64 disassembly scheme.  */
  AARCH64_OPND_QLF_S_2B,
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* Predicate qualifiers; presumably the SVE "/z" (zeroing) and "/m"
     (merging) suffixes — confirm against aarch64_opnd_qualifiers in
     opcodes/aarch64-opc.c.  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm. */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,

  /* Special qualifier used for indicating error in qualifier retrieval.  */
  AARCH64_OPND_QLF_ERR,
} ATTRIBUTE_PACKED;
988
989 /* Instruction class. */
991
enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  /* SME-related instruction classes.  */
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bh,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_12_b,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  sme2_movaz,
  /* SVE-related instruction classes.  */
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
  the,
  sve2_urqvs,
  sve_index1,
  rcpc3,
  lut,
  last_iclass = lut	/* Sentinel: highest-valued instruction class.  */
};
1118
1119 /* Opcode enumerators. */
1120
enum aarch64_op
{
  OP_NIL,
  /* Loads/stores with a positive (unsigned, scaled) immediate offset.  */
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  /* Unscaled-offset loads/stores (the STUR/LDUR family).  */
  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  /* PC-relative (literal) loads and prefetch.  */
  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  /* Wide move instructions.  */
  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  /* Immediate-shift aliases.  */
  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  /* Bitfield aliases (UBFX, SBFIZ, BFI, etc.).  */
  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  /* Conditional select/set aliases (CINC, CSET, etc.).  */
  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  /* Floating-point conversions.  */
  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  /* SVE MOV/NOT aliases on predicate and vector registers.  */
  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo.  */
};
1223
/* Error types returned by opcode verifiers (see aarch64_opcode.verifier).  */
enum err_type
{
  ERR_OK,		/* No error; the instruction is valid.  */
  ERR_UND,		/* Undefined (abbreviations per Arm convention).  */
  ERR_UNP,		/* Unpredictable.  */
  ERR_NYI,		/* Not yet implemented.  */
  ERR_VFI,		/* Verifier failure.  */
  ERR_NR_ENTRIES	/* Number of entries above; not an error code.  */
};
1234
1235 /* Maximum number of operands an instruction can have. */
1236 #define AARCH64_MAX_OPND_NUM 7
1237 /* Maximum number of qualifier sequences an instruction can have. */
1238 #define AARCH64_MAX_QLF_SEQ_NUM 10
1239 /* Operand qualifier typedef */
1240 typedef enum aarch64_opnd_qualifier aarch64_opnd_qualifier_t;
1241 /* Operand qualifier sequence typedef. */
1242 typedef aarch64_opnd_qualifier_t \
1243 aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1244
1245 /* FIXME: improve the efficiency. */
1246 static inline bool
1247 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1248 {
1249 int i;
1250 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1251 if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1252 return false;
1253 return true;
1254 }
1255
1256 /* Forward declare error reporting type. */
1257 typedef struct aarch64_operand_error aarch64_operand_error;
1258 /* Forward declare instruction sequence type. */
1259 typedef struct aarch64_instr_sequence aarch64_instr_sequence;
1260 /* Forward declare instruction definition. */
1261 typedef struct aarch64_inst aarch64_inst;
1262
/* This structure holds information for a particular opcode.  One table
   entry (see aarch64_opcode_table) fully describes how to assemble and
   disassemble one instruction form.  */

struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (F_* bits).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks
     (C_* bits).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
1315
1316 typedef struct aarch64_opcode aarch64_opcode;
1317
1318 /* Table describing all the AArch64 opcodes. */
1319 extern const aarch64_opcode aarch64_opcode_table[];
1320
1321 /* Opcode flags. */
1322 #define F_ALIAS (1 << 0)
1323 #define F_HAS_ALIAS (1 << 1)
1324 /* Disassembly preference priority 1-3 (the larger the higher). If nothing
1325 is specified, it is the priority 0 by default, i.e. the lowest priority. */
1326 #define F_P1 (1 << 2)
1327 #define F_P2 (2 << 2)
1328 #define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
1330 #define F_COND (1 << 4)
1331 /* Instruction has the field of 'sf'. */
1332 #define F_SF (1 << 5)
1333 /* Instruction has the field of 'size:Q'. */
1334 #define F_SIZEQ (1 << 6)
1335 /* Floating-point instruction has the field of 'type'. */
1336 #define F_FPTYPE (1 << 7)
1337 /* AdvSIMD scalar instruction has the field of 'size'. */
1338 #define F_SSIZE (1 << 8)
1339 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q". */
1340 #define F_T (1 << 9)
1341 /* Size of GPR operand in AdvSIMD instructions encoded in Q. */
1342 #define F_GPRSIZE_IN_Q (1 << 10)
1343 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22. */
1344 #define F_LDS_SIZE (1 << 11)
1345 /* Optional operand; assume maximum of 1 operand can be optional. */
1346 #define F_OPD0_OPT (1 << 12)
1347 #define F_OPD1_OPT (2 << 12)
1348 #define F_OPD2_OPT (3 << 12)
1349 #define F_OPD3_OPT (4 << 12)
1350 #define F_OPD4_OPT (5 << 12)
1351 /* Default value for the optional operand when omitted from the assembly. */
1352 #define F_DEFAULT(X) (((X) & 0x1f) << 15)
1353 /* Instruction that is an alias of another instruction needs to be
1354 encoded/decoded by converting it to/from the real form, followed by
1355 the encoding/decoding according to the rules of the real opcode.
1356 This compares to the direct coding using the alias's information.
1357 N.B. this flag requires F_ALIAS to be used together. */
1358 #define F_CONV (1 << 20)
1359 /* Use together with F_ALIAS to indicate an alias opcode is a programmer
1360 friendly pseudo instruction available only in the assembly code (thus will
1361 not show up in the disassembly). */
1362 #define F_PSEUDO (1 << 21)
1363 /* Instruction has miscellaneous encoding/decoding rules. */
1364 #define F_MISC (1 << 22)
1365 /* Instruction has the field of 'N'; used in conjunction with F_SF. */
1366 #define F_N (1 << 23)
1367 /* Opcode dependent field. */
1368 #define F_OD(X) (((X) & 0x7) << 24)
1369 /* Instruction has the field of 'sz'. */
1370 #define F_LSE_SZ (1 << 27)
1371 /* Require an exact qualifier match, even for NIL qualifiers. */
1372 #define F_STRICT (1ULL << 28)
1373 /* This system instruction is used to read system registers. */
1374 #define F_SYS_READ (1ULL << 29)
1375 /* This system instruction is used to write system registers. */
1376 #define F_SYS_WRITE (1ULL << 30)
1377 /* This instruction has an extra constraint on it that imposes a requirement on
1378 subsequent instructions. */
1379 #define F_SCAN (1ULL << 31)
1380 /* Instruction takes a pair of optional operands. If we specify the Nth operand
1381 to be optional, then we also implicitly specify (N+1)th operand to also be
1382 optional. */
1383 #define F_OPD_PAIR_OPT (1ULL << 32)
/* This instruction does not allow the full range of values that the
   width of fields in the assembler instruction would theoretically
   allow.  This impacts the constraints on assembly but yields no
   impact on disassembly.  */
1388 #define F_OPD_NARROW (1ULL << 33)
1389 /* For the instruction with size[22:23] field. */
1390 #define F_OPD_SIZE (1ULL << 34)
1391 /* RCPC3 instruction has the field of 'size'. */
1392 #define F_RCPC3_SIZE (1ULL << 35)
1393 /* This instruction need VGx2 or VGx4 mandatorily in the operand passed to
1394 assembler. */
1395 #define F_VG_REQ (1ULL << 36)
1396
1397 /* 4-bit flag field to indicate subclass of instructions.
1398 Note the overlap between the set of subclass flags in each logical category
1399 (F_LDST_*, F_ARITH_*, F_BRANCH_* etc.); The usage of flags as
1400 iclass-specific enums is intentional. */
1401 #define F_SUBCLASS (15ULL << 37)
1402
1403 #define F_LDST_LOAD (1ULL << 37)
1404 #define F_LDST_STORE (2ULL << 37)
1405 /* Subclasses to denote add, sub and mov insns. */
1406 #define F_ARITH_ADD (1ULL << 37)
1407 #define F_ARITH_SUB (2ULL << 37)
1408 #define F_ARITH_MOV (3ULL << 37)
1409 /* Subclasses to denote call and ret insns. */
1410 #define F_BRANCH_CALL (1ULL << 37)
1411 #define F_BRANCH_RET (2ULL << 37)
1412 /* Subclass to denote that only tag update is involved. */
1413 #define F_DP_TAG_ONLY (1ULL << 37)
1414
1415 #define F_SUBCLASS_OTHER (F_SUBCLASS)
1416 /* Next bit is 41. */
1417
1418 /* Instruction constraints. */
1419 /* This instruction has a predication constraint on the instruction at PC+4. */
1420 #define C_SCAN_MOVPRFX (1U << 0)
1421 /* This instruction's operation width is determined by the operand with the
1422 largest element size. */
1423 #define C_MAX_ELEM (1U << 1)
1424 #define C_SCAN_MOPS_P (1U << 2)
1425 #define C_SCAN_MOPS_M (2U << 2)
1426 #define C_SCAN_MOPS_E (3U << 2)
1427 #define C_SCAN_MOPS_PME (3U << 2)
1428 /* Next bit is 4. */
1429
1430 static inline bool
1431 alias_opcode_p (const aarch64_opcode *opcode)
1432 {
1433 return (opcode->flags & F_ALIAS) != 0;
1434 }
1435
1436 static inline bool
1437 opcode_has_alias (const aarch64_opcode *opcode)
1438 {
1439 return (opcode->flags & F_HAS_ALIAS) != 0;
1440 }
1441
1442 /* Priority for disassembling preference. */
1443 static inline int
1444 opcode_priority (const aarch64_opcode *opcode)
1445 {
1446 return (opcode->flags >> 2) & 0x3;
1447 }
1448
1449 static inline bool
1450 pseudo_opcode_p (const aarch64_opcode *opcode)
1451 {
1452 return (opcode->flags & F_PSEUDO) != 0lu;
1453 }
1454
1455 /* Whether the opcode has the specific subclass flag.
1456 N.B. The overlap between F_LDST_*, F_ARITH_*, and F_BRANCH_* etc. subclass
1457 flags means that the callers of this function have the responsibility of
1458 checking for the flags appropriate for the specific iclass. */
1459 static inline bool
1460 aarch64_opcode_subclass_p (const aarch64_opcode *opcode, uint64_t flag)
1461 {
1462 return ((opcode->flags & F_SUBCLASS) == flag);
1463 }
1464
1465 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case
1466 by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
1467 [IDX, IDX + 1]. */
1468 static inline bool
1469 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1470 {
1471 if (opcode->flags & F_OPD_PAIR_OPT)
1472 return (((opcode->flags >> 12) & 0x7) == idx
1473 || ((opcode->flags >> 12) & 0x7) == idx + 1);
1474 return ((opcode->flags >> 12) & 0x7) == idx + 1;
1475 }
1476
1477 static inline aarch64_insn
1478 get_optional_operand_default_value (const aarch64_opcode *opcode)
1479 {
1480 return (opcode->flags >> 15) & 0x1f;
1481 }
1482
1483 static inline unsigned int
1484 get_opcode_dependent_value (const aarch64_opcode *opcode)
1485 {
1486 return (opcode->flags >> 24) & 0x7;
1487 }
1488
1489 static inline bool
1490 get_opcode_dependent_vg_status (const aarch64_opcode *opcode)
1491 {
1492 return (opcode->flags >> 36) & 0x1;
1493 }
1494
1495 static inline bool
1496 opcode_has_special_coder (const aarch64_opcode *opcode)
1497 {
1498 return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1499 | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
1500 | F_OPD_SIZE | F_RCPC3_SIZE)) != 0;
1501 }
1502
/* A simple (name, encoded value) pair, used by the tables of operand
   modifiers, barrier options, prefetch operations and hint options below.  */
struct aarch64_name_value_pair
{
  const char * name;
  aarch64_insn value;
};
1509
1510 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
1511 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
1512 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
1513 extern const struct aarch64_name_value_pair aarch64_prfops [32];
1514 extern const struct aarch64_name_value_pair aarch64_hint_options [];
1515
1516 #define AARCH64_MAX_SYSREG_NAME_LEN 32
1517
/* Description of one system register: its name, encoded value, flags, and
   the feature set required for it to be available.  */
typedef struct
{
  const char * name;
  aarch64_insn value;
  uint32_t flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
1528
1529 extern const aarch64_sys_reg aarch64_sys_regs [];
1530 extern const aarch64_sys_reg aarch64_pstatefields [];
1531 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
1532 extern bool aarch64_sys_reg_128bit_p (const uint32_t);
1533 extern bool aarch64_sys_reg_alias_p (const uint32_t);
1534 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
1535 const aarch64_sys_reg *);
1536
/* Description of one system-instruction operand register (used by the
   IC/DC/AT/TLBI/SR tables below), paralleling aarch64_sys_reg.  */
typedef struct
{
  const char *name;
  uint32_t value;
  uint32_t flags ;

  /* A set of features, all of which are required for this system instruction to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_ins_reg;
1547
1548 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
1549 extern bool
1550 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
1551 const char *reg_name,
1552 uint32_t, const aarch64_feature_set *);
1553
1554 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
1555 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
1556 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
1557 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
1558 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1559
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,	/* "MUL VL" — multiply by vector length.  */
};
1581
1582 bool
1583 aarch64_extend_operator_p (enum aarch64_modifier_kind);
1584
1585 enum aarch64_modifier_kind
1586 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
1587 /* Condition. */
1588
typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4 (the array holds up to 4 names).  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;
1596
1597 extern const aarch64_cond aarch64_conds[16];
1598
1599 const aarch64_cond* get_cond_from_value (aarch64_insn value);
1600 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1601
/* Information about a reference to part of ZA.  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The index part of the reference, e.g. the [<Wv>, <imm>] in
     <ZAn><HV>.D[<Wv>, <imm>].  */
  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1630
/* Information about a list of registers.  */
struct aarch64_reglist
{
  /* The first register in the list.  */
  unsigned first_regno : 8;
  /* The number of registers in the list.  */
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if it is a list of register elements (each with a lane index).  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1643
/* Structure representing an operand.  */

struct aarch64_opnd_info
{
  /* The kind of operand.  */
  enum aarch64_opnd type;
  /* The operand qualifier, e.g. register width or vector arrangement.  */
  aarch64_opnd_qualifier_t qualifier;
  /* The operand's index (cf. aarch64_operand_index).  */
  int idx;

  /* Payload; which member is active depends on TYPE.  */
  union
    {
      /* Plain register.  */
      struct
	{
	  unsigned regno;
	} reg;
      /* Register with a lane/element index, e.g. Vn.S[2].  */
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      /* Offset is either an immediate or a register, selected
		 by IS_REG below.  */
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */
      struct aarch64_indexed_za indexed_za;

      /* Condition operand, e.g. for B.cond.  */
      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      /* System instruction operand (IC/DC/AT/TLBI etc.).  */
      const aarch64_sys_ins_reg *sysins_op;
      /* Named operands looked up in libopcodes tables.  */
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};

typedef struct aarch64_opnd_info aarch64_opnd_info;
1735
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1760
/* Defining the HINT #imm values for the aarch64_hint_options.
   NOTE(review): the C/J/JC names suggest BTI operands and CSYNC/DSYNC
   suggest PSB/GCSB operands — confirm against aarch64_hint_options in
   libopcodes.  */
#define HINT_OPD_CSYNC	0x11
#define HINT_OPD_DSYNC	0x13
#define HINT_OPD_C	0x22
#define HINT_OPD_J	0x24
#define HINT_OPD_JC	0x26
#define HINT_OPD_NULL	0x00
1768
1769
1770 /* Diagnosis related declaration and interface. */
1772
1773 /* Operand error kind enumerators.
1774
1775 AARCH64_OPDE_RECOVERABLE
1776 Less severe error found during the parsing, very possibly because that
1777 GAS has picked up a wrong instruction template for the parsing.
1778
1779 AARCH64_OPDE_A_SHOULD_FOLLOW_B
1780 The instruction forms (or is expected to form) part of a sequence,
1781 but the preceding instruction in the sequence wasn't the expected one.
1782 The message refers to two strings: the name of the current instruction,
1783 followed by the name of the expected preceding instruction.
1784
1785 AARCH64_OPDE_EXPECTED_A_AFTER_B
1786 Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1787 so that the current instruction is assumed to be the incorrect one:
1788 "since the previous instruction was B, the current one should be A".
1789
1790 AARCH64_OPDE_SYNTAX_ERROR
1791 General syntax error; it can be either a user error, or simply because
1792 that GAS is trying a wrong instruction template.
1793
1794 AARCH64_OPDE_FATAL_SYNTAX_ERROR
1795 Definitely a user syntax error.
1796
1797 AARCH64_OPDE_INVALID_VARIANT
1798 No syntax error, but the operands are not a valid combination, e.g.
1799 FMOV D0,S0
1800
1801 The following errors are only reported against an asm string that is
1802 syntactically valid and that has valid operand qualifiers.
1803
1804 AARCH64_OPDE_INVALID_VG_SIZE
1805 Error about a "VGx<n>" modifier in a ZA index not having the
1806 correct <n>. This error effectively forms a pair with
1807 AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
1808 of vectors that an instruction operates on. However, the "VGx<n>"
1809 modifier is optional, whereas a register list always has a known
1810 and explicit length. It therefore seems better to place more
1811 importance on the register list length when selecting an opcode table
1812 entry. This in turn means that having an incorrect register length
1813 should be more severe than having an incorrect "VGx<n>".
1814
1815 AARCH64_OPDE_REG_LIST_LENGTH
1816 Error about a register list operand having an unexpected number of
1817 registers. This error is low severity because there might be another
1818 opcode entry that supports the given number of registers.
1819
1820 AARCH64_OPDE_REG_LIST_STRIDE
1821 Error about a register list operand having the correct number
1822 (and type) of registers, but an unexpected stride. This error is
1823 more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
1824 that the length is known to be correct. However, it is lower than
1825 many other errors, since some instructions have forms that share
1826 the same number of registers but have different strides.
1827
1828 AARCH64_OPDE_UNTIED_IMMS
1829 The asm failed to use the same immediate for a destination operand
1830 and a tied source operand.
1831
1832 AARCH64_OPDE_UNTIED_OPERAND
1833 The asm failed to use the same register for a destination operand
1834 and a tied source operand.
1835
1836 AARCH64_OPDE_OUT_OF_RANGE
1837 Error about some immediate value out of a valid range.
1838
1839 AARCH64_OPDE_UNALIGNED
   Error about some immediate value not properly aligned (i.e. not being a
   multiple of a certain value).
1842
1843 AARCH64_OPDE_OTHER_ERROR
1844 Error of the highest severity and used for any severe issue that does not
1845 fall into any of the above categories.
1846
1847 AARCH64_OPDE_INVALID_REGNO
1848 A register was syntactically valid and had the right type, but it was
1849 outside the range supported by the associated operand field. This is
1850 a high severity error because there are currently no instructions that
1851 would accept the operands that precede the erroneous one (if any) and
1852 yet still accept a wider range of registers.
1853
1854 AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS, while the
1856 AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
1857 only libopcodes has the information about the valid variants of each
1858 instruction.
1859
1860 The enumerators have an increasing severity. This is helpful when there are
1861 multiple instruction templates available for a given mnemonic name (e.g.
1862 FMOV); this mechanism will help choose the most suitable template from which
1863 the generated diagnostics can most closely describe the issues, if any.
1864
1865 This enum needs to be kept up-to-date with operand_mismatch_kind_names
1866 in tc-aarch64.c. */
1867
/* Operand error kinds, listed in order of increasing severity; see the
   large comment above for the meaning of each enumerator.  Keep this
   enum in sync with operand_mismatch_kind_names in tc-aarch64.c.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
1887
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  /* The kind of error (AARCH64_OPDE_NIL when there is none).  */
  enum aarch64_operand_error_kind kind;
  /* Index of the operand the error refers to — presumably an index
     into aarch64_inst.operands; confirm against the GAS/libopcodes
     callers.  */
  int index;
  /* Human-readable error message (may be a printf-style format whose
     arguments live in DATA — verify against the consumers).  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* True if this error need not abort the current assembly attempt.  */
  bool non_fatal;
};
1901
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened; points to an array whose capacity is
     NUM_ALLOCATED_INSNS.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
1914
/* Encoding entrypoint.  */

/* Encode the parsed instruction into an aarch64_insn word; on a problem
   the aarch64_operand_error out-parameter describes it.  See libopcodes
   for the full parameter contract.  */
extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
		       aarch64_insn *, aarch64_opnd_qualifier_t *,
		       aarch64_operand_error *, aarch64_instr_sequence *);

/* Replace the opcode of the given instruction, returning the new
   opcode entry (per the name; defined in libopcodes).  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
			const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */
extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
1931
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS with STYLE applied to the string.  STYLER is a pointer back
     to this object so that the callback can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  Opaque to libopcodes.  */
  void *state;
};
1954
/* Generate the string representation of an operand.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);

/* Miscellaneous interface.  */

/* Find the position of the given operand kind in an operand sequence
   (per the name; defined in libopcodes).  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

/* Return the qualifier expected for an operand, given a qualifier
   sequence list and position information.  */
extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

/* Whether the opcode's destination operand overlaps a source operand
   (a "destructive" form, per the name).  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

/* Number of operands the opcode takes.  */
extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Whether the operand refers to the stack pointer.  */
extern bool
aarch64_stack_pointer_p (const aarch64_opnd_info *);

/* Whether the operand refers to the zero register.  */
extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode a binary instruction word into an
   aarch64_inst, reporting problems via aarch64_operand_error.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

/* (Re)initialise an instruction sequence, optionally starting it with
   the given instruction (see struct aarch64_instr_sequence).  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

/* Map an operand kind to its class / printable name / description.  */
extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* Whether the immediate is expressible as an SVE DUPM/MOV immediate.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Whether a CPU with the given feature set supports the instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);

/* Compute the data size of a load/store from its operands (per the
   name; defined in libopcodes).  */
extern int
calc_ldst_datasize (const aarch64_opnd_info *opnds);
2013
#ifdef DEBUG_AARCH64
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Emit a printf-style trace message when debug_dump is set.  The macros
   are wrapped in do { } while (0) so that each expands to a single
   statement and is safe in unbraced if/else bodies; the previous bare
   { } / ';' definitions broke "if (c) DEBUG_TRACE (...); else ...".  */
#define DEBUG_TRACE(M, ...)						\
  do									\
    {									\
      if (debug_dump)							\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)

/* As DEBUG_TRACE, but only when condition C also holds.  */
#define DEBUG_TRACE_IF(C, M, ...)					\
  do									\
    {									\
      if (debug_dump && (C))						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)
#else  /* !DEBUG_AARCH64 */
/* No-op statements, kept as do/while so callers parse identically in
   both build modes.  */
#define DEBUG_TRACE(M, ...) do { } while (0)
#define DEBUG_TRACE_IF(C, M, ...) do { } while (0)
#endif /* DEBUG_AARCH64 */
2035
/* Name tables indexed by encoded operand value: SVE predicate-constraint
   patterns, SVE prefetch operations, range prefetch operations, SME
   "VGx<n>" multipliers and BRB operations (per the array names; the
   contents live in libopcodes).  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
extern const char *const aarch64_brbop_array[2];
2041
2042 #ifdef __cplusplus
2043 }
2044 #endif
2045
2046 #endif /* OPCODE_AARCH64_H */
2047