/* AArch64 assembler/disassembler support.

   Copyright (C) 2009-2026 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the license, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#ifndef OPCODE_AARCH64_H
#define OPCODE_AARCH64_H

#include "bfd.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>

#include "dis-asm.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET 0

/* A raw 32-bit A64 instruction word.  */
typedef uint32_t aarch64_insn;
40
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set.  NOTE: the bit position of each feature is given
   by its position in this enum, so new entries should normally be appended
   (reordering changes every subsequent bit).  */
enum aarch64_feature_bit {
  /* Architecture versions.  */
  AARCH64_FEATURE_V8,
  AARCH64_FEATURE_V8_1A,
  AARCH64_FEATURE_V8_2A,
  AARCH64_FEATURE_V8_3A,
  AARCH64_FEATURE_V8_4A,
  AARCH64_FEATURE_V8_5A,
  AARCH64_FEATURE_V8_6A,
  AARCH64_FEATURE_V8_7A,
  AARCH64_FEATURE_V8_8A,
  AARCH64_FEATURE_V8_9A,

  AARCH64_FEATURE_V9A,
  AARCH64_FEATURE_V9_1A,
  AARCH64_FEATURE_V9_2A,
  AARCH64_FEATURE_V9_3A,
  AARCH64_FEATURE_V9_4A,
  AARCH64_FEATURE_V9_5A,
  AARCH64_FEATURE_V9_6A,
  AARCH64_FEATURE_V9_7A,

  /* Armv8-A processors only - this is unset for Armv8-R.  */
  AARCH64_FEATURE_V8A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,

  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  AARCH64_FEATURE_SVE2_AES,
  AARCH64_FEATURE_SVE2_BITPERM,
  AARCH64_FEATURE_SVE2_SM4,
  AARCH64_FEATURE_SVE2_SHA3,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAUTH,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* LSFE instructions.  */
  AARCH64_FEATURE_LSFE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions.  */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions.  */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Compare and branch instructions.  */
  AARCH64_FEATURE_CMPBR,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Outer Cacheable Cache Maintenance Operation.  */
  AARCH64_FEATURE_OCCMO,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* XS memory attribute.  */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout.  */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  AARCH64_FEATURE_F32MM,
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* LSUI - Unprivileged Load Store.  */
  AARCH64_FEATURE_LSUI,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions.  */
  AARCH64_FEATURE_D128,
  /* SME2.1 instructions.  */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions.  */
  AARCH64_FEATURE_SVE2p1,
  /* SVE_F16F32MM instructions.  */
  AARCH64_FEATURE_SVE_F16F32MM,
  /* F8F32MM instructions.  */
  AARCH64_FEATURE_F8F32MM,
  /* F8F16MM instructions.  */
  AARCH64_FEATURE_F8F16MM,
  /* SVE_PMULL128 extension.  */
  AARCH64_FEATURE_SVE_AES,
  /* SVE AES2 instructions.  */
  AARCH64_FEATURE_SVE_AES2,
  /* SSVE_AES extension.  */
  AARCH64_FEATURE_SSVE_AES,
  /* SVE_BITPERM extension.  */
  AARCH64_FEATURE_SVE_BITPERM,
  /* SSVE_BITPERM extension.  */
  AARCH64_FEATURE_SSVE_BITPERM,
  /* RCPC3 instructions.  */
  AARCH64_FEATURE_RCPC3,
  /* Checked Pointer Arithmetic instructions.  */
  AARCH64_FEATURE_CPA,
  /* FAMINMAX instructions.  */
  AARCH64_FEATURE_FAMINMAX,
  /* FP8 instructions.  */
  AARCH64_FEATURE_FP8,
  /* LUT instructions.  */
  AARCH64_FEATURE_LUT,
  /* Branch Record Buffer Extension.  */
  AARCH64_FEATURE_BRBE,
  /* SME LUTv2 instructions.  */
  AARCH64_FEATURE_SME_LUTv2,
  /* FP8FMA instructions.  */
  AARCH64_FEATURE_FP8FMA,
  /* FP8DOT4 instructions.  */
  AARCH64_FEATURE_FP8DOT4,
  /* FP8DOT2 instructions.  */
  AARCH64_FEATURE_FP8DOT2,
  /* SSVE FP8FMA instructions.  */
  AARCH64_FEATURE_SSVE_FP8FMA,
  /* SSVE FP8DOT4 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT4,
  /* SSVE FP8DOT2 instructions.  */
  AARCH64_FEATURE_SSVE_FP8DOT2,
  /* SME F8F32 instructions.  */
  AARCH64_FEATURE_SME_F8F32,
  /* SME F8F16 instructions.  */
  AARCH64_FEATURE_SME_F8F16,
  /* Non-widening half-precision FP16 to FP16 arithmetic for SME2.  */
  AARCH64_FEATURE_SME_F16F16,
  /* FEAT_SVE_BFSCALE.  */
  AARCH64_FEATURE_SVE_BFSCALE,
  /* SVE Z-targeting non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_SVE_B16B16,
  /* SME non-widening BFloat16 instructions.  */
  AARCH64_FEATURE_SME_B16B16,
  /* SVE2.2.  */
  AARCH64_FEATURE_SVE2p2,
  /* SME2.2.  */
  AARCH64_FEATURE_SME2p2,
  /* FPRCVT instructions.  */
  AARCH64_FEATURE_FPRCVT,
  /* Point of Physical Storage.  */
  AARCH64_FEATURE_PoPS,
  /* GICv5 (Generic Interrupt Controller) CPU Interface Extension.  */
  AARCH64_FEATURE_GCIE,
  /* SVE FEXPA instruction in streaming mode.  */
  AARCH64_FEATURE_SSVE_FEXPA,
  /* SME TMOP instructions.  */
  AARCH64_FEATURE_SME_TMOP,
  /* SME MOP4 instructions.  */
  AARCH64_FEATURE_SME_MOP4,
  /* LSCP instructions.  */
  AARCH64_FEATURE_LSCP,
  /* +mops-go.  */
  AARCH64_FEATURE_MOPS_GO,
  /* SVE2.3.  */
  AARCH64_FEATURE_SVE2p3,
  /* SME2.3.  */
  AARCH64_FEATURE_SME2p3,
  /* F16F32DOT instructions.  */
  AARCH64_FEATURE_F16F32DOT,
  /* F16F32MM instructions.  */
  AARCH64_FEATURE_F16F32MM,
  /* F16MM instructions.  */
  AARCH64_FEATURE_F16MM,
  /* SVE B16MM instructions.  */
  AARCH64_FEATURE_SVE_B16MM,
  /* POE2 instructions.  */
  AARCH64_FEATURE_POE2,
  /* TEV instructions.  */
  AARCH64_FEATURE_TEV,
  /* MPAMv2.  */
  AARCH64_FEATURE_MPAMv2,
  /* MTETC.  */
  AARCH64_FEATURE_MTETC,
  /* TLBI Domains.  */
  AARCH64_FEATURE_TLBID,

  /* Virtual features.  These are used to gate instructions that are enabled
     by either of two (or more) sets of command line flags.  */
  /* +sve2 or +ssve-aes.  */
  AARCH64_FEATURE_SVE2_SSVE_AES,
  /* +sve or +ssve-fexpa.  */
  AARCH64_FEATURE_SVE_SSVE_FEXPA,
  /* +fp8fma+sve or +ssve-fp8fma.  */
  AARCH64_FEATURE_FP8FMA_SVE,
  /* +fp8dot4+sve or +ssve-fp8dot4.  */
  AARCH64_FEATURE_FP8DOT4_SVE,
  /* +fp8dot2+sve or +ssve-fp8dot2.  */
  AARCH64_FEATURE_FP8DOT2_SVE,
  /* +sme-f16f16 or +sme-f8f16.  */
  AARCH64_FEATURE_SME_F16F16_F8F16,
  /* +sve or +sme2p2.  */
  AARCH64_FEATURE_SVE_SME2p2,
  /* +sve2 or +sme2.  */
  AARCH64_FEATURE_SVE2_SME2,
  /* +sve2p1 or +sme.  */
  AARCH64_FEATURE_SVE2p1_SME,
  /* +sve2p1 or +sme2.  */
  AARCH64_FEATURE_SVE2p1_SME2,
  /* +sve2p1 or +sme2p1.  */
  AARCH64_FEATURE_SVE2p1_SME2p1,
  /* +sve2p2 or +sme2p2.  */
  AARCH64_FEATURE_SVE2p2_SME2p2,
  /* +sve2p3 or +sme2p3.  */
  AARCH64_FEATURE_SVE2p3_SME2p3,
  /* +d128 or +tlbid.  */
  AARCH64_FEATURE_D128_TLBID,
  /* Keep last: the total number of feature bits defined above.  */
  AARCH64_NUM_FEATURES
};
326
/* A feature set is stored as a small array of 64-bit words; each value of
   enum aarch64_feature_bit selects one bit within one of those words.  */
typedef uint64_t aarch64_feature_word;
#define AARCH64_BITS_PER_FEATURE_WORD 64

/* Expand BODY (ELT, ...) once for each feature-set word index (currently
   0..2), with SEP between successive expansions.  If AARCH64_NUM_FEATURES
   ever exceeds the replicated capacity, add more BODY lines here (the
   static_assert below checks this).  */
#define AA64_REPLICATE(SEP, BODY, ...) \
  BODY (0, __VA_ARGS__) SEP \
  BODY (1, __VA_ARGS__) SEP \
  BODY (2, __VA_ARGS__)

/* Some useful SEP operators for use with replication.  */
#define REP_COMMA ,
#define REP_SEMICOLON ;
#define REP_OR_OR ||
#define REP_AND_AND &&
#define REP_PLUS +

/* Not currently needed, but if an empty SEP is required define:
   #define REP_NO_SEP
   Then use REP_NO_SEP in the SEP field.  */

/* Used to generate one instance of VAL for each value of ELT (ELT is
   not otherwise used).  */
#define AA64_REPVAL(ELT, VAL) VAL

/* static_assert requires C11 (or C++11) or later.  Support older
   versions by disabling this check since compilers without this are
   pretty uncommon these days.  */
#if ((defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L) \
     || (defined __cplusplus && __cplusplus >= 201103L))
/* Verify that the replicated word capacity can hold every feature bit.  */
static_assert ((AA64_REPLICATE (REP_PLUS, AA64_REPVAL,
				AARCH64_BITS_PER_FEATURE_WORD))
	       >= AARCH64_NUM_FEATURES,
	       "Insufficient repetitions in AA64_REPLICATE()");
#endif
360
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   Yields 0 when BIT does not live in word X, so OR-ing the expansions
   over every X sets the bit exactly once.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  ((X) == (BIT) / AARCH64_BITS_PER_FEATURE_WORD \
   ? 1ULL << (BIT) % AARCH64_BITS_PER_FEATURE_WORD \
   : 0)

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)

/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAUTH)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM)	\
					 | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, RCPC2)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, XS)	\
					 | AARCH64_FEATBIT (X, WFXT)	\
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, CSSC)	\
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, PREDRES2) \
					 )

#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
#define AARCH64_ARCH_V9_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_1A)	\
					 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V9_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_2A)	\
					 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V9_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_3A)	\
					 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V9_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_4A)	\
					 | AARCH64_ARCH_V8_9A_FEATURES (X) \
					 | AARCH64_FEATBIT (X, SVE2p1))
#define AARCH64_ARCH_V9_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_5A)	\
					 | AARCH64_FEATBIT (X, CPA)	\
					 | AARCH64_FEATBIT (X, LUT)	\
					 | AARCH64_FEATBIT (X, FAMINMAX)\
					 )
#define AARCH64_ARCH_V9_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_6A)	\
					 | AARCH64_FEATBIT (X, CMPBR)	\
					 | AARCH64_FEATBIT (X, LSUI)	\
					 | AARCH64_FEATBIT (X, OCCMO))
#define AARCH64_ARCH_V9_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V9_7A)	\
					 | AARCH64_FEATBIT (X, F16F32DOT) \
					 | AARCH64_FEATBIT (X, SVE2p2)	\
					 | AARCH64_FEATBIT (X, SVE2p3))

/* Architectures are the sum of the base and extensions.  Each AARCH64_ARCH_*
   macro includes its predecessor's mask, so the chain accumulates every
   inherited feature.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8)	\
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X)		\
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)		\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)		\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)		\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)		\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)		\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)		\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)		\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R: based on Armv8.4-A, but without the V8A and LOR bits.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X)		\
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X)		\
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X)		\
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X)		\
				 | AARCH64_ARCH_V9_4A_FEATURES (X))
#define AARCH64_ARCH_V9_5A(X)	(AARCH64_ARCH_V9_4A (X)		\
				 | AARCH64_ARCH_V9_5A_FEATURES (X))
#define AARCH64_ARCH_V9_6A(X)	(AARCH64_ARCH_V9_5A (X)		\
				 | AARCH64_ARCH_V9_6A_FEATURES (X))
#define AARCH64_ARCH_V9_7A(X)	(AARCH64_ARCH_V9_6A (X)		\
				 | AARCH64_ARCH_V9_7A_FEATURES (X))

/* The empty architecture: no feature bits at all.  */
#define AARCH64_ARCH_NONE(X)	0
494
/* CPU-specific features.  */
typedef struct {
  /* One 64-bit word per replicated element; bit positions within the
     array are given by enum aarch64_feature_bit.  */
  aarch64_feature_word flags[AA64_REPLICATE (REP_PLUS, AA64_REPVAL, 1)];
} aarch64_feature_set;
499
#define AARCH64_CPU_HAS_FEATURE_BODY(ELT, CPU, FEAT) \
  ((~(CPU).flags[ELT] & AARCH64_FEATBIT (ELT, FEAT)) == 0)
/* Nonzero if feature set CPU includes the single feature
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CPU_HAS_FEATURE(CPU, FEAT) \
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_FEATURE_BODY, CPU, FEAT))

#define AARCH64_CPU_HAS_ALL_FEATURES_BODY(ELT, CPU, FEAT) \
  ((~(CPU).flags[ELT] & (FEAT).flags[ELT]) == 0)
/* Nonzero if feature set CPU includes every feature in feature set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU, FEAT) \
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_ALL_FEATURES_BODY, CPU, FEAT))

#define AARCH64_CPU_HAS_ANY_FEATURES_BODY(ELT, CPU, FEAT) \
  (((CPU).flags[ELT] & (FEAT).flags[ELT]) != 0)
/* Nonzero if feature set CPU includes at least one feature from FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT) \
  (AA64_REPLICATE (REP_OR_OR, AARCH64_CPU_HAS_ANY_FEATURES_BODY, CPU, FEAT))

#define AARCH64_SET_FEATURE_BODY(ELT, DEST, FEAT) \
  (DEST).flags[ELT] = FEAT (ELT)
/* Assign to DEST the features described by FEAT, where FEAT is a
   function-like macro (such as AARCH64_ARCH_V8A) mapping a word index
   to a mask.  */
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  (AA64_REPLICATE (REP_COMMA, AARCH64_SET_FEATURE_BODY, DEST, FEAT))

#define AARCH64_CLEAR_FEATURE_BODY(ELT, DEST, SRC, FEAT) \
  (DEST).flags[ELT] = ((SRC).flags[ELT] \
		       & ~AARCH64_FEATBIT (ELT, FEAT))
/* Copy SRC to DEST with the single bit AARCH64_FEATURE_<FEAT> cleared.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT) \
  (AA64_REPLICATE (REP_COMMA, AARCH64_CLEAR_FEATURE_BODY, DEST, SRC, FEAT))

/* TARG = F1 | F2: the union of feature sets F1 and F2.  */
#define AARCH64_MERGE_FEATURE_SETS_BODY(ELT, TARG, F1, F2) \
  (TARG).flags[ELT] = (F1).flags[ELT] | (F2).flags[ELT];
#define AARCH64_MERGE_FEATURE_SETS(TARG, F1, F2)		\
  do								\
    {								\
      AA64_REPLICATE (REP_SEMICOLON,				\
		      AARCH64_MERGE_FEATURE_SETS_BODY, TARG, F1, F2); \
    }								\
  while (0)

/* TARG = F1 & ~F2: F1 with all of F2's features removed.  */
#define AARCH64_CLEAR_FEATURES_BODY(ELT, TARG, F1, F2) \
  (TARG).flags[ELT] = (F1).flags[ELT] &~ (F2).flags[ELT];
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)			\
  do								\
    {								\
      AA64_REPLICATE (REP_SEMICOLON,				\
		      AARCH64_CLEAR_FEATURES_BODY, TARG, F1, F2); \
    }								\
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, 0) } }
#define AARCH64_ALL_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, -1) } }

/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE_BODY(ELT, FEAT) \
  AARCH64_FEATBIT (ELT, FEAT)
#define AARCH64_FEATURE(FEAT) \
  { { AA64_REPLICATE (REP_COMMA, AARCH64_FEATURE_BODY, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  */
#define AARCH64_ARCH_FEATURES_BODY(ELT, ARCH) \
  AARCH64_ARCH_##ARCH (ELT)
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AA64_REPLICATE (REP_COMMA, AARCH64_ARCH_FEATURES_BODY, ARCH) } }

/* Used by AARCH64_CPU_FEATURES.  AARCH64_OR_FEATURES_<N> ORs the masks of
   N named features onto the mask for architecture ARCH.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".  */
#define AARCH64_CPU_FEATURES_BODY(ELT, ARCH, N, ...)	\
  AARCH64_OR_FEATURES_##N (ELT, ARCH, __VA_ARGS__)
#define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
  { { AA64_REPLICATE (REP_COMMA, AARCH64_CPU_FEATURES_BODY,	\
		      ARCH, N, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...".  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
597
/* Coarse classification of instruction operands: integer registers,
   FP/SIMD registers and elements, SVE registers, predicate registers,
   ZA accesses, addresses, immediates, system operands and conditions.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_SVE_REGLIST,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ZA_ACCESS,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};
617
618 /* Operand code that helps both parsing and coding.
619 Keep AARCH64_OPERANDS synced. */
620
621 enum aarch64_opnd
622 {
623 AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!*/
624
625 AARCH64_OPND_Rd, /* Integer register as destination. */
626 AARCH64_OPND_Rn, /* Integer register as source. */
627 AARCH64_OPND_Rm, /* Integer register as source. */
628 AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */
629 AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */
630 AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */
631 AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */
632 AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */
633 AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */
634 AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */
635 AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */
636
637 AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
638 AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
639 AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
640 AARCH64_OPND_PAIRREG, /* Paired register operand. */
641 AARCH64_OPND_PAIRREG_OR_XZR, /* Paired register operand, optionally xzr. */
642 AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
643 AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
644 AARCH64_OPND_Rm_LSL, /* Integer Rm shifted (LSL-only). */
645
646 AARCH64_OPND_Fd, /* Floating-point Fd. */
647 AARCH64_OPND_Fn, /* Floating-point Fn. */
648 AARCH64_OPND_Fm, /* Floating-point Fm. */
649 AARCH64_OPND_Fa, /* Floating-point Fa. */
650 AARCH64_OPND_Ft, /* Floating-point Ft. */
651 AARCH64_OPND_Ft2, /* Floating-point Ft2. */
652
653 AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */
654 AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
655 AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
656
657 AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
658 AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
659 AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
660 AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
661 AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */
662 AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */
663 AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
664 AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
665 AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
666 AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
667 qualifier is S_H or S_2B. */
668 AARCH64_OPND_Em8, /* AdvSIMD Vector Element Vm restricted to V0 - V7,
669 used only with qualifier S_B. */
670 AARCH64_OPND_Em_INDEX1_14, /* AdvSIMD 1-bit encoded index in Vm at [14] */
671 AARCH64_OPND_Em_INDEX2_13, /* AdvSIMD 2-bit encoded index in Vm at [14:13] */
672 AARCH64_OPND_Em_INDEX3_12, /* AdvSIMD 3-bit encoded index in Vm at [14:12] */
673 AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
674 AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
675 AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
676 structure to all lanes. */
677 AARCH64_OPND_LVn_LUT, /* AdvSIMD Vector register list used in lut. */
678 AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */
679
680 AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
681 AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
682
683 AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
684 AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
685 AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
686 AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
687 AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
688 AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */
689 AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate. */
690 AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
691 (no encoding). */
692 AARCH64_OPND_IMM0, /* Immediate for #0. */
693 AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */
694 AARCH64_OPND_FPIMM, /* Floating-point Immediate. */
695 AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */
696 AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
697 AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
698 AARCH64_OPND_IMM, /* Immediate. */
699 AARCH64_OPND_IMM_2, /* Immediate. */
700 AARCH64_OPND_IMMP1_2, /* Immediate plus 1. */
701 AARCH64_OPND_IMMS1_2, /* Immediate minus 1. */
702 AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
703 AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
704 AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
705 AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg. */
706 AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */
707 AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */
708 AARCH64_OPND_BIT_NUM, /* Immediate. */
709 AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */
710 AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
711 AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */
712 AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
713 AARCH64_OPND_NOT_BALANCED_10, /* an optional not balanced indicator (NB). */
714 AARCH64_OPND_NOT_BALANCED_17, /* an optional not balanced indicator (NB). */
715 AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
716 each condition flag. */
717
718 AARCH64_OPND_LIMM, /* Logical Immediate. */
719 AARCH64_OPND_AIMM, /* Arithmetic immediate. */
720 AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
721 AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
722 AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
723 AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
724 AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
725 AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
726
727 AARCH64_OPND_COND, /* Standard condition as the last operand. */
728 AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
729
730 AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
731 AARCH64_OPND_ADDR_PCREL9, /* 9-bit PC-relative address for e.g. CB<cc>. */
732 AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */
733 AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */
734 AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */
735 AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */
736
737 AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */
738 AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */
739 AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */
740 AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */
741 AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
742 negative or unaligned and there is
743 no writeback allowed. This operand code
744 is only used to support the programmer-
745 friendly feature of using LDR/STR as the
746 the mnemonic name for LDUR/STUR instructions
747 wherever there is no ambiguity. */
748 AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
749 AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
750 16) immediate. */
751 AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
752 AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
753 16) immediate. */
754 AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
755 AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
756 AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
757
758 AARCH64_OPND_SYSREG, /* System register operand. */
759 AARCH64_OPND_SYSREG128, /* 128-bit system register operand. */
760 AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */
761 AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */
762 AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */
763 AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */
764 AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */
765 AARCH64_OPND_SYSREG_TLBIP, /* System register <tlbip_op> operand. */
766 AARCH64_OPND_SYSREG_PLBI, /* System register <plbi_op> operand. */
767 AARCH64_OPND_SYSREG_MLBI, /* System register <mlbi_op> operand. */
768 AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */
769 AARCH64_OPND_BARRIER, /* Barrier operand. */
770 AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */
771 AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
772 AARCH64_OPND_PRFOP, /* Prefetch operation. */
773 AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */
774 AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
775 AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */
776 AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
777 AARCH64_OPND_STSHH_POLICY, /* STSHH {<policy>}. */
778 AARCH64_OPND_SHUH_PHINT, /* SHUH Priority Hint. */
779 AARCH64_OPND_BRBOP, /* BRB operation IALL or INJ in bit 5. */
780 AARCH64_OPND_Rt_IN_SYS_ALIASES, /* Defaulted and omitted Rt used in SYS aliases such as brb. */
781 AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */
782 AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */
783 AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
784 AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
785 AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
786 AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
787 AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
788 AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
789 AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
790 AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
791 AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
792 AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
793 AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
794 AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
795 AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>{, <Xm|XZR>}]. */
796 AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #1}]. */
797 AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #2}]. */
798 AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #3}]. */
799 AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #4}]. */
800 AARCH64_OPND_SVE_ADDR_RM, /* SVE [<Xn|SP>, <Xm|XZR>]. */
801 AARCH64_OPND_SVE_ADDR_RM_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
802 AARCH64_OPND_SVE_ADDR_RM_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
803 AARCH64_OPND_SVE_ADDR_RM_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
804 AARCH64_OPND_SVE_ADDR_RM_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */
805 AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
806 AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
807 AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
808 AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
809 AARCH64_OPND_SVE_ADDR_RX_LSL4, /* SVE [<Xn|SP>, <Xm>, LSL #4]. */
810 AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */
811 AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
812 AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
813 AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
814 AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
815 AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
816 Bit 14 controls S/U choice. */
817 AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
818 Bit 22 controls S/U choice. */
819 AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
820 Bit 14 controls S/U choice. */
821 AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
822 Bit 22 controls S/U choice. */
823 AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
824 Bit 14 controls S/U choice. */
825 AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
826 Bit 22 controls S/U choice. */
827 AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
828 Bit 14 controls S/U choice. */
829 AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
830 Bit 22 controls S/U choice. */
831 AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
832 AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
833 AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
834 AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
835 AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */
836 AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */
837 AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */
838 AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
839 AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
840 AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
841 AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
842 AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
843 AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
844 AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
845 AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
846 AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */
847 AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
848 AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
849 AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
850 AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
851 AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
852 AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
853 AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
854 AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */
855 AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
856 AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
857 AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
858 AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */
859 AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
860 AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
861 AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
862 AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */
863 AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
864 AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */
865 AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
866 AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
867 AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
868 AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
869 AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */
870 AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
871 AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
872 AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */
873 AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
874 AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
875 AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
876 AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
877 AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
878 AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
879 AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
880 AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
881 AARCH64_OPND_SVE_UIMM4, /* SVE unsigned 4-bit immediate. */
882 AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
883 AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
884 AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
885 AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
886 AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B). */
887 AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H). */
888 AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S). */
889 AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D). */
890 AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B). */
891 AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H). */
892 AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
893 AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D). */
894 AARCH64_OPND_SME_ZA_ARRAY4, /* Tile to vector, single (BHSDQ). */
895 AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
896 AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
897 AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
898 AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
899 AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
900 AARCH64_OPND_SVE_Zm1_23_INDEX, /* SVE bit index in Zm, bit 23. */
901 AARCH64_OPND_SVE_Zm2_22_INDEX, /* SVE bit index in Zm, bits [23,22]. */
902 AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
903 AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */
904 AARCH64_OPND_SVE_Zm3_12_INDEX, /* SVE bit index in Zm, bits 12 plus bit [23,22]. */
905 AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */
906 AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
907 AARCH64_OPND_SVE_Zm3_10_INDEX, /* z0-z7[0-15] in Zm3_INDEX plus bit 11:10. */
908 AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */
909 AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
910 AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
911 AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
912 AARCH64_OPND_SVE_Zn_5_INDEX, /* Indexed SVE vector register, for DUPQ. */
913 AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
914 AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
915 AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
916 AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
917 AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
918 AARCH64_OPND_SME_Zm, /* SVE vector register in 4-bit Zm. */
919 AARCH64_OPND_SME_Zm_17, /* SVE vector register in [20:17]. */
920 AARCH64_OPND_SME_Zn_6_3, /* SVE vector register in [8:6]*2. */
921 AARCH64_OPND_SME_Zm_17_3, /* SVE vector register in [19:17]*2+16. */
922 AARCH64_OPND_SME_Znx2_6_3, /* SVE vector register list from [8:6]*2. */
923 AARCH64_OPND_SME_Zmx2_17_3, /* SVE vector register list from [19:17]*2+16. */
924 AARCH64_OPND_SME_Zmx2_INDEX_22, /* SVE vector register list in [20:16].with index in 22 */
925 AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
926 AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
927 AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */
928 AARCH64_OPND_SME_Znx2_BIT_INDEX, /* SVE vector register list encoding a bit index from [9:6]*2. */
929 AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */
930 AARCH64_OPND_SME_Zn7xN_UNTYPED, /* SVE vector register list from [9:7]. */
931 AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */
932 AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */
933 AARCH64_OPND_SME_ZAda_1b, /* SME <ZAda>.H, 1-bits. */
934 AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */
935 AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */
936 AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */
937 AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */
938 AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */
939 AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */
940 AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */
941 AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */
942 AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */
943 AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */
944 AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */
945 AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */
946 AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */
947 AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */
948 AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */
949 AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */
950 AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */
951 AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */
952 AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */
953 AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */
954 AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */
955 AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */
956 AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */
957 AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */
958 AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */
959 AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */
960 AARCH64_OPND_SME_SHRIMM3, /* 3-bit right shift, bits [18:16]. */
961 AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */
962 AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */
963 AARCH64_OPND_SME_Zk_INDEX, /* Zk[index], bits [12:10,5:4]. */
964 AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */
965 AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */
966 AARCH64_OPND_SME_Zm_INDEX2_3, /* Zn.T[index], bits [19:16,10,3]. */
967 AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */
968 AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */
969 AARCH64_OPND_SME_Zm_INDEX3_3, /* Zn.T[index], bits [19:16,11:10,3]. */
970 AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */
971 AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */
972 AARCH64_OPND_SME_Zm_INDEX4_2, /* Zn.T[index], bits [19:16,11:10,3:2]. */
973 AARCH64_OPND_SME_Zm_INDEX4_3, /* Zn.T[index], bits [19:16,15,11,10,3]. */
974 AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */
975 AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */
976 AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */
977 AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */
978 AARCH64_OPND_SME_Zn_INDEX2_19, /* Zn[index], bits [9:5] and [20:19]. */
979 AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */
980 AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */
981 AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */
982 AARCH64_OPND_SVE_Zn0_INDEX, /* Zn[index], bits [9:5]. */
983 AARCH64_OPND_SVE_Zn1_17_INDEX, /* Zn[index], bits [9:5,17]. */
984 AARCH64_OPND_SVE_Zn2_18_INDEX, /* Zn[index], bits [9:5,18:17]. */
985 AARCH64_OPND_SVE_Zn3_22_INDEX, /* Zn[index], bits [9:5,18:17,22]. */
986 AARCH64_OPND_SVE_Zd0_INDEX, /* Zn[index], bits [4:0]. */
987 AARCH64_OPND_SVE_Zd1_17_INDEX, /* Zn[index], bits [4:0,17]. */
988 AARCH64_OPND_SVE_Zd2_18_INDEX, /* Zn[index], bits [4:0,18:17]. */
989 AARCH64_OPND_SVE_Zd3_22_INDEX, /* Zn[index], bits [4:0,18:17,22]. */
990 AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */
991 AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */
992 AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */
993 AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */
994 AARCH64_OPND_SME_ZT0_INDEX_MUL_VL,/* ZT0[<imm>], bits [13:12]. */
995 AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */
996 AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */
997 AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
998 AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */
999 AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */
1000 AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */
1001 AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */
1002 AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */
1003 AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND, /* [<Xn|SP>]{, #<imm>}. */
1004 AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!. */
1005 AARCH64_OPND_RCPC3_ADDR_POSTIND, /* [<Xn|SP>], #<imm>. */
1006 AARCH64_OPND_RCPC3_ADDR_PREIND_WB, /* [<Xn|SP>, #<imm>]!. */
1007 AARCH64_OPND_RCPC3_ADDR_OFFSET,
1008 AARCH64_OPND_GIC,
1009 AARCH64_OPND_GICR,
1010 AARCH64_OPND_GSB,
1011 };
1012
1013 /* Qualifier constrains an operand. It either specifies a variant of an
1014 operand type or limits values available to an operand type.
1015
1016 N.B. Order is important.
1017 Keep aarch64_opnd_qualifiers (opcodes/aarch64-opc.c) synced. */
1018
enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 2 x 1 byte,
     4 x 1 byte or 2 x 2 byte are selected by the instruction.  Other than that
     they do not differ from AARCH64_OPND_QLF_S_B in encoding.  They are
     here purely for syntactical reasons and are an exception from the normal
     AArch64 disassembly scheme.  */
  AARCH64_OPND_QLF_S_2B,
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* Qualifying a predicate operand: /z (zeroing) or /m (merging)
     predication -- NOTE(review): inferred from the names; confirm.  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm.  */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,

  /* Special qualifier used for indicating error in qualifier retrieval.  */
  AARCH64_OPND_QLF_ERR,
} ATTRIBUTE_PACKED;
1102
1103 /* Instruction class. */
1105
/* N.B. many class names appear to follow the instruction-encoding group
   mnemonics of the Arm Architecture Reference Manual (e.g. "asimd*" for
   AdvSIMD vector, "asisd*" for AdvSIMD scalar) -- TODO confirm.  */
enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  fprcvtfloat2int,
  fprcvtint2float,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  /* SME classes.  */
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bh,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_12_b,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  /* SVE classes.  */
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bh,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_hsd3,
  sve_size_sd,
  sve_size_sd2,
  sve_size_sd3,
  sve_size_sd4,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
  the,
  sve2_urqvs,
  sve_index1,
  rcpc3,
  lut,
  /* Keep this equal to the last real class above.  */
  last_iclass = lut
};
1236
1237 /* Opcode enumerators. */
1238
enum aarch64_op
{
  OP_NIL,	/* No dedicated enumerator.  */

  /* Loads/stores with an immediate offset ("_POS") -- NOTE(review):
     naming presumably relates to the ldst_pos iclass; confirm.  */
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  /* Unscaled-offset loads/stores (LDUR/STUR family).  */
  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  /* PC-relative (literal) loads and prefetch.  */
  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  /* Move wide (immediate).  */
  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  /* Immediate shifts.  */
  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  /* Bitfield operations and extends.  */
  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  /* Conditional operations.  */
  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  /* Floating-point conversions.  */
  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  /* SVE MOV/NOT alias forms; the suffix letters name the operand kinds
     (P = predicate, Z = vector, V = FP/SIMD scalar, i = indexed) --
     NOTE(review): inferred from the names; confirm.  */
  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo: the number of enumerators.  */
};
1341
1342 /* Error types. */
enum err_type
{
  ERR_OK,		/* No error.  */
  ERR_UND,		/* Undefined -- TODO confirm exact semantics.  */
  ERR_UNP,		/* Unpredictable -- TODO confirm exact semantics.  */
  ERR_VFI,		/* Returned by opcode verifiers (see the verifier
			   callback in struct aarch64_opcode) -- TODO confirm
			   expansion of "VFI".  */
  ERR_NR_ENTRIES	/* Pseudo: the number of error types.  */
};
1351
1352 /* Maximum number of operands an instruction can have. */
1353 #define AARCH64_MAX_OPND_NUM 7
1354 /* Maximum number of qualifier sequences an instruction can have. */
1355 #define AARCH64_MAX_QLF_SEQ_NUM 10
1356 /* Operand qualifier typedef */
1357 typedef enum aarch64_opnd_qualifier aarch64_opnd_qualifier_t;
1358 /* Operand qualifier sequence typedef. */
1359 typedef aarch64_opnd_qualifier_t \
1360 aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1361
1362 /* FIXME: improve the efficiency. */
1363 static inline bool
1364 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1365 {
1366 int i;
1367 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1368 if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1369 return false;
1370 return true;
1371 }
1372
/* Forward declarations, needed e.g. by the verifier callback declared in
   struct aarch64_opcode below.  */
/* Forward declare error reporting type.  */
typedef struct aarch64_operand_error aarch64_operand_error;
/* Forward declare instruction sequence type.  */
typedef struct aarch64_instr_sequence aarch64_instr_sequence;
/* Forward declare instruction definition.  */
typedef struct aarch64_inst aarch64_inst;
1379
1380 /* This structure holds information for a particular opcode. */
1381
struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (the F_* values
     defined below).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks (the
     C_* values defined below).  */
  uint32_t constraints;

  /* If nonzero, the index of an operand; that operand and operand 0 are
     both registers and are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid;
     it returns an err_type and presumably reports details through the
     aarch64_operand_error argument -- TODO confirm.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
1432
/* Convenience typedef for struct aarch64_opcode.  */
typedef struct aarch64_opcode aarch64_opcode;

/* Table describing all the AArch64 opcodes.  */
extern const aarch64_opcode aarch64_opcode_table[];
1437
/* Opcode flags (values for the `flags' member of struct aarch64_opcode).  */
#define F_ALIAS (1 << 0)
#define F_HAS_ALIAS (1 << 1)
/* Disassembly preference priority 1-3 (the larger the higher).  If nothing
   is specified, it is the priority 0 by default, i.e. the lowest priority.  */
#define F_P1 (1 << 2)
#define F_P2 (2 << 2)
#define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
#define F_COND (1 << 4)
/* Instruction has the field of 'sf'.  */
#define F_SF (1 << 5)
/* Instruction has the field of 'size:Q'.  */
#define F_SIZEQ (1 << 6)
/* Floating-point instruction has the field of 'type'.  */
#define F_FPTYPE (1 << 7)
/* AdvSIMD scalar instruction has the field of 'size'.  */
#define F_SSIZE (1 << 8)
/* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
#define F_T (1 << 9)
/* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
#define F_GPRSIZE_IN_Q (1 << 10)
/* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
#define F_LDS_SIZE (1 << 11)
/* Optional operand; assume maximum of 1 operand can be optional.
   The operand index + 1 is stored in flag bits [14:12]
   (cf. optional_operand_p below).  */
#define F_OPD0_OPT (1 << 12)
#define F_OPD1_OPT (2 << 12)
#define F_OPD2_OPT (3 << 12)
#define F_OPD3_OPT (4 << 12)
#define F_OPD4_OPT (5 << 12)
/* Default value for the optional operand when omitted from the assembly;
   stored in flag bits [19:15].  */
#define F_DEFAULT(X) (((X) & 0x1f) << 15)
/* Instruction that is an alias of another instruction needs to be
   encoded/decoded by converting it to/from the real form, followed by
   the encoding/decoding according to the rules of the real opcode.
   This compares to the direct coding using the alias's information.
   N.B. this flag requires F_ALIAS to be used together.  */
#define F_CONV (1 << 20)
/* Use together with F_ALIAS to indicate an alias opcode is a programmer
   friendly pseudo instruction available only in the assembly code (thus will
   not show up in the disassembly).  */
#define F_PSEUDO (1 << 21)
/* Instruction has miscellaneous encoding/decoding rules.  */
#define F_MISC (1 << 22)
/* Instruction has the field of 'N'; used in conjunction with F_SF.  */
#define F_N (1 << 23)
/* Opcode dependent field; stored in flag bits [26:24].  */
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'.  */
#define F_LSE_SZ (1 << 27)
/* Require an exact qualifier match, even for NIL qualifiers.  */
#define F_STRICT (1ULL << 28)
/* This system instruction is used to read system registers.  */
#define F_SYS_READ (1ULL << 29)
/* This system instruction is used to write system registers.  */
#define F_SYS_WRITE (1ULL << 30)
/* This instruction has an extra constraint on it that imposes a requirement on
   subsequent instructions.  */
#define F_SCAN (1ULL << 31)
/* Instruction takes a pair of optional operands.  If we specify the Nth operand
   to be optional, then we also implicitly specify (N+1)th operand to also be
   optional.  */
#define F_OPD_PAIR_OPT (1ULL << 32)
/* This instruction does not allow the full range of values that the
   width of fields in the assembler instruction would theoretically
   allow.  This impacts the constraints on assembly but yields no
   impact on disassembly.  */
#define F_OPD_NARROW (1ULL << 33)
/* For the instruction with size[22:23] field.  */
#define F_OPD_SIZE (1ULL << 34)
/* RCPC3 instruction has the field of 'size'.  */
#define F_RCPC3_SIZE (1ULL << 35)
/* This instruction need VGx2 or VGx4 mandatorily in the operand passed to
   assembler.  */
#define F_VG_REQ (1ULL << 36)

/* 4-bit flag field (flag bits [40:37]) to indicate subclass of instructions.
   Note the overlap between the set of subclass flags in each logical category
   (F_LDST_*, F_ARITH_*, F_BRANCH_* etc.); The usage of flags as
   iclass-specific enums is intentional.  */
#define F_SUBCLASS (15ULL << 37)

#define F_LDST_LOAD (1ULL << 37)
#define F_LDST_STORE (2ULL << 37)
/* Subclasses to denote add, sub and mov insns.  */
#define F_ARITH_ADD (1ULL << 37)
#define F_ARITH_SUB (2ULL << 37)
#define F_ARITH_MOV (3ULL << 37)
/* Subclasses to denote call and ret insns.  */
#define F_BRANCH_CALL (1ULL << 37)
#define F_BRANCH_RET (2ULL << 37)
/* Subclass to denote that only tag update is involved.  */
#define F_DP_TAG_ONLY (1ULL << 37)

#define F_SUBCLASS_OTHER (F_SUBCLASS)

/* For LSFE instructions with size[30:31] field.  */
#define F_LSFE_SZ (1ULL << 41)

/* When parsing immediate values, register names should not be misinterpreted
   as symbols.  However, for backwards compatibility we need to permit some
   newer register names within older instructions.  These flags specify which
   register names are invalid immediate value, and are required for all
   instructions with immediate operands (and are otherwise ignored).
   Stored in flag bits [43:42].  */
#define F_INVALID_IMM_SYMS (3ULL << 42)

/* Any GP or SIMD register except WSP/SP.  */
#define F_INVALID_IMM_SYMS_1 (1ULL << 42)

/* As above, plus WSP/SP, and Z and P registers.  */
#define F_INVALID_IMM_SYMS_2 (2ULL << 42)

/* As above, plus PN registers.  */
#define F_INVALID_IMM_SYMS_3 (3ULL << 42)

/* Next bit is 44.  */

/* Instruction constraints (values for the `constraints' member of
   struct aarch64_opcode).  */
/* This instruction has a predication constraint on the instruction at PC+4.  */
#define C_SCAN_MOVPRFX (1U << 0)
/* This instruction's operation width is determined by the operand with the
   largest element size.  */
#define C_MAX_ELEM (1U << 1)
/* MOPS scan values occupy bits [3:2]; C_SCAN_MOPS_PME appears to be the
   field mask (hence numerically equal to C_SCAN_MOPS_E) -- TODO confirm.  */
#define C_SCAN_MOPS_P (1U << 2)
#define C_SCAN_MOPS_M (2U << 2)
#define C_SCAN_MOPS_E (3U << 2)
#define C_SCAN_MOPS_PME (3U << 2)
/* Next bit is 4.  */
1566
1567 static inline bool
1568 alias_opcode_p (const aarch64_opcode *opcode)
1569 {
1570 return (opcode->flags & F_ALIAS) != 0;
1571 }
1572
1573 static inline bool
1574 opcode_has_alias (const aarch64_opcode *opcode)
1575 {
1576 return (opcode->flags & F_HAS_ALIAS) != 0;
1577 }
1578
1579 /* Priority for disassembling preference. */
1580 static inline int
1581 opcode_priority (const aarch64_opcode *opcode)
1582 {
1583 return (opcode->flags >> 2) & 0x3;
1584 }
1585
1586 static inline bool
1587 pseudo_opcode_p (const aarch64_opcode *opcode)
1588 {
1589 return (opcode->flags & F_PSEUDO) != 0lu;
1590 }
1591
1592 /* Whether the opcode has the specific subclass flag.
1593 N.B. The overlap between F_LDST_*, F_ARITH_*, and F_BRANCH_* etc. subclass
1594 flags means that the callers of this function have the responsibility of
1595 checking for the flags appropriate for the specific iclass. */
1596 static inline bool
1597 aarch64_opcode_subclass_p (const aarch64_opcode *opcode, uint64_t flag)
1598 {
1599 return ((opcode->flags & F_SUBCLASS) == flag);
1600 }
1601
1602 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case
1603 by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
1604 [IDX, IDX + 1]. */
1605 static inline bool
1606 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1607 {
1608 if (opcode->flags & F_OPD_PAIR_OPT)
1609 return (((opcode->flags >> 12) & 0x7) == idx
1610 || ((opcode->flags >> 12) & 0x7) == idx + 1);
1611 return ((opcode->flags >> 12) & 0x7) == idx + 1;
1612 }
1613
1614 static inline aarch64_insn
1615 get_optional_operand_default_value (const aarch64_opcode *opcode)
1616 {
1617 return (opcode->flags >> 15) & 0x1f;
1618 }
1619
1620 static inline unsigned int
1621 get_opcode_dependent_value (const aarch64_opcode *opcode)
1622 {
1623 return (opcode->flags >> 24) & 0x7;
1624 }
1625
1626 static inline bool
1627 get_opcode_dependent_vg_status (const aarch64_opcode *opcode)
1628 {
1629 return (opcode->flags >> 36) & 0x1;
1630 }
1631
1632 static inline bool
1633 opcode_has_special_coder (const aarch64_opcode *opcode)
1634 {
1635 return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1636 | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
1637 | F_OPD_SIZE | F_RCPC3_SIZE | F_LSFE_SZ )) != 0;
1638 }
1639
/* A simple (name, encoding value) pair; used for the tables of operand
   modifiers, barrier options, prefetch operations and hint options
   declared below.  */
struct aarch64_name_value_pair
{
  const char * name;
  aarch64_insn value;
};
1646
1647 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
1648 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
1649 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
1650 extern const struct aarch64_name_value_pair aarch64_prfops [32];
1651 extern const struct aarch64_name_value_pair aarch64_hint_options [];
1652
1653 #define AARCH64_MAX_SYSREG_NAME_LEN 32
1654
/* An entry in the system register table: the register's name, its
   encoding, and assorted flags (e.g. F_REG_READ/F_REG_WRITE access
   restrictions used during disassembly).  */
typedef struct
{
  const char * name;
  aarch64_insn value;
  uint32_t flags;

  /* A set of features, all of which are required for this system register
     to be available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
1665
1666 extern const aarch64_sys_reg aarch64_sys_regs [];
1667 extern const aarch64_sys_reg aarch64_pstatefields [];
1668 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
1669 extern bool aarch64_sys_reg_128bit_p (const uint32_t);
1670 extern bool aarch64_sys_reg_alias_p (const uint32_t);
1671 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
1672 const aarch64_sys_reg *);
1673
/* An entry in a system-instruction operand table (IC, DC, AT, TLBI,
   etc.; see the table declarations below): the operand's name, its
   encoding and assorted flags.  */
typedef struct
{
  const char *name;
  uint32_t value;
  uint32_t flags ;

  /* A set of features, all of which are required for this system
     instruction to be available.  */
  aarch64_feature_set features;
} aarch64_sys_ins_reg;
1684
1685 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
1686 extern bool aarch64_sys_ins_reg_tlbid_xt (const aarch64_sys_ins_reg *);
1687 extern bool
1688 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
1689 const char *reg_name,
1690 const aarch64_feature_set *);
1691
1692 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
1693 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
1694 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
1695 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
1696 extern const aarch64_sys_ins_reg aarch64_sys_regs_plbi [];
1697 extern const aarch64_sys_ins_reg aarch64_sys_regs_mlbi [];
1698 extern const aarch64_sys_ins_reg aarch64_sys_ins_gic [];
1699 extern const aarch64_sys_ins_reg aarch64_sys_ins_gicr [];
1700 extern const aarch64_sys_ins_reg aarch64_sys_ins_gsb [];
1701 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1702
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  /* Shift operators.  */
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  /* Unsigned (zero-) extension operators.  */
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  /* Signed extension operators.  */
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  /* Multiplier forms, e.g. "MUL #<n>" and "MUL VL".  */
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
1724
1725 bool
1726 aarch64_extend_operator_p (enum aarch64_modifier_kind);
1727
1728 enum aarch64_modifier_kind
1729 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
1730 /* Condition. */
1731
/* A condition code: its possible mnemonics and its 4-bit encoding.  */
typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4 names are used.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;
1739
1740 extern const aarch64_cond aarch64_conds[16];
1741
1742 const aarch64_cond* get_cond_from_value (aarch64_insn value);
1743 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1744
/* Information about a reference to part of ZA.  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The index expression: a 32-bit index register <Wv> plus an immediate
     offset or offset range.  */
  struct
    {
      /* The 32-bit index register.  */
      int regno;

      /* The first (or only) immediate offset.  */
      int64_t imm;

      /* The last immediate offset minus the first immediate offset.
         Unlike the range size, this is guaranteed not to overflow
         when the end offset > the start offset.  */
      uint64_t countm1;
    } index;

  /* The vector group size, or 0 if none.  NOTE(review): presumably the
     <n> of a VGx<n> modifier -- confirm against the encoders.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1773
/* Information about a list of registers.  */
struct aarch64_reglist
{
  unsigned first_regno : 8;
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if the list is of register elements (each entry carrying the lane
     index below) rather than of whole registers.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1786
/* Structure representing an operand.  */

struct aarch64_opnd_info
{
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;
  /* Position of this operand within the instruction's operand list.  */
  int idx;

  /* Operand value; which member is active is implied by TYPE.  */
  union
    {
      /* A plain register.  */
      struct
        {
          unsigned regno;
        } reg;
      /* A register plus an element/lane index.  */
      struct
        {
          unsigned int regno;
          int64_t index;
        } reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
        {
          int64_t value;
          unsigned is_fp : 1;
        } imm;
      /* e.g. address in STR (register offset).  */
      struct
        {
          unsigned base_regno;
          struct
            {
              union
                {
                  int imm;
                  unsigned regno;
                };
              /* Discriminator for the anonymous union above: nonzero
                 when the offset is a register, zero when it is an
                 immediate.  */
              unsigned is_reg;
            } offset;
          unsigned pcrel : 1;           /* PC-relative.  */
          unsigned writeback : 1;
          unsigned preind : 1;          /* Pre-indexed.  */
          unsigned postind : 1;         /* Post-indexed.  */
        } addr;

      struct
        {
          /* The encoding of the system register.  */
          aarch64_insn value;

          /* The system register flags.  During assembly this contains the
             flags from aarch64-sys-regs.def.  During disassembly this stores
             either F_REG_READ or F_REG_WRITE, depending upon the opcode.  */
          uint32_t flags;
        } sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */
      struct aarch64_indexed_za indexed_za;

      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;     /* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;      /* Operand is not completed if there is a fixup needed
                           to be done on it.  In some (but not all) of these
                           cases, we need to tell libopcodes to skip the
                           constraint checking and the encoding for this
                           operand, so that the libopcodes can pick up the
                           right opcode before the operand is fixed-up.  This
                           flag should only be used during the
                           assembling/encoding.  */
  unsigned present:1;   /* Whether this operand is present in the assembly
                           line; not used during the disassembly.  */
};
1878
1879 typedef struct aarch64_opnd_info aarch64_opnd_info;
1880
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction (a 32-bit encoding).  */
  aarch64_insn value;

  /* Corresponding entry in the opcode table.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditional-executed instruction, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information, in operand order.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1905
1906 /* Defining the HINT #imm values for the aarch64_hint_options. */
1907 #define HINT_OPD_CSYNC 0x11
1908 #define HINT_OPD_DSYNC 0x13
1909 #define HINT_OPD_R 0x20
1910 #define HINT_OPD_C 0x22
1911 #define HINT_OPD_J 0x24
1912 #define HINT_OPD_JC 0x26
1913 #define HINT_OPD_KEEP 0x30
1914 #define HINT_OPD_STRM 0x31
1915 #define HINT_OPD_NPHINT 0x32
1916 #define HINT_OPD_PHINT 0x33
1917 #define HINT_OPD_NULL 0x00
1918
1919
/* Diagnosis-related declarations and interfaces.  */
1922
1923 /* Operand error kind enumerators.
1924
1925 AARCH64_OPDE_RECOVERABLE
1926 Less severe error found during the parsing, very possibly because that
1927 GAS has picked up a wrong instruction template for the parsing.
1928
1929 AARCH64_OPDE_A_SHOULD_FOLLOW_B
1930 The instruction forms (or is expected to form) part of a sequence,
1931 but the preceding instruction in the sequence wasn't the expected one.
1932 The message refers to two strings: the name of the current instruction,
1933 followed by the name of the expected preceding instruction.
1934
1935 AARCH64_OPDE_EXPECTED_A_AFTER_B
1936 Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1937 so that the current instruction is assumed to be the incorrect one:
1938 "since the previous instruction was B, the current one should be A".
1939
1940 AARCH64_OPDE_SYNTAX_ERROR
1941 General syntax error; it can be either a user error, or simply because
1942 that GAS is trying a wrong instruction template.
1943
1944 AARCH64_OPDE_FATAL_SYNTAX_ERROR
1945 Definitely a user syntax error.
1946
1947 AARCH64_OPDE_INVALID_VARIANT
1948 No syntax error, but the operands are not a valid combination, e.g.
1949 FMOV D0,S0
1950
1951 The following errors are only reported against an asm string that is
1952 syntactically valid and that has valid operand qualifiers.
1953
1954 AARCH64_OPDE_INVALID_VG_SIZE
1955 Error about a "VGx<n>" modifier in a ZA index not having the
1956 correct <n>. This error effectively forms a pair with
1957 AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
1958 of vectors that an instruction operates on. However, the "VGx<n>"
1959 modifier is optional, whereas a register list always has a known
1960 and explicit length. It therefore seems better to place more
1961 importance on the register list length when selecting an opcode table
1962 entry. This in turn means that having an incorrect register length
1963 should be more severe than having an incorrect "VGx<n>".
1964
1965 AARCH64_OPDE_REG_LIST_LENGTH
1966 Error about a register list operand having an unexpected number of
1967 registers. This error is low severity because there might be another
1968 opcode entry that supports the given number of registers.
1969
1970 AARCH64_OPDE_REG_LIST_STRIDE
1971 Error about a register list operand having the correct number
1972 (and type) of registers, but an unexpected stride. This error is
1973 more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
1974 that the length is known to be correct. However, it is lower than
1975 many other errors, since some instructions have forms that share
1976 the same number of registers but have different strides.
1977
1978 AARCH64_OPDE_UNTIED_IMMS
1979 The asm failed to use the same immediate for a destination operand
1980 and a tied source operand.
1981
1982 AARCH64_OPDE_UNTIED_OPERAND
1983 The asm failed to use the same register for a destination operand
1984 and a tied source operand.
1985
1986 AARCH64_OPDE_OUT_OF_RANGE
1987 Error about some immediate value out of a valid range.
1988
1989 AARCH64_OPDE_UNALIGNED
     Error about some immediate value not properly aligned (i.e. not a
     multiple of a certain value).
1992
1993 AARCH64_OPDE_OTHER_ERROR
1994 Error of the highest severity and used for any severe issue that does not
1995 fall into any of the above categories.
1996
1997 AARCH64_OPDE_INVALID_REGNO
1998 A register was syntactically valid and had the right type, but it was
1999 outside the range supported by the associated operand field. This is
2000 a high severity error because there are currently no instructions that
2001 would accept the operands that precede the erroneous one (if any) and
2002 yet still accept a wider range of registers.
2003
2004 AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
2005 AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
2006 AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
2007 only libopcodes has the information about the valid variants of each
2008 instruction.
2009
2010 The enumerators have an increasing severity. This is helpful when there are
2011 multiple instruction templates available for a given mnemonic name (e.g.
2012 FMOV); this mechanism will help choose the most suitable template from which
2013 the generated diagnostics can most closely describe the issues, if any.
2014
2015 This enum needs to be kept up-to-date with operand_mismatch_kind_names
2016 in tc-aarch64.c. */
2017
/* N.B. the enumerators below are ordered by increasing severity (see the
   comment above for the meaning of each value) and must be kept in sync
   with operand_mismatch_kind_names in tc-aarch64.c.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
2037
/* Description of an operand error.
   N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  enum aarch64_operand_error_kind kind;
  /* Index of the operand that the error applies to.  */
  int index;
  /* Textual description of the error.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* Whether the error is regarded as non-fatal by the reporter.  */
  bool non_fatal;
};
2051
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  NOTE(review): presumably an array of
     num_allocated_insns entries owned by the sequence -- confirm with
     init_insn_sequence.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
2064
2065 /* Encoding entrypoint. */
2066
2067 extern bool
2068 aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
2069 aarch64_insn *, aarch64_opnd_qualifier_t *,
2070 aarch64_operand_error *, aarch64_instr_sequence *);
2071
2072 extern const aarch64_opcode *
2073 aarch64_replace_opcode (struct aarch64_inst *,
2074 const aarch64_opcode *);
2075
2076 /* Given the opcode enumerator OP, return the pointer to the corresponding
2077 opcode entry. */
2078
2079 extern const aarch64_opcode *
2080 aarch64_get_opcode (enum aarch64_op);
2081
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS with STYLE applied to the string.  STYLER is a pointer back
     to this object so that the callback can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.

     NOTE(review): the callback consumes ARGS; callers are presumably
     responsible for the surrounding va_start/va_end -- confirm against
     the implementations in the disassemblers.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
                              enum disassembler_style style,
                              const char *fmt,
                              va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
2104
2105 /* Generate the string representation of an operand. */
2106 extern void
2107 aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
2108 const aarch64_opnd_info *, int, int *, bfd_vma *,
2109 char **, char *, size_t,
2110 aarch64_feature_set features,
2111 struct aarch64_styler *styler);
2112
2113 /* Miscellaneous interface. */
2114
2115 extern int
2116 aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);
2117
2118 extern aarch64_opnd_qualifier_t
2119 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
2120 const aarch64_opnd_qualifier_t, int);
2121
2122 extern bool
2123 aarch64_is_destructive_by_operands (const aarch64_opcode *);
2124
2125 extern int
2126 aarch64_num_of_operands (const aarch64_opcode *);
2127
2128 extern bool
2129 aarch64_stack_pointer_p (const aarch64_opnd_info *);
2130
2131 extern int
2132 aarch64_zero_register_p (const aarch64_opnd_info *);
2133
2134 extern enum err_type
2135 aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
2136 aarch64_operand_error *);
2137
2138 extern void
2139 init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);
2140
2141 /* Given an operand qualifier, return the expected data element size
2142 of a qualified operand. */
2143 extern unsigned char
2144 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);
2145
2146 extern enum aarch64_operand_class
2147 aarch64_get_operand_class (enum aarch64_opnd);
2148
2149 extern const char *
2150 aarch64_get_operand_name (enum aarch64_opnd);
2151
2152 extern const char *
2153 aarch64_get_operand_desc (enum aarch64_opnd);
2154
2155 extern bool
2156 aarch64_sve_dupm_mov_immediate_p (uint64_t, int);
2157
2158 extern bool
2159 aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);
2160
2161 extern int
2162 calc_ldst_datasize (const aarch64_opnd_info *opnds);
2163
#ifdef DEBUG_AARCH64
/* Nonzero to enable the debug trace output below.  */
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Print a printf-style trace message prefixed with the calling function's
   name, gated on debug_dump.
   NOTE(review): these expand to a bare brace block rather than the
   conventional do { ... } while (0), so they rely on call sites using
   them as complete statements (a trailing ";" yields a harmless empty
   statement, but a dangling "else" could bind unexpectedly).  */
#define DEBUG_TRACE(M, ...) \
  { \
    if (debug_dump) \
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \
  }

/* As DEBUG_TRACE, but additionally gated on condition C.  */
#define DEBUG_TRACE_IF(C, M, ...) \
  { \
    if (debug_dump && (C)) \
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \
  }
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) ;
#define DEBUG_TRACE_IF(C, M, ...) ;
#endif /* DEBUG_AARCH64 */
2185
2186 extern const char *const aarch64_sve_pattern_array[32];
2187 extern const char *const aarch64_sve_prfop_array[16];
2188 extern const char *const aarch64_rprfmop_array[64];
2189 extern const char *const aarch64_sme_vlxn_array[2];
2190 extern const char *const aarch64_brbop_array[2];
2191
2192 #ifdef __cplusplus
2193 }
2194 #endif
2195
2196 #endif /* OPCODE_AARCH64_H */
2197