aarch64-opc.c revision 1.7 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables verbose qualifier-matching dumps (see dump_match_qualifiers
   below); off by default.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning, i.e. an
   encoding with no assembler mnemonic.  The array is indexed directly
   by the encoded 5-bit pattern value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning, i.e. an
   encoding with no assembler mnemonic.  The array is indexed directly
   by the encoded 4-bit prfop value.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Shapes of AdvSIMD operand/qualifier sequences, used to pick which
   operand encodes the size:Q fields (see significant_operand_index).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,          /* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,           /* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,           /* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,   /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
129
/* For each data pattern, the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
   either buffer the calculated result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201
/* Descriptions of all the instruction bit-fields used by the encoder and
   decoder; each entry is { lsb, width }.  NOTE(review): the order of the
   entries must match the field enumeration in aarch64-opc.h — confirm
   against that header before inserting or reordering entries.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
    { 16,  4 }, /* SVE_tsz: triangular size select.  */
    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
};
321
/* Return the operand class (register, immediate, address, ...) of
   operand code TYPE.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
327
/* Return the name string of operand code TYPE.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
333
/* Return the description string of operand code TYPE; mainly used when
   composing diagnostic messages.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
341
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings followed by the encoded value;
   NOTE(review): the extra names ("none", "nlast", "tcont", ...) appear to
   be alternative condition aliases — confirm against the architecture
   reference before relying on their semantics.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
362
363 const aarch64_cond *
364 get_cond_from_value (aarch64_insn value)
365 {
366 assert (value < 16);
367 return &aarch64_conds[(unsigned int) value];
368 }
369
370 const aarch64_cond *
371 get_inverted_cond (const aarch64_cond *cond)
372 {
373 return &aarch64_conds[cond->value ^ 0x1];
374 }
375
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind, so the entry order must match that enum.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by the NULL-named entry.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
401
402 enum aarch64_modifier_kind
403 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
404 {
405 return desc - aarch64_operand_modifiers;
406 }
407
408 aarch64_insn
409 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
410 {
411 return aarch64_operand_modifiers[kind].value;
412 }
413
414 enum aarch64_modifier_kind
415 aarch64_get_operand_modifier_from_value (aarch64_insn value,
416 bfd_boolean extend_p)
417 {
418 if (extend_p == TRUE)
419 return AARCH64_MOD_UXTB + value;
420 else
421 return AARCH64_MOD_LSL - value;
422 }
423
424 bfd_boolean
425 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
426 {
427 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
428 ? TRUE : FALSE;
429 }
430
431 static inline bfd_boolean
432 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
433 {
434 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
435 ? TRUE : FALSE;
436 }
437
/* Named options for the DMB/DSB barrier instructions, indexed by the
   4-bit CRm encoding.  Encodings with no architected name are entered
   as their literal "#0xNN" immediate spelling.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
457
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
470
/* Named PRFM prefetch operations, indexed by the 5-bit prfop encoding.
   A NULL name marks an encoding with no mnemonic.  The B macro packs:
   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
511
512 /* Utilities on value constraint. */
514
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
520
/* Return non-zero if VALUE is an exact multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
527
/* Return 1 if VALUE is representable as a WIDTH-bit two's complement
   signed field, i.e. VALUE is in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return (value >= -limit && value < limit) ? 1 : 0;
  }
}
541
/* Return 1 if VALUE is representable as a WIDTH-bit unsigned field,
   i.e. VALUE is in [0, 2^WIDTH - 1].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (int64_t) 1 << width;
    return (value >= 0 && value < limit) ? 1 : 0;
  }
}
555
556 /* Return 1 if OPERAND is SP or WSP. */
557 int
558 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
559 {
560 return ((aarch64_get_operand_class (operand->type)
561 == AARCH64_OPND_CLASS_INT_REG)
562 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
563 && operand->reg.regno == 31);
564 }
565
566 /* Return 1 if OPERAND is XZR or WZP. */
567 int
568 aarch64_zero_register_p (const aarch64_opnd_info *operand)
569 {
570 return ((aarch64_get_operand_class (operand->type)
571 == AARCH64_OPND_CLASS_INT_REG)
572 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
573 && operand->reg.regno == 31);
574 }
575
576 /* Return true if the operand *OPERAND that has the operand code
577 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
578 qualified by the qualifier TARGET. */
579
580 static inline int
581 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
582 aarch64_opnd_qualifier_t target)
583 {
584 switch (operand->qualifier)
585 {
586 case AARCH64_OPND_QLF_W:
587 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
588 return 1;
589 break;
590 case AARCH64_OPND_QLF_X:
591 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
592 return 1;
593 break;
594 case AARCH64_OPND_QLF_WSP:
595 if (target == AARCH64_OPND_QLF_W
596 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
597 return 1;
598 break;
599 case AARCH64_OPND_QLF_SP:
600 if (target == AARCH64_OPND_QLF_X
601 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
602 return 1;
603 break;
604 default:
605 break;
606 }
607
608 return 0;
609 }
610
611 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
612 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
613
614 Return NIL if more than one expected qualifiers are found. */
615
616 aarch64_opnd_qualifier_t
617 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
618 int idx,
619 const aarch64_opnd_qualifier_t known_qlf,
620 int known_idx)
621 {
622 int i, saved_i;
623
624 /* Special case.
625
626 When the known qualifier is NIL, we have to assume that there is only
627 one qualifier sequence in the *QSEQ_LIST and return the corresponding
628 qualifier directly. One scenario is that for instruction
629 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
630 which has only one possible valid qualifier sequence
631 NIL, S_D
632 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
633 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
634
635 Because the qualifier NIL has dual roles in the qualifier sequence:
636 it can mean no qualifier for the operand, or the qualifer sequence is
637 not in use (when all qualifiers in the sequence are NILs), we have to
638 handle this special case here. */
639 if (known_qlf == AARCH64_OPND_NIL)
640 {
641 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
642 return qseq_list[0][idx];
643 }
644
645 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
646 {
647 if (qseq_list[i][known_idx] == known_qlf)
648 {
649 if (saved_i != -1)
650 /* More than one sequences are found to have KNOWN_QLF at
651 KNOWN_IDX. */
652 return AARCH64_OPND_NIL;
653 saved_i = i;
654 }
655 }
656
657 return qseq_list[saved_i][idx];
658 }
659
/* Broad categories of operand qualifiers; selects how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,     /* data fields: esize, nelem, encoding value.  */
  OQK_VALUE_IN_RANGE,  /* data fields: lower bound, upper bound, unused.  */
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
680
681 /* Indexed by the operand qualifier enumerators. */
682 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
683 {
684 {0, 0, 0, "NIL", OQK_NIL},
685
686 /* Operand variant qualifiers.
687 First 3 fields:
688 element size, number of elements and common value for encoding. */
689
690 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
691 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
692 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
693 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
694
695 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
696 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
697 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
698 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
699 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
700
701 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
702 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
703 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
704 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
705 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
706 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
707 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
708 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
709 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
710 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
711
712 {0, 0, 0, "z", OQK_OPD_VARIANT},
713 {0, 0, 0, "m", OQK_OPD_VARIANT},
714
715 /* Qualifiers constraining the value range.
716 First 3 fields:
717 Lower bound, higher bound, unused. */
718
719 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
720 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
721 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
722 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
723 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
724 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
725 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
726
727 /* Qualifiers for miscellaneous purpose.
728 First 3 fields:
729 unused, unused and unused. */
730
731 {0, 0, 0, "lsl", 0},
732 {0, 0, 0, "msl", 0},
733
734 {0, 0, 0, "retrieving", 0},
735 };
736
737 static inline bfd_boolean
738 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
739 {
740 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
741 ? TRUE : FALSE;
742 }
743
744 static inline bfd_boolean
745 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
746 {
747 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
748 ? TRUE : FALSE;
749 }
750
/* Return the name string of QUALIFIER, e.g. "w", "8b" or "imm_0_7".  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
756
757 /* Given an operand qualifier, return the expected data element size
758 of a qualified operand. */
759 unsigned char
760 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
761 {
762 assert (operand_variant_qualifier_p (qualifier) == TRUE);
763 return aarch64_opnd_qualifiers[qualifier].data0;
764 }
765
766 unsigned char
767 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
768 {
769 assert (operand_variant_qualifier_p (qualifier) == TRUE);
770 return aarch64_opnd_qualifiers[qualifier].data1;
771 }
772
773 aarch64_insn
774 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
775 {
776 assert (operand_variant_qualifier_p (qualifier) == TRUE);
777 return aarch64_opnd_qualifiers[qualifier].data2;
778 }
779
780 static int
781 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
782 {
783 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
784 return aarch64_opnd_qualifiers[qualifier].data0;
785 }
786
787 static int
788 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
789 {
790 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
791 return aarch64_opnd_qualifiers[qualifier].data1;
792 }
793
794 #ifdef DEBUG_AARCH64
/* printf-style debug helper: emit "#### " followed by the formatted
   message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;
  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
805
806 static inline void
807 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
808 {
809 int i;
810 printf ("#### \t");
811 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
812 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
813 printf ("\n");
814 }
815
816 static void
817 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
818 const aarch64_opnd_qualifier_t *qualifier)
819 {
820 int i;
821 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
822
823 aarch64_verbose ("dump_match_qualifiers:");
824 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
825 curr[i] = opnd[i].qualifier;
826 dump_qualifier_sequence (curr);
827 aarch64_verbose ("against");
828 dump_qualifier_sequence (qualifier);
829 }
830 #endif /* DEBUG_AARCH64 */
831
832 /* TODO improve this, we can have an extra field at the runtime to
833 store the number of operands rather than calculating it every time. */
834
835 int
836 aarch64_num_of_operands (const aarch64_opcode *opcode)
837 {
838 int i = 0;
839 const enum aarch64_opnd *opnds = opcode->operands;
840 while (opnds[i++] != AARCH64_OPND_NIL)
841 ;
842 --i;
843 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
844 return i;
845 }
846
847 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
848 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
849
850 N.B. on the entry, it is very likely that only some operands in *INST
851 have had their qualifiers been established.
852
853 If STOP_AT is not -1, the function will only try to match
854 the qualifier sequence for operands before and including the operand
855 of index STOP_AT; and on success *RET will only be filled with the first
856 (STOP_AT+1) qualifiers.
857
858 A couple examples of the matching algorithm:
859
860 X,W,NIL should match
861 X,W,NIL
862
863 NIL,NIL should match
864 X ,NIL
865
866 Apart from serving the main encoding routine, this can also be called
867 during or after the operand decoding. */
868
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  N.B. on a successful break, QUALIFIERS_LIST is
     left pointing at the matched sequence; the fill-in code below relies
     on that.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL first sequence means the opcode takes no
	     qualifiers at all; anything later is a real mismatch.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 matched sequence (see loop note above).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
971
972 /* Operand qualifier matching and resolving.
973
974 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
975 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
976
977 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
978 succeeds. */
979
980 static int
981 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
982 {
983 int i, nops;
984 aarch64_opnd_qualifier_seq_t qualifiers;
985
986 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
987 qualifiers))
988 {
989 DEBUG_TRACE ("matching FAIL");
990 return 0;
991 }
992
993 if (inst->opcode->flags & F_STRICT)
994 {
995 /* Require an exact qualifier match, even for NIL qualifiers. */
996 nops = aarch64_num_of_operands (inst->opcode);
997 for (i = 0; i < nops; ++i)
998 if (inst->operands[i].qualifier != qualifiers[i])
999 return FALSE;
1000 }
1001
1002 /* Update the qualifiers. */
1003 if (update_p == TRUE)
1004 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1005 {
1006 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1007 break;
1008 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1009 "update %s with %s for operand %d",
1010 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1011 aarch64_get_qualifier_name (qualifiers[i]), i);
1012 inst->operands[i].qualifier = qualifiers[i];
1013 }
1014
1015 DEBUG_TRACE ("matching SUCCESS");
1016 return 1;
1017 }
1018
1019 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1020 register by MOVZ.
1021
1022 IS32 indicates whether value is a 32-bit immediate or not.
1023 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1024 amount will be returned in *SHIFT_AMOUNT. */
1025
1026 bfd_boolean
1027 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1028 {
1029 int amount;
1030
1031 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1032
1033 if (is32)
1034 {
1035 /* Allow all zeros or all ones in top 32-bits, so that
1036 32-bit constant expressions like ~0x80000000 are
1037 permitted. */
1038 uint64_t ext = value;
1039 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1040 /* Immediate out of range. */
1041 return FALSE;
1042 value &= (int64_t) 0xffffffff;
1043 }
1044
1045 /* first, try movz then movn */
1046 amount = -1;
1047 if ((value & ((int64_t) 0xffff << 0)) == value)
1048 amount = 0;
1049 else if ((value & ((int64_t) 0xffff << 16)) == value)
1050 amount = 16;
1051 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1052 amount = 32;
1053 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1054 amount = 48;
1055
1056 if (amount == -1)
1057 {
1058 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1059 return FALSE;
1060 }
1061
1062 if (shift_amount != NULL)
1063 *shift_amount = amount;
1064
1065 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1066
1067 return TRUE;
1068 }
1069
1070 /* Build the accepted values for immediate logical SIMD instructions.
1071
1072 The standard encodings of the immediate value are:
1073 N imms immr SIMD size R S
1074 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1075 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1076 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1077 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1078 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1079 0 11110s 00000r 2 UInt(r) UInt(s)
1080 where all-ones value of S is reserved.
1081
1082 Let's call E the SIMD size.
1083
1084 The immediate value is: S+1 bits '1' rotated to the right by R.
1085
1086 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1087 (remember S != E - 1). */
1088
1089 #define TOTAL_IMM_NB 5334
1090
1091 typedef struct
1092 {
1093 uint64_t imm;
1094 aarch64_insn encoding;
1095 } simd_imm_encoding;
1096
1097 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1098
1099 static int
1100 simd_imm_encoding_cmp(const void *i1, const void *i2)
1101 {
1102 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1103 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1104
1105 if (imm1->imm < imm2->imm)
1106 return -1;
1107 if (imm1->imm > imm2->imm)
1108 return +1;
1109 return 0;
1110 }
1111
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Assemble the imm13 layout above: N in bit 12, the rotation R in
     bits 11:6 and the size/length field S in bits 5:0.  */
  int encoding = is64 << 12;
  encoding |= r << 6;
  encoding |= s;
  return encoding;
}
1125
1126 static void
1127 build_immediate_table (void)
1128 {
1129 uint32_t log_e, e, s, r, s_mask;
1130 uint64_t mask, imm;
1131 int nb_imms;
1132 int is64;
1133
1134 nb_imms = 0;
1135 for (log_e = 1; log_e <= 6; log_e++)
1136 {
1137 /* Get element size. */
1138 e = 1u << log_e;
1139 if (log_e == 6)
1140 {
1141 is64 = 1;
1142 mask = 0xffffffffffffffffull;
1143 s_mask = 0;
1144 }
1145 else
1146 {
1147 is64 = 0;
1148 mask = (1ull << e) - 1;
1149 /* log_e s_mask
1150 1 ((1 << 4) - 1) << 2 = 111100
1151 2 ((1 << 3) - 1) << 3 = 111000
1152 3 ((1 << 2) - 1) << 4 = 110000
1153 4 ((1 << 1) - 1) << 5 = 100000
1154 5 ((1 << 0) - 1) << 6 = 000000 */
1155 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1156 }
1157 for (s = 0; s < e - 1; s++)
1158 for (r = 0; r < e; r++)
1159 {
1160 /* s+1 consecutive bits to 1 (s < 63) */
1161 imm = (1ull << (s + 1)) - 1;
1162 /* rotate right by r */
1163 if (r != 0)
1164 imm = (imm >> r) | ((imm << (e - r)) & mask);
1165 /* replicate the constant depending on SIMD size */
1166 switch (log_e)
1167 {
1168 case 1: imm = (imm << 2) | imm;
1169 /* Fall through. */
1170 case 2: imm = (imm << 4) | imm;
1171 /* Fall through. */
1172 case 3: imm = (imm << 8) | imm;
1173 /* Fall through. */
1174 case 4: imm = (imm << 16) | imm;
1175 /* Fall through. */
1176 case 5: imm = (imm << 32) | imm;
1177 /* Fall through. */
1178 case 6: break;
1179 default: abort ();
1180 }
1181 simd_immediates[nb_imms].imm = imm;
1182 simd_immediates[nb_imms].encoding =
1183 encode_immediate_bitfield(is64, s | s_mask, r);
1184 nb_imms++;
1185 }
1186 }
1187 assert (nb_imms == TOTAL_IMM_NB);
1188 qsort(simd_immediates, nb_imms,
1189 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1190 }
1191
1192 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1193 be accepted by logical (immediate) instructions
1194 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1195
1196 ESIZE is the number of bytes in the decoded immediate value.
1197 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1198 VALUE will be returned in *ENCODING. */
1199
1200 bfd_boolean
1201 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1202 {
1203 simd_imm_encoding imm_enc;
1204 const simd_imm_encoding *imm_encoding;
1205 static bfd_boolean initialized = FALSE;
1206 uint64_t upper;
1207 int i;
1208
1209 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1210 value, is32);
1211
1212 if (initialized == FALSE)
1213 {
1214 build_immediate_table ();
1215 initialized = TRUE;
1216 }
1217
1218 /* Allow all zeros or all ones in top bits, so that
1219 constant expressions like ~1 are permitted. */
1220 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1221 if ((value & ~upper) != value && (value | upper) != value)
1222 return FALSE;
1223
1224 /* Replicate to a full 64-bit value. */
1225 value &= ~upper;
1226 for (i = esize * 8; i < 64; i *= 2)
1227 value |= (value << i);
1228
1229 imm_enc.imm = value;
1230 imm_encoding = (const simd_imm_encoding *)
1231 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1232 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1233 if (imm_encoding == NULL)
1234 {
1235 DEBUG_TRACE ("exit with FALSE");
1236 return FALSE;
1237 }
1238 if (encoding != NULL)
1239 *encoding = imm_encoding->encoding;
1240 DEBUG_TRACE ("exit with TRUE");
1241 return TRUE;
1242 }
1243
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result, bit;
  uint32_t byte;

  result = 0;
  /* Walk IMM one byte at a time from the least significant end: an
     all-ones byte sets the corresponding result bit, an all-zeros byte
     clears it, and anything else disqualifies the immediate.  */
  for (bit = 0; bit < 8; bit++, imm >>= 8)
    {
      byte = imm & 0xff;
      if (byte == 0xff)
	result |= 1 << bit;
      else if (byte != 0x00)
	return -1;
    }
  return result;
}
1265
1266 /* Utility inline functions for operand_general_constraint_met_p. */
1267
1268 static inline void
1269 set_error (aarch64_operand_error *mismatch_detail,
1270 enum aarch64_operand_error_kind kind, int idx,
1271 const char* error)
1272 {
1273 if (mismatch_detail == NULL)
1274 return;
1275 mismatch_detail->kind = kind;
1276 mismatch_detail->index = idx;
1277 mismatch_detail->error = error;
1278 }
1279
1280 static inline void
1281 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1282 const char* error)
1283 {
1284 if (mismatch_detail == NULL)
1285 return;
1286 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1287 }
1288
1289 static inline void
1290 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1291 int idx, int lower_bound, int upper_bound,
1292 const char* error)
1293 {
1294 if (mismatch_detail == NULL)
1295 return;
1296 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1297 mismatch_detail->data[0] = lower_bound;
1298 mismatch_detail->data[1] = upper_bound;
1299 }
1300
1301 static inline void
1302 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1303 int idx, int lower_bound, int upper_bound)
1304 {
1305 if (mismatch_detail == NULL)
1306 return;
1307 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1308 _("immediate value"));
1309 }
1310
1311 static inline void
1312 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1313 int idx, int lower_bound, int upper_bound)
1314 {
1315 if (mismatch_detail == NULL)
1316 return;
1317 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1318 _("immediate offset"));
1319 }
1320
1321 static inline void
1322 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1323 int idx, int lower_bound, int upper_bound)
1324 {
1325 if (mismatch_detail == NULL)
1326 return;
1327 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1328 _("register number"));
1329 }
1330
1331 static inline void
1332 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1333 int idx, int lower_bound, int upper_bound)
1334 {
1335 if (mismatch_detail == NULL)
1336 return;
1337 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1338 _("register element index"));
1339 }
1340
1341 static inline void
1342 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1343 int idx, int lower_bound, int upper_bound)
1344 {
1345 if (mismatch_detail == NULL)
1346 return;
1347 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1348 _("shift amount"));
1349 }
1350
1351 /* Report that the MUL modifier in operand IDX should be in the range
1352 [LOWER_BOUND, UPPER_BOUND]. */
1353 static inline void
1354 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1355 int idx, int lower_bound, int upper_bound)
1356 {
1357 if (mismatch_detail == NULL)
1358 return;
1359 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1360 _("multiplier"));
1361 }
1362
1363 static inline void
1364 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1365 int alignment)
1366 {
1367 if (mismatch_detail == NULL)
1368 return;
1369 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1370 mismatch_detail->data[0] = alignment;
1371 }
1372
1373 static inline void
1374 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1375 int expected_num)
1376 {
1377 if (mismatch_detail == NULL)
1378 return;
1379 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1380 mismatch_detail->data[0] = expected_num;
1381 }
1382
1383 static inline void
1384 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1385 const char* error)
1386 {
1387 if (mismatch_detail == NULL)
1388 return;
1389 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1390 }
1391
1392 /* General constraint checking based on operand code.
1393
1394 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1395 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1396
1397 This function has to be called after the qualifiers for all operands
1398 have been resolved.
1399
1400 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1401 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1402 of error message during the disassembling where error message is not
1403 wanted. We avoid the dynamic construction of strings of error messages
1404 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1405 use a combination of error code, static string and some integer data to
1406 represent an error. */
1407
1408 static int
1409 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1410 enum aarch64_opnd type,
1411 const aarch64_opcode *opcode,
1412 aarch64_operand_error *mismatch_detail)
1413 {
1414 unsigned num, modifiers, shift;
1415 unsigned char size;
1416 int64_t imm, min_value, max_value;
1417 uint64_t uvalue, mask;
1418 const aarch64_opnd_info *opnd = opnds + idx;
1419 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1420
1421 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1422
1423 switch (aarch64_operands[type].op_class)
1424 {
1425 case AARCH64_OPND_CLASS_INT_REG:
1426 /* Check pair reg constraints for cas* instructions. */
1427 if (type == AARCH64_OPND_PAIRREG)
1428 {
1429 assert (idx == 1 || idx == 3);
1430 if (opnds[idx - 1].reg.regno % 2 != 0)
1431 {
1432 set_syntax_error (mismatch_detail, idx - 1,
1433 _("reg pair must start from even reg"));
1434 return 0;
1435 }
1436 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1437 {
1438 set_syntax_error (mismatch_detail, idx,
1439 _("reg pair must be contiguous"));
1440 return 0;
1441 }
1442 break;
1443 }
1444
1445 /* <Xt> may be optional in some IC and TLBI instructions. */
1446 if (type == AARCH64_OPND_Rt_SYS)
1447 {
1448 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1449 == AARCH64_OPND_CLASS_SYSTEM));
1450 if (opnds[1].present
1451 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1452 {
1453 set_other_error (mismatch_detail, idx, _("extraneous register"));
1454 return 0;
1455 }
1456 if (!opnds[1].present
1457 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1458 {
1459 set_other_error (mismatch_detail, idx, _("missing register"));
1460 return 0;
1461 }
1462 }
1463 switch (qualifier)
1464 {
1465 case AARCH64_OPND_QLF_WSP:
1466 case AARCH64_OPND_QLF_SP:
1467 if (!aarch64_stack_pointer_p (opnd))
1468 {
1469 set_other_error (mismatch_detail, idx,
1470 _("stack pointer register expected"));
1471 return 0;
1472 }
1473 break;
1474 default:
1475 break;
1476 }
1477 break;
1478
1479 case AARCH64_OPND_CLASS_SVE_REG:
1480 switch (type)
1481 {
1482 case AARCH64_OPND_SVE_Zm3_INDEX:
1483 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1484 case AARCH64_OPND_SVE_Zm4_INDEX:
1485 size = get_operand_fields_width (get_operand_from_code (type));
1486 shift = get_operand_specific_data (&aarch64_operands[type]);
1487 mask = (1 << shift) - 1;
1488 if (opnd->reg.regno > mask)
1489 {
1490 assert (mask == 7 || mask == 15);
1491 set_other_error (mismatch_detail, idx,
1492 mask == 15
1493 ? _("z0-z15 expected")
1494 : _("z0-z7 expected"));
1495 return 0;
1496 }
1497 mask = (1 << (size - shift)) - 1;
1498 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1499 {
1500 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1501 return 0;
1502 }
1503 break;
1504
1505 case AARCH64_OPND_SVE_Zn_INDEX:
1506 size = aarch64_get_qualifier_esize (opnd->qualifier);
1507 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1508 {
1509 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1510 0, 64 / size - 1);
1511 return 0;
1512 }
1513 break;
1514
1515 case AARCH64_OPND_SVE_ZnxN:
1516 case AARCH64_OPND_SVE_ZtxN:
1517 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1518 {
1519 set_other_error (mismatch_detail, idx,
1520 _("invalid register list"));
1521 return 0;
1522 }
1523 break;
1524
1525 default:
1526 break;
1527 }
1528 break;
1529
1530 case AARCH64_OPND_CLASS_PRED_REG:
1531 if (opnd->reg.regno >= 8
1532 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1533 {
1534 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1535 return 0;
1536 }
1537 break;
1538
1539 case AARCH64_OPND_CLASS_COND:
1540 if (type == AARCH64_OPND_COND1
1541 && (opnds[idx].cond->value & 0xe) == 0xe)
1542 {
1543 /* Not allow AL or NV. */
1544 set_syntax_error (mismatch_detail, idx, NULL);
1545 }
1546 break;
1547
1548 case AARCH64_OPND_CLASS_ADDRESS:
1549 /* Check writeback. */
1550 switch (opcode->iclass)
1551 {
1552 case ldst_pos:
1553 case ldst_unscaled:
1554 case ldstnapair_offs:
1555 case ldstpair_off:
1556 case ldst_unpriv:
1557 if (opnd->addr.writeback == 1)
1558 {
1559 set_syntax_error (mismatch_detail, idx,
1560 _("unexpected address writeback"));
1561 return 0;
1562 }
1563 break;
1564 case ldst_imm10:
1565 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1566 {
1567 set_syntax_error (mismatch_detail, idx,
1568 _("unexpected address writeback"));
1569 return 0;
1570 }
1571 break;
1572 case ldst_imm9:
1573 case ldstpair_indexed:
1574 case asisdlsep:
1575 case asisdlsop:
1576 if (opnd->addr.writeback == 0)
1577 {
1578 set_syntax_error (mismatch_detail, idx,
1579 _("address writeback expected"));
1580 return 0;
1581 }
1582 break;
1583 default:
1584 assert (opnd->addr.writeback == 0);
1585 break;
1586 }
1587 switch (type)
1588 {
1589 case AARCH64_OPND_ADDR_SIMM7:
1590 /* Scaled signed 7 bits immediate offset. */
1591 /* Get the size of the data element that is accessed, which may be
1592 different from that of the source register size,
1593 e.g. in strb/ldrb. */
1594 size = aarch64_get_qualifier_esize (opnd->qualifier);
1595 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1596 {
1597 set_offset_out_of_range_error (mismatch_detail, idx,
1598 -64 * size, 63 * size);
1599 return 0;
1600 }
1601 if (!value_aligned_p (opnd->addr.offset.imm, size))
1602 {
1603 set_unaligned_error (mismatch_detail, idx, size);
1604 return 0;
1605 }
1606 break;
1607 case AARCH64_OPND_ADDR_SIMM9:
1608 /* Unscaled signed 9 bits immediate offset. */
1609 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1610 {
1611 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1612 return 0;
1613 }
1614 break;
1615
1616 case AARCH64_OPND_ADDR_SIMM9_2:
1617 /* Unscaled signed 9 bits immediate offset, which has to be negative
1618 or unaligned. */
1619 size = aarch64_get_qualifier_esize (qualifier);
1620 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1621 && !value_aligned_p (opnd->addr.offset.imm, size))
1622 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1623 return 1;
1624 set_other_error (mismatch_detail, idx,
1625 _("negative or unaligned offset expected"));
1626 return 0;
1627
1628 case AARCH64_OPND_ADDR_SIMM10:
1629 /* Scaled signed 10 bits immediate offset. */
1630 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1631 {
1632 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1633 return 0;
1634 }
1635 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1636 {
1637 set_unaligned_error (mismatch_detail, idx, 8);
1638 return 0;
1639 }
1640 break;
1641
1642 case AARCH64_OPND_SIMD_ADDR_POST:
1643 /* AdvSIMD load/store multiple structures, post-index. */
1644 assert (idx == 1);
1645 if (opnd->addr.offset.is_reg)
1646 {
1647 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1648 return 1;
1649 else
1650 {
1651 set_other_error (mismatch_detail, idx,
1652 _("invalid register offset"));
1653 return 0;
1654 }
1655 }
1656 else
1657 {
1658 const aarch64_opnd_info *prev = &opnds[idx-1];
1659 unsigned num_bytes; /* total number of bytes transferred. */
1660 /* The opcode dependent area stores the number of elements in
1661 each structure to be loaded/stored. */
1662 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1663 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1664 /* Special handling of loading single structure to all lane. */
1665 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1666 * aarch64_get_qualifier_esize (prev->qualifier);
1667 else
1668 num_bytes = prev->reglist.num_regs
1669 * aarch64_get_qualifier_esize (prev->qualifier)
1670 * aarch64_get_qualifier_nelem (prev->qualifier);
1671 if ((int) num_bytes != opnd->addr.offset.imm)
1672 {
1673 set_other_error (mismatch_detail, idx,
1674 _("invalid post-increment amount"));
1675 return 0;
1676 }
1677 }
1678 break;
1679
1680 case AARCH64_OPND_ADDR_REGOFF:
1681 /* Get the size of the data element that is accessed, which may be
1682 different from that of the source register size,
1683 e.g. in strb/ldrb. */
1684 size = aarch64_get_qualifier_esize (opnd->qualifier);
1685 /* It is either no shift or shift by the binary logarithm of SIZE. */
1686 if (opnd->shifter.amount != 0
1687 && opnd->shifter.amount != (int)get_logsz (size))
1688 {
1689 set_other_error (mismatch_detail, idx,
1690 _("invalid shift amount"));
1691 return 0;
1692 }
1693 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1694 operators. */
1695 switch (opnd->shifter.kind)
1696 {
1697 case AARCH64_MOD_UXTW:
1698 case AARCH64_MOD_LSL:
1699 case AARCH64_MOD_SXTW:
1700 case AARCH64_MOD_SXTX: break;
1701 default:
1702 set_other_error (mismatch_detail, idx,
1703 _("invalid extend/shift operator"));
1704 return 0;
1705 }
1706 break;
1707
1708 case AARCH64_OPND_ADDR_UIMM12:
1709 imm = opnd->addr.offset.imm;
1710 /* Get the size of the data element that is accessed, which may be
1711 different from that of the source register size,
1712 e.g. in strb/ldrb. */
1713 size = aarch64_get_qualifier_esize (qualifier);
1714 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1715 {
1716 set_offset_out_of_range_error (mismatch_detail, idx,
1717 0, 4095 * size);
1718 return 0;
1719 }
1720 if (!value_aligned_p (opnd->addr.offset.imm, size))
1721 {
1722 set_unaligned_error (mismatch_detail, idx, size);
1723 return 0;
1724 }
1725 break;
1726
1727 case AARCH64_OPND_ADDR_PCREL14:
1728 case AARCH64_OPND_ADDR_PCREL19:
1729 case AARCH64_OPND_ADDR_PCREL21:
1730 case AARCH64_OPND_ADDR_PCREL26:
1731 imm = opnd->imm.value;
1732 if (operand_need_shift_by_two (get_operand_from_code (type)))
1733 {
1734 /* The offset value in a PC-relative branch instruction is alway
1735 4-byte aligned and is encoded without the lowest 2 bits. */
1736 if (!value_aligned_p (imm, 4))
1737 {
1738 set_unaligned_error (mismatch_detail, idx, 4);
1739 return 0;
1740 }
1741 /* Right shift by 2 so that we can carry out the following check
1742 canonically. */
1743 imm >>= 2;
1744 }
1745 size = get_operand_fields_width (get_operand_from_code (type));
1746 if (!value_fit_signed_field_p (imm, size))
1747 {
1748 set_other_error (mismatch_detail, idx,
1749 _("immediate out of range"));
1750 return 0;
1751 }
1752 break;
1753
1754 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1755 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1756 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1757 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1758 min_value = -8;
1759 max_value = 7;
1760 sve_imm_offset_vl:
1761 assert (!opnd->addr.offset.is_reg);
1762 assert (opnd->addr.preind);
1763 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1764 min_value *= num;
1765 max_value *= num;
1766 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1767 || (opnd->shifter.operator_present
1768 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1769 {
1770 set_other_error (mismatch_detail, idx,
1771 _("invalid addressing mode"));
1772 return 0;
1773 }
1774 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1775 {
1776 set_offset_out_of_range_error (mismatch_detail, idx,
1777 min_value, max_value);
1778 return 0;
1779 }
1780 if (!value_aligned_p (opnd->addr.offset.imm, num))
1781 {
1782 set_unaligned_error (mismatch_detail, idx, num);
1783 return 0;
1784 }
1785 break;
1786
1787 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1788 min_value = -32;
1789 max_value = 31;
1790 goto sve_imm_offset_vl;
1791
1792 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1793 min_value = -256;
1794 max_value = 255;
1795 goto sve_imm_offset_vl;
1796
1797 case AARCH64_OPND_SVE_ADDR_RI_U6:
1798 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1799 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1800 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1801 min_value = 0;
1802 max_value = 63;
1803 sve_imm_offset:
1804 assert (!opnd->addr.offset.is_reg);
1805 assert (opnd->addr.preind);
1806 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1807 min_value *= num;
1808 max_value *= num;
1809 if (opnd->shifter.operator_present
1810 || opnd->shifter.amount_present)
1811 {
1812 set_other_error (mismatch_detail, idx,
1813 _("invalid addressing mode"));
1814 return 0;
1815 }
1816 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1817 {
1818 set_offset_out_of_range_error (mismatch_detail, idx,
1819 min_value, max_value);
1820 return 0;
1821 }
1822 if (!value_aligned_p (opnd->addr.offset.imm, num))
1823 {
1824 set_unaligned_error (mismatch_detail, idx, num);
1825 return 0;
1826 }
1827 break;
1828
1829 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1830 min_value = -8;
1831 max_value = 7;
1832 goto sve_imm_offset;
1833
1834 case AARCH64_OPND_SVE_ADDR_RR:
1835 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1836 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1837 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1838 case AARCH64_OPND_SVE_ADDR_RX:
1839 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1840 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1841 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1842 case AARCH64_OPND_SVE_ADDR_RZ:
1843 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1844 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1845 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1846 modifiers = 1 << AARCH64_MOD_LSL;
1847 sve_rr_operand:
1848 assert (opnd->addr.offset.is_reg);
1849 assert (opnd->addr.preind);
1850 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1851 && opnd->addr.offset.regno == 31)
1852 {
1853 set_other_error (mismatch_detail, idx,
1854 _("index register xzr is not allowed"));
1855 return 0;
1856 }
1857 if (((1 << opnd->shifter.kind) & modifiers) == 0
1858 || (opnd->shifter.amount
1859 != get_operand_specific_data (&aarch64_operands[type])))
1860 {
1861 set_other_error (mismatch_detail, idx,
1862 _("invalid addressing mode"));
1863 return 0;
1864 }
1865 break;
1866
1867 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1868 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1869 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1870 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1875 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1876 goto sve_rr_operand;
1877
1878 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1879 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1880 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1881 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1882 min_value = 0;
1883 max_value = 31;
1884 goto sve_imm_offset;
1885
1886 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1887 modifiers = 1 << AARCH64_MOD_LSL;
1888 sve_zz_operand:
1889 assert (opnd->addr.offset.is_reg);
1890 assert (opnd->addr.preind);
1891 if (((1 << opnd->shifter.kind) & modifiers) == 0
1892 || opnd->shifter.amount < 0
1893 || opnd->shifter.amount > 3)
1894 {
1895 set_other_error (mismatch_detail, idx,
1896 _("invalid addressing mode"));
1897 return 0;
1898 }
1899 break;
1900
1901 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1902 modifiers = (1 << AARCH64_MOD_SXTW);
1903 goto sve_zz_operand;
1904
1905 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1906 modifiers = 1 << AARCH64_MOD_UXTW;
1907 goto sve_zz_operand;
1908
1909 default:
1910 break;
1911 }
1912 break;
1913
1914 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1915 if (type == AARCH64_OPND_LEt)
1916 {
1917 /* Get the upper bound for the element index. */
1918 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1919 if (!value_in_range_p (opnd->reglist.index, 0, num))
1920 {
1921 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1922 return 0;
1923 }
1924 }
1925 /* The opcode dependent area stores the number of elements in
1926 each structure to be loaded/stored. */
1927 num = get_opcode_dependent_value (opcode);
1928 switch (type)
1929 {
1930 case AARCH64_OPND_LVt:
1931 assert (num >= 1 && num <= 4);
1932 /* Unless LD1/ST1, the number of registers should be equal to that
1933 of the structure elements. */
1934 if (num != 1 && opnd->reglist.num_regs != num)
1935 {
1936 set_reg_list_error (mismatch_detail, idx, num);
1937 return 0;
1938 }
1939 break;
1940 case AARCH64_OPND_LVt_AL:
1941 case AARCH64_OPND_LEt:
1942 assert (num >= 1 && num <= 4);
1943 /* The number of registers should be equal to that of the structure
1944 elements. */
1945 if (opnd->reglist.num_regs != num)
1946 {
1947 set_reg_list_error (mismatch_detail, idx, num);
1948 return 0;
1949 }
1950 break;
1951 default:
1952 break;
1953 }
1954 break;
1955
1956 case AARCH64_OPND_CLASS_IMMEDIATE:
1957 /* Constraint check on immediate operand. */
1958 imm = opnd->imm.value;
1959 /* E.g. imm_0_31 constrains value to be 0..31. */
1960 if (qualifier_value_in_range_constraint_p (qualifier)
1961 && !value_in_range_p (imm, get_lower_bound (qualifier),
1962 get_upper_bound (qualifier)))
1963 {
1964 set_imm_out_of_range_error (mismatch_detail, idx,
1965 get_lower_bound (qualifier),
1966 get_upper_bound (qualifier));
1967 return 0;
1968 }
1969
1970 switch (type)
1971 {
1972 case AARCH64_OPND_AIMM:
1973 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1974 {
1975 set_other_error (mismatch_detail, idx,
1976 _("invalid shift operator"));
1977 return 0;
1978 }
1979 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1980 {
1981 set_other_error (mismatch_detail, idx,
1982 _("shift amount must be 0 or 12"));
1983 return 0;
1984 }
1985 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1986 {
1987 set_other_error (mismatch_detail, idx,
1988 _("immediate out of range"));
1989 return 0;
1990 }
1991 break;
1992
1993 case AARCH64_OPND_HALF:
1994 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1995 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1996 {
1997 set_other_error (mismatch_detail, idx,
1998 _("invalid shift operator"));
1999 return 0;
2000 }
2001 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2002 if (!value_aligned_p (opnd->shifter.amount, 16))
2003 {
2004 set_other_error (mismatch_detail, idx,
2005 _("shift amount must be a multiple of 16"));
2006 return 0;
2007 }
2008 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2009 {
2010 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2011 0, size * 8 - 16);
2012 return 0;
2013 }
2014 if (opnd->imm.value < 0)
2015 {
2016 set_other_error (mismatch_detail, idx,
2017 _("negative immediate value not allowed"));
2018 return 0;
2019 }
2020 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2021 {
2022 set_other_error (mismatch_detail, idx,
2023 _("immediate out of range"));
2024 return 0;
2025 }
2026 break;
2027
2028 case AARCH64_OPND_IMM_MOV:
2029 {
2030 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2031 imm = opnd->imm.value;
2032 assert (idx == 1);
2033 switch (opcode->op)
2034 {
2035 case OP_MOV_IMM_WIDEN:
2036 imm = ~imm;
2037 /* Fall through. */
2038 case OP_MOV_IMM_WIDE:
2039 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2040 {
2041 set_other_error (mismatch_detail, idx,
2042 _("immediate out of range"));
2043 return 0;
2044 }
2045 break;
2046 case OP_MOV_IMM_LOG:
2047 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2048 {
2049 set_other_error (mismatch_detail, idx,
2050 _("immediate out of range"));
2051 return 0;
2052 }
2053 break;
2054 default:
2055 assert (0);
2056 return 0;
2057 }
2058 }
2059 break;
2060
2061 case AARCH64_OPND_NZCV:
2062 case AARCH64_OPND_CCMP_IMM:
2063 case AARCH64_OPND_EXCEPTION:
2064 case AARCH64_OPND_UIMM4:
2065 case AARCH64_OPND_UIMM7:
2066 case AARCH64_OPND_UIMM3_OP1:
2067 case AARCH64_OPND_UIMM3_OP2:
2068 case AARCH64_OPND_SVE_UIMM3:
2069 case AARCH64_OPND_SVE_UIMM7:
2070 case AARCH64_OPND_SVE_UIMM8:
2071 case AARCH64_OPND_SVE_UIMM8_53:
2072 size = get_operand_fields_width (get_operand_from_code (type));
2073 assert (size < 32);
2074 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2075 {
2076 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2077 (1 << size) - 1);
2078 return 0;
2079 }
2080 break;
2081
2082 case AARCH64_OPND_SIMM5:
2083 case AARCH64_OPND_SVE_SIMM5:
2084 case AARCH64_OPND_SVE_SIMM5B:
2085 case AARCH64_OPND_SVE_SIMM6:
2086 case AARCH64_OPND_SVE_SIMM8:
2087 size = get_operand_fields_width (get_operand_from_code (type));
2088 assert (size < 32);
2089 if (!value_fit_signed_field_p (opnd->imm.value, size))
2090 {
2091 set_imm_out_of_range_error (mismatch_detail, idx,
2092 -(1 << (size - 1)),
2093 (1 << (size - 1)) - 1);
2094 return 0;
2095 }
2096 break;
2097
2098 case AARCH64_OPND_WIDTH:
2099 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2100 && opnds[0].type == AARCH64_OPND_Rd);
2101 size = get_upper_bound (qualifier);
2102 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2103 /* lsb+width <= reg.size */
2104 {
2105 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2106 size - opnds[idx-1].imm.value);
2107 return 0;
2108 }
2109 break;
2110
2111 case AARCH64_OPND_LIMM:
2112 case AARCH64_OPND_SVE_LIMM:
2113 {
2114 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2115 uint64_t uimm = opnd->imm.value;
2116 if (opcode->op == OP_BIC)
2117 uimm = ~uimm;
2118 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2119 {
2120 set_other_error (mismatch_detail, idx,
2121 _("immediate out of range"));
2122 return 0;
2123 }
2124 }
2125 break;
2126
2127 case AARCH64_OPND_IMM0:
2128 case AARCH64_OPND_FPIMM0:
2129 if (opnd->imm.value != 0)
2130 {
2131 set_other_error (mismatch_detail, idx,
2132 _("immediate zero expected"));
2133 return 0;
2134 }
2135 break;
2136
2137 case AARCH64_OPND_IMM_ROT1:
2138 case AARCH64_OPND_IMM_ROT2:
2139 case AARCH64_OPND_SVE_IMM_ROT2:
2140 if (opnd->imm.value != 0
2141 && opnd->imm.value != 90
2142 && opnd->imm.value != 180
2143 && opnd->imm.value != 270)
2144 {
2145 set_other_error (mismatch_detail, idx,
2146 _("rotate expected to be 0, 90, 180 or 270"));
2147 return 0;
2148 }
2149 break;
2150
2151 case AARCH64_OPND_IMM_ROT3:
2152 case AARCH64_OPND_SVE_IMM_ROT1:
2153 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2154 {
2155 set_other_error (mismatch_detail, idx,
2156 _("rotate expected to be 90 or 270"));
2157 return 0;
2158 }
2159 break;
2160
2161 case AARCH64_OPND_SHLL_IMM:
2162 assert (idx == 2);
2163 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2164 if (opnd->imm.value != size)
2165 {
2166 set_other_error (mismatch_detail, idx,
2167 _("invalid shift amount"));
2168 return 0;
2169 }
2170 break;
2171
2172 case AARCH64_OPND_IMM_VLSL:
2173 size = aarch64_get_qualifier_esize (qualifier);
2174 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2175 {
2176 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2177 size * 8 - 1);
2178 return 0;
2179 }
2180 break;
2181
2182 case AARCH64_OPND_IMM_VLSR:
2183 size = aarch64_get_qualifier_esize (qualifier);
2184 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2185 {
2186 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2187 return 0;
2188 }
2189 break;
2190
2191 case AARCH64_OPND_SIMD_IMM:
2192 case AARCH64_OPND_SIMD_IMM_SFT:
2193 /* Qualifier check. */
2194 switch (qualifier)
2195 {
2196 case AARCH64_OPND_QLF_LSL:
2197 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2198 {
2199 set_other_error (mismatch_detail, idx,
2200 _("invalid shift operator"));
2201 return 0;
2202 }
2203 break;
2204 case AARCH64_OPND_QLF_MSL:
2205 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2206 {
2207 set_other_error (mismatch_detail, idx,
2208 _("invalid shift operator"));
2209 return 0;
2210 }
2211 break;
2212 case AARCH64_OPND_QLF_NIL:
2213 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2214 {
2215 set_other_error (mismatch_detail, idx,
2216 _("shift is not permitted"));
2217 return 0;
2218 }
2219 break;
2220 default:
2221 assert (0);
2222 return 0;
2223 }
2224 /* Is the immediate valid? */
2225 assert (idx == 1);
2226 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2227 {
2228 /* uimm8 or simm8 */
2229 if (!value_in_range_p (opnd->imm.value, -128, 255))
2230 {
2231 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2232 return 0;
2233 }
2234 }
2235 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2236 {
2237 /* uimm64 is not
2238 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2239 ffffffffgggggggghhhhhhhh'. */
2240 set_other_error (mismatch_detail, idx,
2241 _("invalid value for immediate"));
2242 return 0;
2243 }
2244 /* Is the shift amount valid? */
2245 switch (opnd->shifter.kind)
2246 {
2247 case AARCH64_MOD_LSL:
2248 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2249 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2250 {
2251 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2252 (size - 1) * 8);
2253 return 0;
2254 }
2255 if (!value_aligned_p (opnd->shifter.amount, 8))
2256 {
2257 set_unaligned_error (mismatch_detail, idx, 8);
2258 return 0;
2259 }
2260 break;
2261 case AARCH64_MOD_MSL:
2262 /* Only 8 and 16 are valid shift amount. */
2263 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2264 {
2265 set_other_error (mismatch_detail, idx,
2266 _("shift amount must be 0 or 16"));
2267 return 0;
2268 }
2269 break;
2270 default:
2271 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2272 {
2273 set_other_error (mismatch_detail, idx,
2274 _("invalid shift operator"));
2275 return 0;
2276 }
2277 break;
2278 }
2279 break;
2280
2281 case AARCH64_OPND_FPIMM:
2282 case AARCH64_OPND_SIMD_FPIMM:
2283 case AARCH64_OPND_SVE_FPIMM8:
2284 if (opnd->imm.is_fp == 0)
2285 {
2286 set_other_error (mismatch_detail, idx,
2287 _("floating-point immediate expected"));
2288 return 0;
2289 }
2290 /* The value is expected to be an 8-bit floating-point constant with
2291 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2292 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2293 instruction). */
2294 if (!value_in_range_p (opnd->imm.value, 0, 255))
2295 {
2296 set_other_error (mismatch_detail, idx,
2297 _("immediate out of range"));
2298 return 0;
2299 }
2300 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2301 {
2302 set_other_error (mismatch_detail, idx,
2303 _("invalid shift operator"));
2304 return 0;
2305 }
2306 break;
2307
2308 case AARCH64_OPND_SVE_AIMM:
2309 min_value = 0;
2310 sve_aimm:
2311 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2312 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2313 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2314 uvalue = opnd->imm.value;
2315 shift = opnd->shifter.amount;
2316 if (size == 1)
2317 {
2318 if (shift != 0)
2319 {
2320 set_other_error (mismatch_detail, idx,
2321 _("no shift amount allowed for"
2322 " 8-bit constants"));
2323 return 0;
2324 }
2325 }
2326 else
2327 {
2328 if (shift != 0 && shift != 8)
2329 {
2330 set_other_error (mismatch_detail, idx,
2331 _("shift amount must be 0 or 8"));
2332 return 0;
2333 }
2334 if (shift == 0 && (uvalue & 0xff) == 0)
2335 {
2336 shift = 8;
2337 uvalue = (int64_t) uvalue / 256;
2338 }
2339 }
2340 mask >>= shift;
2341 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2342 {
2343 set_other_error (mismatch_detail, idx,
2344 _("immediate too big for element size"));
2345 return 0;
2346 }
2347 uvalue = (uvalue - min_value) & mask;
2348 if (uvalue > 0xff)
2349 {
2350 set_other_error (mismatch_detail, idx,
2351 _("invalid arithmetic immediate"));
2352 return 0;
2353 }
2354 break;
2355
2356 case AARCH64_OPND_SVE_ASIMM:
2357 min_value = -128;
2358 goto sve_aimm;
2359
2360 case AARCH64_OPND_SVE_I1_HALF_ONE:
2361 assert (opnd->imm.is_fp);
2362 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2363 {
2364 set_other_error (mismatch_detail, idx,
2365 _("floating-point value must be 0.5 or 1.0"));
2366 return 0;
2367 }
2368 break;
2369
2370 case AARCH64_OPND_SVE_I1_HALF_TWO:
2371 assert (opnd->imm.is_fp);
2372 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2373 {
2374 set_other_error (mismatch_detail, idx,
2375 _("floating-point value must be 0.5 or 2.0"));
2376 return 0;
2377 }
2378 break;
2379
2380 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2381 assert (opnd->imm.is_fp);
2382 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2383 {
2384 set_other_error (mismatch_detail, idx,
2385 _("floating-point value must be 0.0 or 1.0"));
2386 return 0;
2387 }
2388 break;
2389
2390 case AARCH64_OPND_SVE_INV_LIMM:
2391 {
2392 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2393 uint64_t uimm = ~opnd->imm.value;
2394 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2395 {
2396 set_other_error (mismatch_detail, idx,
2397 _("immediate out of range"));
2398 return 0;
2399 }
2400 }
2401 break;
2402
2403 case AARCH64_OPND_SVE_LIMM_MOV:
2404 {
2405 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2406 uint64_t uimm = opnd->imm.value;
2407 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2408 {
2409 set_other_error (mismatch_detail, idx,
2410 _("immediate out of range"));
2411 return 0;
2412 }
2413 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2414 {
2415 set_other_error (mismatch_detail, idx,
2416 _("invalid replicated MOV immediate"));
2417 return 0;
2418 }
2419 }
2420 break;
2421
2422 case AARCH64_OPND_SVE_PATTERN_SCALED:
2423 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2424 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2425 {
2426 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2427 return 0;
2428 }
2429 break;
2430
2431 case AARCH64_OPND_SVE_SHLIMM_PRED:
2432 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2433 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2434 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2435 {
2436 set_imm_out_of_range_error (mismatch_detail, idx,
2437 0, 8 * size - 1);
2438 return 0;
2439 }
2440 break;
2441
2442 case AARCH64_OPND_SVE_SHRIMM_PRED:
2443 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2444 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2445 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2446 {
2447 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2448 return 0;
2449 }
2450 break;
2451
2452 default:
2453 break;
2454 }
2455 break;
2456
2457 case AARCH64_OPND_CLASS_SYSTEM:
2458 switch (type)
2459 {
2460 case AARCH64_OPND_PSTATEFIELD:
2461 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2462 /* MSR UAO, #uimm4
2463 MSR PAN, #uimm4
2464 The immediate must be #0 or #1. */
2465 if ((opnd->pstatefield == 0x03 /* UAO. */
2466 || opnd->pstatefield == 0x04) /* PAN. */
2467 && opnds[1].imm.value > 1)
2468 {
2469 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2470 return 0;
2471 }
2472 /* MSR SPSel, #uimm4
2473 Uses uimm4 as a control value to select the stack pointer: if
2474 bit 0 is set it selects the current exception level's stack
2475 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2476 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2477 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2478 {
2479 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2480 return 0;
2481 }
2482 break;
2483 default:
2484 break;
2485 }
2486 break;
2487
2488 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2489 /* Get the upper bound for the element index. */
2490 if (opcode->op == OP_FCMLA_ELEM)
2491 /* FCMLA index range depends on the vector size of other operands
and is halved because complex numbers take two elements.  */
2493 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2494 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2495 else
2496 num = 16;
2497 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2498
2499 /* Index out-of-range. */
2500 if (!value_in_range_p (opnd->reglane.index, 0, num))
2501 {
2502 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2503 return 0;
2504 }
2505 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2506 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2507 number is encoded in "size:M:Rm":
2508 size <Vm>
2509 00 RESERVED
2510 01 0:Rm
2511 10 M:Rm
2512 11 RESERVED */
2513 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2514 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2515 {
2516 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2517 return 0;
2518 }
2519 break;
2520
2521 case AARCH64_OPND_CLASS_MODIFIED_REG:
2522 assert (idx == 1 || idx == 2);
2523 switch (type)
2524 {
2525 case AARCH64_OPND_Rm_EXT:
2526 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2527 && opnd->shifter.kind != AARCH64_MOD_LSL)
2528 {
2529 set_other_error (mismatch_detail, idx,
2530 _("extend operator expected"));
2531 return 0;
2532 }
2533 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2534 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2535 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2536 case. */
2537 if (!aarch64_stack_pointer_p (opnds + 0)
2538 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2539 {
2540 if (!opnd->shifter.operator_present)
2541 {
2542 set_other_error (mismatch_detail, idx,
2543 _("missing extend operator"));
2544 return 0;
2545 }
2546 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2547 {
2548 set_other_error (mismatch_detail, idx,
2549 _("'LSL' operator not allowed"));
2550 return 0;
2551 }
2552 }
2553 assert (opnd->shifter.operator_present /* Default to LSL. */
2554 || opnd->shifter.kind == AARCH64_MOD_LSL);
2555 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2556 {
2557 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2558 return 0;
2559 }
2560 /* In the 64-bit form, the final register operand is written as Wm
2561 for all but the (possibly omitted) UXTX/LSL and SXTX
2562 operators.
2563 N.B. GAS allows X register to be used with any operator as a
2564 programming convenience. */
2565 if (qualifier == AARCH64_OPND_QLF_X
2566 && opnd->shifter.kind != AARCH64_MOD_LSL
2567 && opnd->shifter.kind != AARCH64_MOD_UXTX
2568 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2569 {
2570 set_other_error (mismatch_detail, idx, _("W register expected"));
2571 return 0;
2572 }
2573 break;
2574
2575 case AARCH64_OPND_Rm_SFT:
2576 /* ROR is not available to the shifted register operand in
2577 arithmetic instructions. */
2578 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2579 {
2580 set_other_error (mismatch_detail, idx,
2581 _("shift operator expected"));
2582 return 0;
2583 }
2584 if (opnd->shifter.kind == AARCH64_MOD_ROR
2585 && opcode->iclass != log_shift)
2586 {
2587 set_other_error (mismatch_detail, idx,
2588 _("'ROR' operator not allowed"));
2589 return 0;
2590 }
2591 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2592 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2593 {
2594 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2595 return 0;
2596 }
2597 break;
2598
2599 default:
2600 break;
2601 }
2602 break;
2603
2604 default:
2605 break;
2606 }
2607
2608 return 1;
2609 }
2610
2611 /* Main entrypoint for the operand constraint checking.
2612
2613 Return 1 if operands of *INST meet the constraint applied by the operand
2614 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2615 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2616 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2617 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2618 error kind when it is notified that an instruction does not pass the check).
2619
2620 Un-determined operand qualifiers may get established during the process. */
2621
2622 int
2623 aarch64_match_operands_constraint (aarch64_inst *inst,
2624 aarch64_operand_error *mismatch_detail)
2625 {
2626 int i;
2627
2628 DEBUG_TRACE ("enter");
2629
2630 /* Check for cases where a source register needs to be the same as the
2631 destination register. Do this before matching qualifiers since if
2632 an instruction has both invalid tying and invalid qualifiers,
2633 the error about qualifiers would suggest several alternative
2634 instructions that also have invalid tying. */
2635 i = inst->opcode->tied_operand;
2636 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2637 {
2638 if (mismatch_detail)
2639 {
2640 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2641 mismatch_detail->index = i;
2642 mismatch_detail->error = NULL;
2643 }
2644 return 0;
2645 }
2646
2647 /* Match operands' qualifier.
2648 *INST has already had qualifier establish for some, if not all, of
2649 its operands; we need to find out whether these established
2650 qualifiers match one of the qualifier sequence in
2651 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2652 with the corresponding qualifier in such a sequence.
2653 Only basic operand constraint checking is done here; the more thorough
2654 constraint checking will carried out by operand_general_constraint_met_p,
2655 which has be to called after this in order to get all of the operands'
2656 qualifiers established. */
2657 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2658 {
2659 DEBUG_TRACE ("FAIL on operand qualifier matching");
2660 if (mismatch_detail)
2661 {
2662 /* Return an error type to indicate that it is the qualifier
2663 matching failure; we don't care about which operand as there
2664 are enough information in the opcode table to reproduce it. */
2665 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2666 mismatch_detail->index = -1;
2667 mismatch_detail->error = NULL;
2668 }
2669 return 0;
2670 }
2671
2672 /* Match operands' constraint. */
2673 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2674 {
2675 enum aarch64_opnd type = inst->opcode->operands[i];
2676 if (type == AARCH64_OPND_NIL)
2677 break;
2678 if (inst->operands[i].skip)
2679 {
2680 DEBUG_TRACE ("skip the incomplete operand %d", i);
2681 continue;
2682 }
2683 if (operand_general_constraint_met_p (inst->operands, i, type,
2684 inst->opcode, mismatch_detail) == 0)
2685 {
2686 DEBUG_TRACE ("FAIL on operand %d", i);
2687 return 0;
2688 }
2689 }
2690
2691 DEBUG_TRACE ("PASS");
2692
2693 return 1;
2694 }
2695
2696 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2697 Also updates the TYPE of each INST->OPERANDS with the corresponding
2698 value of OPCODE->OPERANDS.
2699
2700 Note that some operand qualifiers may need to be manually cleared by
2701 the caller before it further calls the aarch64_opcode_encode; by
2702 doing this, it helps the qualifier matching facilities work
2703 properly. */
2704
2705 const aarch64_opcode*
2706 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2707 {
2708 int i;
2709 const aarch64_opcode *old = inst->opcode;
2710
2711 inst->opcode = opcode;
2712
2713 /* Update the operand types. */
2714 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2715 {
2716 inst->operands[i].type = opcode->operands[i];
2717 if (opcode->operands[i] == AARCH64_OPND_NIL)
2718 break;
2719 }
2720
2721 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2722
2723 return old;
2724 }
2725
2726 int
2727 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2728 {
2729 int i;
2730 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2731 if (operands[i] == operand)
2732 return i;
2733 else if (operands[i] == AARCH64_OPND_NIL)
2734 break;
2735 return -1;
2736 }
2737
/* R0...R30, followed by FOR31.  Expands, via the per-register macro R,
   to a 32-entry initializer for one register bank; FOR31 supplies the
   special name used for register number 31 (SP or the zero register).  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Names of the general-purpose integer registers, indexed as
   [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2769
2770 /* Return the integer register name.
2771 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2772
2773 static inline const char *
2774 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2775 {
2776 const int has_zr = sp_reg_p ? 0 : 1;
2777 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2778 return int_reg[has_zr][is_64][regno];
2779 }
2780
2781 /* Like get_int_reg_name, but IS_64 is always 1. */
2782
2783 static inline const char *
2784 get_64bit_int_reg_name (int regno, int sp_reg_p)
2785 {
2786 const int has_zr = sp_reg_p ? 0 : 1;
2787 return int_reg[has_zr][1][regno];
2788 }
2789
2790 /* Get the name of the integer offset register in OPND, using the shift type
2791 to decide whether it's a word or doubleword. */
2792
2793 static inline const char *
2794 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2795 {
2796 switch (opnd->shifter.kind)
2797 {
2798 case AARCH64_MOD_UXTW:
2799 case AARCH64_MOD_SXTW:
2800 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2801
2802 case AARCH64_MOD_LSL:
2803 case AARCH64_MOD_SXTX:
2804 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2805
2806 default:
2807 abort ();
2808 }
2809 }
2810
2811 /* Get the name of the SVE vector offset register in OPND, using the operand
2812 qualifier to decide whether the suffix should be .S or .D. */
2813
2814 static inline const char *
2815 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2816 {
2817 assert (qualifier == AARCH64_OPND_QLF_S_S
2818 || qualifier == AARCH64_OPND_QLF_S_D);
2819 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2820 }
2821
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Overlay for reinterpreting a 64-bit pattern as a double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Overlay for reinterpreting a 32-bit pattern as a float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision values are expanded to single precision (see
   expand_fp_imm), so this overlay is also 32-bit.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2841
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialized so that an unsupported SIZE cannot yield an
     uninitialized (undefined) return value when asserts are
     compiled out.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>  */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0>  */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>  */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4)  */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>  */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4)  */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2885
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap modulo 32 (e.g. {v31, v0}).  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* Only the element-list operand type (LEt) must carry an index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Otherwise list each register individually, wrapping each
	 successor modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2944
2945 /* Print the register+immediate address in OPND to BUF, which has SIZE
2946 characters. BASE is the name of the base register. */
2947
2948 static void
2949 print_immediate_offset_address (char *buf, size_t size,
2950 const aarch64_opnd_info *opnd,
2951 const char *base)
2952 {
2953 if (opnd->addr.writeback)
2954 {
2955 if (opnd->addr.preind)
2956 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2957 else
2958 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2959 }
2960 else
2961 {
2962 if (opnd->shifter.operator_present)
2963 {
2964 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2965 snprintf (buf, size, "[%s, #%d, mul vl]",
2966 base, opnd->addr.offset.imm);
2967 }
2968 else if (opnd->addr.offset.imm)
2969 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2970 else
2971 snprintf (buf, size, "[%s]", base);
2972 }
2973 }
2974
2975 /* Produce the string representation of the register offset address operand
2976 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2977 the names of the base and offset registers. */
2978 static void
2979 print_register_offset_address (char *buf, size_t size,
2980 const aarch64_opnd_info *opnd,
2981 const char *base, const char *offset)
2982 {
2983 char tb[16]; /* Temporary buffer. */
2984 bfd_boolean print_extend_p = TRUE;
2985 bfd_boolean print_amount_p = TRUE;
2986 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2987
2988 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2989 || !opnd->shifter.amount_present))
2990 {
2991 /* Not print the shift/extend amount when the amount is zero and
2992 when it is not the special case of 8-bit load/store instruction. */
2993 print_amount_p = FALSE;
2994 /* Likewise, no need to print the shift operator LSL in such a
2995 situation. */
2996 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2997 print_extend_p = FALSE;
2998 }
2999
3000 /* Prepare for the extend/shift. */
3001 if (print_extend_p)
3002 {
3003 if (print_amount_p)
3004 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3005 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3006 (opnd->shifter.amount % 100));
3007 else
3008 snprintf (tb, sizeof (tb), ", %s", shift_name);
3009 }
3010 else
3011 tb[0] = '\0';
3012
3013 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3014 }
3015
3016 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3017 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3018 PC, PCREL_P and ADDRESS are used to pass in and return information about
3019 the PC-relative address calculation, where the PC value is passed in
3020 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3021 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3022 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3023
3024 The function serves both the disassembler and the assembler diagnostics
3025 issuer, which is the reason why it lives in this file. */
3026
3027 void
3028 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3029 const aarch64_opcode *opcode,
3030 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3031 bfd_vma *address)
3032 {
3033 unsigned int i, num_conds;
3034 const char *name = NULL;
3035 const aarch64_opnd_info *opnd = opnds + idx;
3036 enum aarch64_modifier_kind kind;
3037 uint64_t addr, enum_value;
3038
3039 buf[0] = '\0';
3040 if (pcrel_p)
3041 *pcrel_p = 0;
3042
3043 switch (opnd->type)
3044 {
3045 case AARCH64_OPND_Rd:
3046 case AARCH64_OPND_Rn:
3047 case AARCH64_OPND_Rm:
3048 case AARCH64_OPND_Rt:
3049 case AARCH64_OPND_Rt2:
3050 case AARCH64_OPND_Rs:
3051 case AARCH64_OPND_Ra:
3052 case AARCH64_OPND_Rt_SYS:
3053 case AARCH64_OPND_PAIRREG:
3054 case AARCH64_OPND_SVE_Rm:
3055 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
the <ic_op>, therefore we use opnd->present to override the
3057 generic optional-ness information. */
3058 if (opnd->type == AARCH64_OPND_Rt_SYS)
3059 {
3060 if (!opnd->present)
3061 break;
3062 }
3063 /* Omit the operand, e.g. RET. */
3064 else if (optional_operand_p (opcode, idx)
3065 && (opnd->reg.regno
3066 == get_optional_operand_default_value (opcode)))
3067 break;
3068 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3069 || opnd->qualifier == AARCH64_OPND_QLF_X);
3070 snprintf (buf, size, "%s",
3071 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3072 break;
3073
3074 case AARCH64_OPND_Rd_SP:
3075 case AARCH64_OPND_Rn_SP:
3076 case AARCH64_OPND_SVE_Rn_SP:
3077 case AARCH64_OPND_Rm_SP:
3078 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3079 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3080 || opnd->qualifier == AARCH64_OPND_QLF_X
3081 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3082 snprintf (buf, size, "%s",
3083 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3084 break;
3085
3086 case AARCH64_OPND_Rm_EXT:
3087 kind = opnd->shifter.kind;
3088 assert (idx == 1 || idx == 2);
3089 if ((aarch64_stack_pointer_p (opnds)
3090 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3091 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3092 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3093 && kind == AARCH64_MOD_UXTW)
3094 || (opnd->qualifier == AARCH64_OPND_QLF_X
3095 && kind == AARCH64_MOD_UXTX)))
3096 {
3097 /* 'LSL' is the preferred form in this case. */
3098 kind = AARCH64_MOD_LSL;
3099 if (opnd->shifter.amount == 0)
3100 {
3101 /* Shifter omitted. */
3102 snprintf (buf, size, "%s",
3103 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3104 break;
3105 }
3106 }
3107 if (opnd->shifter.amount)
3108 snprintf (buf, size, "%s, %s #%" PRIi64,
3109 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3110 aarch64_operand_modifiers[kind].name,
3111 opnd->shifter.amount);
3112 else
3113 snprintf (buf, size, "%s, %s",
3114 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3115 aarch64_operand_modifiers[kind].name);
3116 break;
3117
3118 case AARCH64_OPND_Rm_SFT:
3119 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3120 || opnd->qualifier == AARCH64_OPND_QLF_X);
3121 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3122 snprintf (buf, size, "%s",
3123 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3124 else
3125 snprintf (buf, size, "%s, %s #%" PRIi64,
3126 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3127 aarch64_operand_modifiers[opnd->shifter.kind].name,
3128 opnd->shifter.amount);
3129 break;
3130
3131 case AARCH64_OPND_Fd:
3132 case AARCH64_OPND_Fn:
3133 case AARCH64_OPND_Fm:
3134 case AARCH64_OPND_Fa:
3135 case AARCH64_OPND_Ft:
3136 case AARCH64_OPND_Ft2:
3137 case AARCH64_OPND_Sd:
3138 case AARCH64_OPND_Sn:
3139 case AARCH64_OPND_Sm:
3140 case AARCH64_OPND_SVE_VZn:
3141 case AARCH64_OPND_SVE_Vd:
3142 case AARCH64_OPND_SVE_Vm:
3143 case AARCH64_OPND_SVE_Vn:
3144 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3145 opnd->reg.regno);
3146 break;
3147
3148 case AARCH64_OPND_Vd:
3149 case AARCH64_OPND_Vn:
3150 case AARCH64_OPND_Vm:
3151 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3152 aarch64_get_qualifier_name (opnd->qualifier));
3153 break;
3154
3155 case AARCH64_OPND_Ed:
3156 case AARCH64_OPND_En:
3157 case AARCH64_OPND_Em:
3158 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3159 aarch64_get_qualifier_name (opnd->qualifier),
3160 opnd->reglane.index);
3161 break;
3162
3163 case AARCH64_OPND_VdD1:
3164 case AARCH64_OPND_VnD1:
3165 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3166 break;
3167
3168 case AARCH64_OPND_LVn:
3169 case AARCH64_OPND_LVt:
3170 case AARCH64_OPND_LVt_AL:
3171 case AARCH64_OPND_LEt:
3172 print_register_list (buf, size, opnd, "v");
3173 break;
3174
3175 case AARCH64_OPND_SVE_Pd:
3176 case AARCH64_OPND_SVE_Pg3:
3177 case AARCH64_OPND_SVE_Pg4_5:
3178 case AARCH64_OPND_SVE_Pg4_10:
3179 case AARCH64_OPND_SVE_Pg4_16:
3180 case AARCH64_OPND_SVE_Pm:
3181 case AARCH64_OPND_SVE_Pn:
3182 case AARCH64_OPND_SVE_Pt:
3183 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3184 snprintf (buf, size, "p%d", opnd->reg.regno);
3185 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3186 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3187 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3188 aarch64_get_qualifier_name (opnd->qualifier));
3189 else
3190 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3191 aarch64_get_qualifier_name (opnd->qualifier));
3192 break;
3193
3194 case AARCH64_OPND_SVE_Za_5:
3195 case AARCH64_OPND_SVE_Za_16:
3196 case AARCH64_OPND_SVE_Zd:
3197 case AARCH64_OPND_SVE_Zm_5:
3198 case AARCH64_OPND_SVE_Zm_16:
3199 case AARCH64_OPND_SVE_Zn:
3200 case AARCH64_OPND_SVE_Zt:
3201 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3202 snprintf (buf, size, "z%d", opnd->reg.regno);
3203 else
3204 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3205 aarch64_get_qualifier_name (opnd->qualifier));
3206 break;
3207
3208 case AARCH64_OPND_SVE_ZnxN:
3209 case AARCH64_OPND_SVE_ZtxN:
3210 print_register_list (buf, size, opnd, "z");
3211 break;
3212
3213 case AARCH64_OPND_SVE_Zm3_INDEX:
3214 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3215 case AARCH64_OPND_SVE_Zm4_INDEX:
3216 case AARCH64_OPND_SVE_Zn_INDEX:
3217 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3218 aarch64_get_qualifier_name (opnd->qualifier),
3219 opnd->reglane.index);
3220 break;
3221
3222 case AARCH64_OPND_CRn:
3223 case AARCH64_OPND_CRm:
3224 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3225 break;
3226
3227 case AARCH64_OPND_IDX:
3228 case AARCH64_OPND_IMM:
3229 case AARCH64_OPND_WIDTH:
3230 case AARCH64_OPND_UIMM3_OP1:
3231 case AARCH64_OPND_UIMM3_OP2:
3232 case AARCH64_OPND_BIT_NUM:
3233 case AARCH64_OPND_IMM_VLSL:
3234 case AARCH64_OPND_IMM_VLSR:
3235 case AARCH64_OPND_SHLL_IMM:
3236 case AARCH64_OPND_IMM0:
3237 case AARCH64_OPND_IMMR:
3238 case AARCH64_OPND_IMMS:
3239 case AARCH64_OPND_FBITS:
3240 case AARCH64_OPND_SIMM5:
3241 case AARCH64_OPND_SVE_SHLIMM_PRED:
3242 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3243 case AARCH64_OPND_SVE_SHRIMM_PRED:
3244 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3245 case AARCH64_OPND_SVE_SIMM5:
3246 case AARCH64_OPND_SVE_SIMM5B:
3247 case AARCH64_OPND_SVE_SIMM6:
3248 case AARCH64_OPND_SVE_SIMM8:
3249 case AARCH64_OPND_SVE_UIMM3:
3250 case AARCH64_OPND_SVE_UIMM7:
3251 case AARCH64_OPND_SVE_UIMM8:
3252 case AARCH64_OPND_SVE_UIMM8_53:
3253 case AARCH64_OPND_IMM_ROT1:
3254 case AARCH64_OPND_IMM_ROT2:
3255 case AARCH64_OPND_IMM_ROT3:
3256 case AARCH64_OPND_SVE_IMM_ROT1:
3257 case AARCH64_OPND_SVE_IMM_ROT2:
3258 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3259 break;
3260
3261 case AARCH64_OPND_SVE_I1_HALF_ONE:
3262 case AARCH64_OPND_SVE_I1_HALF_TWO:
3263 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3264 {
3265 single_conv_t c;
3266 c.i = opnd->imm.value;
3267 snprintf (buf, size, "#%.1f", c.f);
3268 break;
3269 }
3270
3271 case AARCH64_OPND_SVE_PATTERN:
3272 if (optional_operand_p (opcode, idx)
3273 && opnd->imm.value == get_optional_operand_default_value (opcode))
3274 break;
3275 enum_value = opnd->imm.value;
3276 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3277 if (aarch64_sve_pattern_array[enum_value])
3278 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3279 else
3280 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3281 break;
3282
3283 case AARCH64_OPND_SVE_PATTERN_SCALED:
3284 if (optional_operand_p (opcode, idx)
3285 && !opnd->shifter.operator_present
3286 && opnd->imm.value == get_optional_operand_default_value (opcode))
3287 break;
3288 enum_value = opnd->imm.value;
3289 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3290 if (aarch64_sve_pattern_array[opnd->imm.value])
3291 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3292 else
3293 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3294 if (opnd->shifter.operator_present)
3295 {
3296 size_t len = strlen (buf);
3297 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3298 aarch64_operand_modifiers[opnd->shifter.kind].name,
3299 opnd->shifter.amount);
3300 }
3301 break;
3302
3303 case AARCH64_OPND_SVE_PRFOP:
3304 enum_value = opnd->imm.value;
3305 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3306 if (aarch64_sve_prfop_array[enum_value])
3307 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3308 else
3309 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3310 break;
3311
3312 case AARCH64_OPND_IMM_MOV:
3313 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3314 {
3315 case 4: /* e.g. MOV Wd, #<imm32>. */
3316 {
3317 int imm32 = opnd->imm.value;
3318 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3319 }
3320 break;
3321 case 8: /* e.g. MOV Xd, #<imm64>. */
3322 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3323 opnd->imm.value, opnd->imm.value);
3324 break;
3325 default: assert (0);
3326 }
3327 break;
3328
3329 case AARCH64_OPND_FPIMM0:
3330 snprintf (buf, size, "#0.0");
3331 break;
3332
3333 case AARCH64_OPND_LIMM:
3334 case AARCH64_OPND_AIMM:
3335 case AARCH64_OPND_HALF:
3336 case AARCH64_OPND_SVE_INV_LIMM:
3337 case AARCH64_OPND_SVE_LIMM:
3338 case AARCH64_OPND_SVE_LIMM_MOV:
3339 if (opnd->shifter.amount)
3340 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3341 opnd->shifter.amount);
3342 else
3343 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3344 break;
3345
3346 case AARCH64_OPND_SIMD_IMM:
3347 case AARCH64_OPND_SIMD_IMM_SFT:
3348 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3349 || opnd->shifter.kind == AARCH64_MOD_NONE)
3350 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3351 else
3352 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3353 aarch64_operand_modifiers[opnd->shifter.kind].name,
3354 opnd->shifter.amount);
3355 break;
3356
3357 case AARCH64_OPND_SVE_AIMM:
3358 case AARCH64_OPND_SVE_ASIMM:
3359 if (opnd->shifter.amount)
3360 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3361 opnd->shifter.amount);
3362 else
3363 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3364 break;
3365
3366 case AARCH64_OPND_FPIMM:
3367 case AARCH64_OPND_SIMD_FPIMM:
3368 case AARCH64_OPND_SVE_FPIMM8:
3369 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3370 {
3371 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3372 {
3373 half_conv_t c;
3374 c.i = expand_fp_imm (2, opnd->imm.value);
3375 snprintf (buf, size, "#%.18e", c.f);
3376 }
3377 break;
3378 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3379 {
3380 single_conv_t c;
3381 c.i = expand_fp_imm (4, opnd->imm.value);
3382 snprintf (buf, size, "#%.18e", c.f);
3383 }
3384 break;
3385 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3386 {
3387 double_conv_t c;
3388 c.i = expand_fp_imm (8, opnd->imm.value);
3389 snprintf (buf, size, "#%.18e", c.d);
3390 }
3391 break;
3392 default: assert (0);
3393 }
3394 break;
3395
3396 case AARCH64_OPND_CCMP_IMM:
3397 case AARCH64_OPND_NZCV:
3398 case AARCH64_OPND_EXCEPTION:
3399 case AARCH64_OPND_UIMM4:
3400 case AARCH64_OPND_UIMM7:
3401 if (optional_operand_p (opcode, idx) == TRUE
3402 && (opnd->imm.value ==
3403 (int64_t) get_optional_operand_default_value (opcode)))
3404 /* Omit the operand, e.g. DCPS1. */
3405 break;
3406 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3407 break;
3408
3409 case AARCH64_OPND_COND:
3410 case AARCH64_OPND_COND1:
3411 snprintf (buf, size, "%s", opnd->cond->names[0]);
3412 num_conds = ARRAY_SIZE (opnd->cond->names);
3413 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3414 {
3415 size_t len = strlen (buf);
3416 if (i == 1)
3417 snprintf (buf + len, size - len, " // %s = %s",
3418 opnd->cond->names[0], opnd->cond->names[i]);
3419 else
3420 snprintf (buf + len, size - len, ", %s",
3421 opnd->cond->names[i]);
3422 }
3423 break;
3424
3425 case AARCH64_OPND_ADDR_ADRP:
3426 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3427 + opnd->imm.value;
3428 if (pcrel_p)
3429 *pcrel_p = 1;
3430 if (address)
3431 *address = addr;
3432 /* This is not necessary during the disassembling, as print_address_func
3433 in the disassemble_info will take care of the printing. But some
3434 other callers may be still interested in getting the string in *STR,
3435 so here we do snprintf regardless. */
3436 snprintf (buf, size, "#0x%" PRIx64, addr);
3437 break;
3438
3439 case AARCH64_OPND_ADDR_PCREL14:
3440 case AARCH64_OPND_ADDR_PCREL19:
3441 case AARCH64_OPND_ADDR_PCREL21:
3442 case AARCH64_OPND_ADDR_PCREL26:
3443 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3444 if (pcrel_p)
3445 *pcrel_p = 1;
3446 if (address)
3447 *address = addr;
3448 /* This is not necessary during the disassembling, as print_address_func
3449 in the disassemble_info will take care of the printing. But some
3450 other callers may be still interested in getting the string in *STR,
3451 so here we do snprintf regardless. */
3452 snprintf (buf, size, "#0x%" PRIx64, addr);
3453 break;
3454
3455 case AARCH64_OPND_ADDR_SIMPLE:
3456 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3457 case AARCH64_OPND_SIMD_ADDR_POST:
3458 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3459 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3460 {
3461 if (opnd->addr.offset.is_reg)
3462 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3463 else
3464 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3465 }
3466 else
3467 snprintf (buf, size, "[%s]", name);
3468 break;
3469
3470 case AARCH64_OPND_ADDR_REGOFF:
3471 case AARCH64_OPND_SVE_ADDR_RR:
3472 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3473 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3474 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3475 case AARCH64_OPND_SVE_ADDR_RX:
3476 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3477 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3478 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3479 print_register_offset_address
3480 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3481 get_offset_int_reg_name (opnd));
3482 break;
3483
3484 case AARCH64_OPND_SVE_ADDR_RZ:
3485 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3486 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3487 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3488 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3489 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3490 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3491 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3492 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3493 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3494 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3495 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3496 print_register_offset_address
3497 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3498 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3499 break;
3500
3501 case AARCH64_OPND_ADDR_SIMM7:
3502 case AARCH64_OPND_ADDR_SIMM9:
3503 case AARCH64_OPND_ADDR_SIMM9_2:
3504 case AARCH64_OPND_ADDR_SIMM10:
3505 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3506 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3507 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3508 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3509 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3510 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3511 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3512 case AARCH64_OPND_SVE_ADDR_RI_U6:
3513 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3514 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3515 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3516 print_immediate_offset_address
3517 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3518 break;
3519
3520 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3521 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3522 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3523 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3524 print_immediate_offset_address
3525 (buf, size, opnd,
3526 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3527 break;
3528
3529 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3530 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3531 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3532 print_register_offset_address
3533 (buf, size, opnd,
3534 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3535 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3536 break;
3537
3538 case AARCH64_OPND_ADDR_UIMM12:
3539 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3540 if (opnd->addr.offset.imm)
3541 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3542 else
3543 snprintf (buf, size, "[%s]", name);
3544 break;
3545
3546 case AARCH64_OPND_SYSREG:
3547 for (i = 0; aarch64_sys_regs[i].name; ++i)
3548 if (aarch64_sys_regs[i].value == opnd->sysreg
3549 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3550 break;
3551 if (aarch64_sys_regs[i].name)
3552 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3553 else
3554 {
3555 /* Implementation defined system register. */
3556 unsigned int value = opnd->sysreg;
3557 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3558 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3559 value & 0x7);
3560 }
3561 break;
3562
3563 case AARCH64_OPND_PSTATEFIELD:
3564 for (i = 0; aarch64_pstatefields[i].name; ++i)
3565 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3566 break;
3567 assert (aarch64_pstatefields[i].name);
3568 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3569 break;
3570
3571 case AARCH64_OPND_SYSREG_AT:
3572 case AARCH64_OPND_SYSREG_DC:
3573 case AARCH64_OPND_SYSREG_IC:
3574 case AARCH64_OPND_SYSREG_TLBI:
3575 snprintf (buf, size, "%s", opnd->sysins_op->name);
3576 break;
3577
3578 case AARCH64_OPND_BARRIER:
3579 snprintf (buf, size, "%s", opnd->barrier->name);
3580 break;
3581
3582 case AARCH64_OPND_BARRIER_ISB:
3583 /* Operand can be omitted, e.g. in DCPS1. */
3584 if (! optional_operand_p (opcode, idx)
3585 || (opnd->barrier->value
3586 != get_optional_operand_default_value (opcode)))
3587 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3588 break;
3589
3590 case AARCH64_OPND_PRFOP:
3591 if (opnd->prfop->name != NULL)
3592 snprintf (buf, size, "%s", opnd->prfop->name);
3593 else
3594 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3595 break;
3596
3597 case AARCH64_OPND_BARRIER_PSB:
3598 snprintf (buf, size, "%s", opnd->hint_option->name);
3599 break;
3600
3601 default:
3602 assert (0);
3603 }
3604 }
3605
3606 #define CPENC(op0,op1,crn,crm,op2) \
3608 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3609 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3610 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3611 /* for 3.9.10 System Instructions */
3612 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3613
3614 #define C0 0
3615 #define C1 1
3616 #define C2 2
3617 #define C3 3
3618 #define C4 4
3619 #define C5 5
3620 #define C6 6
3621 #define C7 7
3622 #define C8 8
3623 #define C9 9
3624 #define C10 10
3625 #define C11 11
3626 #define C12 12
3627 #define C13 13
3628 #define C14 14
3629 #define C15 15
3630
3631 #ifdef F_DEPRECATED
3632 #undef F_DEPRECATED
3633 #endif
3634 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3635
3636 #ifdef F_ARCHEXT
3637 #undef F_ARCHEXT
3638 #endif
3639 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3640
3641 #ifdef F_HASXT
3642 #undef F_HASXT
3643 #endif
3644 #define F_HASXT 0x4 /* System instruction register <Xt>
3645 operand. */
3646
3647
3648 /* TODO: there are two more issues that need to be resolved:
3649 1. handle read-only and write-only system registers
3650 2. handle cpu-implementation-defined system registers. */
3651 const aarch64_sys_reg aarch64_sys_regs [] =
3652 {
3653 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3654 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3655 { "elr_el1", CPEN_(0,C0,1), 0 },
3656 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3657 { "sp_el0", CPEN_(0,C1,0), 0 },
3658 { "spsel", CPEN_(0,C2,0), 0 },
3659 { "daif", CPEN_(3,C2,1), 0 },
3660 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3661 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3662 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3663 { "nzcv", CPEN_(3,C2,0), 0 },
3664 { "fpcr", CPEN_(3,C4,0), 0 },
3665 { "fpsr", CPEN_(3,C4,1), 0 },
3666 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3667 { "dlr_el0", CPEN_(3,C5,1), 0 },
3668 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3669 { "elr_el2", CPEN_(4,C0,1), 0 },
3670 { "sp_el1", CPEN_(4,C1,0), 0 },
3671 { "spsr_irq", CPEN_(4,C3,0), 0 },
3672 { "spsr_abt", CPEN_(4,C3,1), 0 },
3673 { "spsr_und", CPEN_(4,C3,2), 0 },
3674 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3675 { "spsr_el3", CPEN_(6,C0,0), 0 },
3676 { "elr_el3", CPEN_(6,C0,1), 0 },
3677 { "sp_el2", CPEN_(6,C1,0), 0 },
3678 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3679 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3680 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3681 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3682 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3683 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3684 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3685 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3686 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3687 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3688 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3689 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3690 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3691 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3692 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3693 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3694 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3695 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3696 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3697 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3698 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3699 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3700 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3701 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3702 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3703 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3704 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3705 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3706 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3707 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3708 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3709 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3710 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3711 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3712 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3713 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3714 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3715 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3716 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
3717 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3718 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3719 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3720 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3721 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3722 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3723 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3724 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3725 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3726 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3727 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3728 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3729 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3730 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3731 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3732 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3733 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3734 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3735 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3736 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3737 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3738 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3739 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3740 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3741 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3742 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3743 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3744 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3745 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3746 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3747 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3748 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3749 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3750 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3751 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3752 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3753 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3754 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3755 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3756 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3757 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3758 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3759 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3760 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3761 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3762 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3763 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3764 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3765 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3766 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3767 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3768 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3769 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3770 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3771 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3772 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3773 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3774 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3775 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3776 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3777 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3778 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3779 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3780 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3781 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3782 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3783 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3784 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3785 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3786 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3787 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3788 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3789 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3790 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3791 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3792 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3793 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3794 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3795 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3796 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3797 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3798 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3799 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3800 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3801 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3802 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3803 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3804 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3805 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3806 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3807 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3808 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3809 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3810 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3811 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3812 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3813 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3814 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3815 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3816 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3817 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3818 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3819 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3820 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3821 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3822 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3823 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3824 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3825 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3826 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3827 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3828 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3829 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3830 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3831 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3832 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3833 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3834 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3835 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3836 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3837 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3838 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3839 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3840 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3841 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3842 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3843 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3844 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3845 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3846 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3847 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3848 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3849 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3850 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3851 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3852 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3853 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3854 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3855 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3856 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3857 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3858 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3859 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3860 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3861 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3862 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3863 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3864 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3865 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3866 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3867 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3868 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3869 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3870 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3871 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3872 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3873 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3874 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3875 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3876 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3877 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3878 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3879 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3880 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3881 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3882 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3883 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3884 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3885 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3886 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3887 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3888 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3889 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3890 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3891 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3892 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3893 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3894 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3895 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3896 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3897 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3898 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3899 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3900 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3901 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3902 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3903 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3904 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3905 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3906 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3907 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3908 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3909 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3910 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3911 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3912 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3913 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3914 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3915 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3916 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3917 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3918 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3919 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3920 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3921 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3922 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3923 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3924 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3925 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3926 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3927 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3928 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3929 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3930 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3931 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3932 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3933 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3934 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3935 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3936 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3937 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3938 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3939 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3940 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3941 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3942 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3943 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3944 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3945 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3946 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3947 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3948 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3949 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3950 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3951 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3952 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3953 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3954 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3955 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3956 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3957 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3958 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3959 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3960 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3961 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3962 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3963 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3964 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3965 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3966 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3967 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3968 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3969 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3970 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3971 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3972 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3973 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3974 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3975 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3976 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3977 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3978 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3979 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3980 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3981 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3982 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3983 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3984 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3985 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3986 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3987 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3988 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3989 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3990 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3991 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3992 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3993 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3994 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3995 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3996 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3997 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3998 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3999 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4000 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4001 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4002 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4003 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4004 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4005 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4006 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4007 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4008 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4009 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4010 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4011 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4012 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4013 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4014 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4015 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4016 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4017 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4018 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4019 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4020 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4021 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4022 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4023 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4024 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4025 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4026 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4027 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4028 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4029 { 0, CPENC(0,0,0,0,0), 0 },
4030 };
4031
4032 bfd_boolean
4033 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4034 {
4035 return (reg->flags & F_DEPRECATED) != 0;
4036 }
4037
/* Return TRUE if the system register described by REG is available on a
   CPU with the feature set FEATURES: either the register is part of the
   base architecture (no F_ARCHEXT flag) or the architecture extension it
   belongs to is enabled in FEATURES.  The encodings tested below must
   stay in sync with the aarch64_sys_regs table above.  */

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers without an architecture-extension flag are always valid.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
      return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.
     NOTE(review): the condition below covers ten encodings but only
     eight names are listed; C5,C3,2 and C5,C3,3 are not named here —
     confirm against the aarch64_sys_regs table / the Arm ARM.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* No extension-specific restriction failed.  */
  return TRUE;
}
4168
/* PSTATE field operands accepted by MSR (immediate).  The second member
   is the field's encoding value; F_ARCHEXT marks fields that require an
   architecture extension (see aarch64_pstatefield_supported_p).  The
   table is terminated by a null-name sentinel entry.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  { "pan",              0x04,   F_ARCHEXT },
  { "uao",              0x03,   F_ARCHEXT },
  { 0,          CPENC(0,0,0,0,0), 0 },
};
4178
4179 bfd_boolean
4180 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4181 const aarch64_sys_reg *reg)
4182 {
4183 if (!(reg->flags & F_ARCHEXT))
4184 return TRUE;
4185
4186 /* PAN. Values are from aarch64_pstatefields. */
4187 if (reg->value == 0x04
4188 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4189 return FALSE;
4190
4191 /* UAO. Values are from aarch64_pstatefields. */
4192 if (reg->value == 0x03
4193 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4194 return FALSE;
4195
4196 return TRUE;
4197 }
4198
/* Operand table for the IC (instruction cache maintenance) instruction.
   F_HASXT marks operations that take an Xt register operand (see
   aarch64_sys_ins_reg_has_xt).  Terminated by a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4206
/* Operand table for the DC (data cache maintenance) instruction.
   F_HASXT marks operations that take an Xt register operand; F_ARCHEXT
   marks operations gated on an architecture extension (cvap requires
   ARMv8.2 — see aarch64_sys_ins_reg_supported_p).  Terminated by a
   null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",        CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",        CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",        CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4220
/* Operand table for the AT (address translation) instruction.  F_HASXT
   marks operations that take an Xt register operand; the s1e1rp/s1e1wp
   entries are gated on ARMv8.2 (see aarch64_sys_ins_reg_supported_p).
   Terminated by a null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4239
/* Operand table for the TLBI (TLB invalidate) instruction.  F_HASXT
   marks operations that take an Xt register operand.  Terminated by a
   null-name sentinel.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4276
4277 bfd_boolean
4278 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4279 {
4280 return (sys_ins_reg->flags & F_HASXT) != 0;
4281 }
4282
4283 extern bfd_boolean
4284 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4285 const aarch64_sys_ins_reg *reg)
4286 {
4287 if (!(reg->flags & F_ARCHEXT))
4288 return TRUE;
4289
4290 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4291 if (reg->value == CPENS (3, C7, C12, 1)
4292 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4293 return FALSE;
4294
4295 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4296 if ((reg->value == CPENS (0, C7, C9, 0)
4297 || reg->value == CPENS (0, C7, C9, 1))
4298 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4299 return FALSE;
4300
4301 return TRUE;
4302 }
4303
/* The C0..C15 coprocessor-register helpers were only needed by the
   system register / system instruction tables above; drop them so they
   cannot leak into the included opcode table.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract single bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4323
4324 static bfd_boolean
4325 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4326 const aarch64_insn insn)
4327 {
4328 int t = BITS (insn, 4, 0);
4329 int n = BITS (insn, 9, 5);
4330 int t2 = BITS (insn, 14, 10);
4331
4332 if (BIT (insn, 23))
4333 {
4334 /* Write back enabled. */
4335 if ((t == n || t2 == n) && n != 31)
4336 return FALSE;
4337 }
4338
4339 if (BIT (insn, 22))
4340 {
4341 /* Load */
4342 if (t == t2)
4343 return FALSE;
4344 }
4345
4346 return TRUE;
4347 }
4348
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE bytes.  The double shift keeps
     the total shift per operation below 64, avoiding undefined behavior
     when ESIZE == 8 (a single shift by 64 would be UB).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must be a zero-extension (upper bits all clear) or a
     sign-extension (upper bits all set) of an ESIZE-byte element.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element width in which the value is
     replicated; if it replicates all the way down to bytes, DUP with
     .b elements can encode it, so DUPM is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* Account for DUP's optional LSL #8 on the immediate: a value whose
     low byte is zero can be encoded shifted.  (Division is exact here
     since the low 8 bits are zero, so it matches an arithmetic shift
     even for negative SVALUE.)  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUP takes a signed 8-bit immediate; anything outside that range can
     only be handled by DUPM.  */
  return svalue < -128 || svalue >= 128;
}
4375
4376 /* Include the opcode description table as well as the operand description
4377 table. */
4378 #define VERIFIER(x) verify_##x
4379 #include "aarch64-tbl.h"
4380