/* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero when verbose debug tracing is enabled (consulted by the
   DEBUG_TRACE/dump helpers below).  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning; the array
   index is the encoded 5-bit pattern value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning; the array
   index is the encoded 4-bit prfop value.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an AdvSIMD operand-qualifier sequence; used to decide
   which operand carries the size:Q information (see
   significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
129
/* For each data pattern, the index of the operand whose qualifier
   determines the encoding of the size:Q fields.  Indexed by
   enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201
/* Descriptions of the instruction bit-fields, indexed by enum
   aarch64_field_kind.  Each entry is { lsb, width } within the 32-bit
   instruction word; the order must match the field enumeration in the
   header.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0, 4 },	/* imm4_2: in rmif instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
};
324
325 enum aarch64_operand_class
326 aarch64_get_operand_class (enum aarch64_opnd type)
327 {
328 return aarch64_operands[type].op_class;
329 }
330
331 const char *
332 aarch64_get_operand_name (enum aarch64_opnd type)
333 {
334 return aarch64_operands[type].name;
335 }
336
337 /* Get operand description string.
338 This is usually for the diagnosis purpose. */
339 const char *
340 aarch64_get_operand_desc (enum aarch64_opnd type)
341 {
342 return aarch64_operands[type].desc;
343 }
344
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Names after the first in each entry are alternative spellings
   (e.g. "hs" for "cs"; names such as "nlast"/"first" are presumably the
   SVE condition aliases -- confirm against the assembler docs).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
365
366 const aarch64_cond *
367 get_cond_from_value (aarch64_insn value)
368 {
369 assert (value < 16);
370 return &aarch64_conds[(unsigned int) value];
371 }
372
373 const aarch64_cond *
374 get_inverted_cond (const aarch64_cond *cond)
375 {
376 return &aarch64_conds[cond->value ^ 0x1];
377 }
378
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   NOTE: the entry order must stay in sync with enum aarch64_modifier_kind;
   aarch64_get_operand_modifier derives the enum value from the entry's
   position in this array.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
404
405 enum aarch64_modifier_kind
406 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
407 {
408 return desc - aarch64_operand_modifiers;
409 }
410
411 aarch64_insn
412 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
413 {
414 return aarch64_operand_modifiers[kind].value;
415 }
416
417 enum aarch64_modifier_kind
418 aarch64_get_operand_modifier_from_value (aarch64_insn value,
419 bfd_boolean extend_p)
420 {
421 if (extend_p == TRUE)
422 return AARCH64_MOD_UXTB + value;
423 else
424 return AARCH64_MOD_LSL - value;
425 }
426
427 bfd_boolean
428 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
429 {
430 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
431 ? TRUE : FALSE;
432 }
433
434 static inline bfd_boolean
435 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
436 {
437 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
438 ? TRUE : FALSE;
439 }
440
/* Memory-barrier (DMB/DSB) option names, indexed by their 4-bit encoding.
   Encodings with no architectural name are spelt as plain immediates
   ("#0x00" etc.).  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
460
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
473
/* PRFM prefetch operation names, indexed by their 5-bit encoding.
   A NULL name marks an encoding with no architectural alias (it is then
   printed/parsed as a plain immediate).

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
514
/* Utilities on value constraint.  */

/* Return non-zero if VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return !(value < low || value > high);
}
523
/* Return non-zero if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
530
/* Return non-zero if the signed VALUE is representable in a signed
   bit-field WIDTH bits wide (i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1)).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << (width - 1);
    return value >= -lim && value < lim;
  }
}
544
/* Return non-zero if VALUE is representable in an unsigned bit-field
   WIDTH bits wide (i.e. 0 <= VALUE < 2^WIDTH).  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << width;
    return value >= 0 && value < lim;
  }
}
558
559 /* Return 1 if OPERAND is SP or WSP. */
560 int
561 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
562 {
563 return ((aarch64_get_operand_class (operand->type)
564 == AARCH64_OPND_CLASS_INT_REG)
565 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
566 && operand->reg.regno == 31);
567 }
568
569 /* Return 1 if OPERAND is XZR or WZP. */
570 int
571 aarch64_zero_register_p (const aarch64_opnd_info *operand)
572 {
573 return ((aarch64_get_operand_class (operand->type)
574 == AARCH64_OPND_CLASS_INT_REG)
575 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
576 && operand->reg.regno == 31);
577 }
578
579 /* Return true if the operand *OPERAND that has the operand code
580 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
581 qualified by the qualifier TARGET. */
582
583 static inline int
584 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
585 aarch64_opnd_qualifier_t target)
586 {
587 switch (operand->qualifier)
588 {
589 case AARCH64_OPND_QLF_W:
590 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
591 return 1;
592 break;
593 case AARCH64_OPND_QLF_X:
594 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
595 return 1;
596 break;
597 case AARCH64_OPND_QLF_WSP:
598 if (target == AARCH64_OPND_QLF_W
599 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
600 return 1;
601 break;
602 case AARCH64_OPND_QLF_SP:
603 if (target == AARCH64_OPND_QLF_X
604 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
605 return 1;
606 break;
607 default:
608 break;
609 }
610
611 return 0;
612 }
613
614 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
615 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
616
617 Return NIL if more than one expected qualifiers are found. */
618
619 aarch64_opnd_qualifier_t
620 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
621 int idx,
622 const aarch64_opnd_qualifier_t known_qlf,
623 int known_idx)
624 {
625 int i, saved_i;
626
627 /* Special case.
628
629 When the known qualifier is NIL, we have to assume that there is only
630 one qualifier sequence in the *QSEQ_LIST and return the corresponding
631 qualifier directly. One scenario is that for instruction
632 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
633 which has only one possible valid qualifier sequence
634 NIL, S_D
635 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
636 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
637
638 Because the qualifier NIL has dual roles in the qualifier sequence:
639 it can mean no qualifier for the operand, or the qualifer sequence is
640 not in use (when all qualifiers in the sequence are NILs), we have to
641 handle this special case here. */
642 if (known_qlf == AARCH64_OPND_NIL)
643 {
644 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
645 return qseq_list[0][idx];
646 }
647
648 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
649 {
650 if (qseq_list[i][known_idx] == known_qlf)
651 {
652 if (saved_i != -1)
653 /* More than one sequences are found to have KNOWN_QLF at
654 KNOWN_IDX. */
655 return AARCH64_OPND_NIL;
656 saved_i = i;
657 }
658 }
659
660 return qseq_list[saved_i][idx];
661 }
662
/* The broad category a qualifier belongs to; selects how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are the lower
     bound, the upper bound and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
683
/* Indexed by the operand qualifier enumerators; the order must match
   aarch64_opnd_qualifier_t in the header.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* NOTE(review): two consecutive "4b" rows with identical data.  They
     presumably correspond to two distinct qualifier enumerators (a scalar
     S_4B element and the vector arrangement V_4B); confirm against the
     enum in the header, and that the first row's element size of 1 (rather
     than 4) is intended -- later upstream binutils uses {4, 1, 0x0} for
     the scalar entry.  */
  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
741
742 static inline bfd_boolean
743 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
744 {
745 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
746 ? TRUE : FALSE;
747 }
748
749 static inline bfd_boolean
750 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
751 {
752 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
753 ? TRUE : FALSE;
754 }
755
756 const char*
757 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
758 {
759 return aarch64_opnd_qualifiers[qualifier].desc;
760 }
761
762 /* Given an operand qualifier, return the expected data element size
763 of a qualified operand. */
764 unsigned char
765 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
766 {
767 assert (operand_variant_qualifier_p (qualifier) == TRUE);
768 return aarch64_opnd_qualifiers[qualifier].data0;
769 }
770
771 unsigned char
772 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
773 {
774 assert (operand_variant_qualifier_p (qualifier) == TRUE);
775 return aarch64_opnd_qualifiers[qualifier].data1;
776 }
777
778 aarch64_insn
779 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
780 {
781 assert (operand_variant_qualifier_p (qualifier) == TRUE);
782 return aarch64_opnd_qualifiers[qualifier].data2;
783 }
784
785 static int
786 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
787 {
788 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
789 return aarch64_opnd_qualifiers[qualifier].data0;
790 }
791
792 static int
793 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
794 {
795 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
796 return aarch64_opnd_qualifiers[qualifier].data1;
797 }
798
799 #ifdef DEBUG_AARCH64
/* Emit a single debug-trace line to stdout: a "#### " prefix, the
   printf-style message STR, and a trailing newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;
  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
810
811 static inline void
812 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
813 {
814 int i;
815 printf ("#### \t");
816 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
817 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
818 printf ("\n");
819 }
820
821 static void
822 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
823 const aarch64_opnd_qualifier_t *qualifier)
824 {
825 int i;
826 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
827
828 aarch64_verbose ("dump_match_qualifiers:");
829 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
830 curr[i] = opnd[i].qualifier;
831 dump_qualifier_sequence (curr);
832 aarch64_verbose ("against");
833 dump_qualifier_sequence (qualifier);
834 }
835 #endif /* DEBUG_AARCH64 */
836
837 /* TODO improve this, we can have an extra field at the runtime to
838 store the number of operands rather than calculating it every time. */
839
840 int
841 aarch64_num_of_operands (const aarch64_opcode *opcode)
842 {
843 int i = 0;
844 const enum aarch64_opnd *opnds = opcode->operands;
845 while (opnds[i++] != AARCH64_OPND_NIL)
846 ;
847 --i;
848 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
849 return i;
850 }
851
852 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
853 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
854
855 N.B. on the entry, it is very likely that only some operands in *INST
856 have had their qualifiers been established.
857
858 If STOP_AT is not -1, the function will only try to match
859 the qualifier sequence for operands before and including the operand
860 of index STOP_AT; and on success *RET will only be filled with the first
861 (STOP_AT+1) qualifiers.
862
863 A couple examples of the matching algorithm:
864
865 X,W,NIL should match
866 X,W,NIL
867
868 NIL,NIL should match
869 X ,NIL
870
871 Apart from serving the main encoding routine, this can also be called
872 during or after the operand decoding. */
873
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Normalise STOP_AT so that 0 <= stop_at < num_opnds.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  QUALIFIERS_LIST is advanced in step with I, so on
     a successful break it points at the matched sequence (reused below
     when filling *RET).  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have far fewer patterns in the list than
	 AARCH64_MAX_QLF_SEQ_NUM; the first all-NIL sequence marks the end
	 of the list.  An empty sequence matching at i == 0 means the
	 opcode takes no qualifiers at all.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET: the first (STOP_AT+1) entries come from
	 the matched sequence, the remainder are padded with NIL.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
976
977 /* Operand qualifier matching and resolving.
978
979 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
980 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
981
982 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
983 succeeds. */
984
985 static int
986 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
987 {
988 int i, nops;
989 aarch64_opnd_qualifier_seq_t qualifiers;
990
991 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
992 qualifiers))
993 {
994 DEBUG_TRACE ("matching FAIL");
995 return 0;
996 }
997
998 if (inst->opcode->flags & F_STRICT)
999 {
1000 /* Require an exact qualifier match, even for NIL qualifiers. */
1001 nops = aarch64_num_of_operands (inst->opcode);
1002 for (i = 0; i < nops; ++i)
1003 if (inst->operands[i].qualifier != qualifiers[i])
1004 return FALSE;
1005 }
1006
1007 /* Update the qualifiers. */
1008 if (update_p == TRUE)
1009 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1010 {
1011 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1012 break;
1013 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1014 "update %s with %s for operand %d",
1015 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1016 aarch64_get_qualifier_name (qualifiers[i]), i);
1017 inst->operands[i].qualifier = qualifiers[i];
1018 }
1019
1020 DEBUG_TRACE ("matching SUCCESS");
1021 return 1;
1022 }
1023
1024 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1025 register by MOVZ.
1026
1027 IS32 indicates whether value is a 32-bit immediate or not.
1028 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1029 amount will be returned in *SHIFT_AMOUNT. */
1030
1031 bfd_boolean
1032 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1033 {
1034 int amount;
1035
1036 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1037
1038 if (is32)
1039 {
1040 /* Allow all zeros or all ones in top 32-bits, so that
1041 32-bit constant expressions like ~0x80000000 are
1042 permitted. */
1043 uint64_t ext = value;
1044 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1045 /* Immediate out of range. */
1046 return FALSE;
1047 value &= (int64_t) 0xffffffff;
1048 }
1049
1050 /* first, try movz then movn */
1051 amount = -1;
1052 if ((value & ((int64_t) 0xffff << 0)) == value)
1053 amount = 0;
1054 else if ((value & ((int64_t) 0xffff << 16)) == value)
1055 amount = 16;
1056 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1057 amount = 32;
1058 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1059 amount = 48;
1060
1061 if (amount == -1)
1062 {
1063 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1064 return FALSE;
1065 }
1066
1067 if (shift_amount != NULL)
1068 *shift_amount = amount;
1069
1070 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1071
1072 return TRUE;
1073 }
1074
1075 /* Build the accepted values for immediate logical SIMD instructions.
1076
1077 The standard encodings of the immediate value are:
1078 N imms immr SIMD size R S
1079 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1080 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1081 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1082 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1083 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1084 0 11110s 00000r 2 UInt(r) UInt(s)
1085 where all-ones value of S is reserved.
1086
1087 Let's call E the SIMD size.
1088
1089 The immediate value is: S+1 bits '1' rotated to the right by R.
1090
1091 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1092 (remember S != E - 1). */
1093
#define TOTAL_IMM_NB 5334

/* One entry of the logical-immediate lookup table: the replicated 64-bit
   immediate value and its standard bitfield encoding (as produced by
   encode_immediate_bitfield).  */
typedef struct
{
  uint64_t imm;			/* Replicated 64-bit immediate value.  */
  aarch64_insn encoding;	/* Standard N:immr:imms encoding.  */
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid immediates; filled and sorted by IMM
   in build_immediate_table, then binary-searched by
   aarch64_logical_immediate_p.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1103
1104 static int
1105 simd_imm_encoding_cmp(const void *i1, const void *i2)
1106 {
1107 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1108 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1109
1110 if (imm1->imm < imm2->imm)
1111 return -1;
1112 if (imm1->imm > imm2->imm)
1113 return +1;
1114 return 0;
1115 }
1116
/* Pack the standard encoding of an immediate bitfield into imm13:
   imm13<12>  imm13<11:6>  imm13<5:0>   SIMD size  R       S
   1          rrrrrr       ssssss       64         rrrrrr  ssssss
   0          0rrrrr       0sssss       32         rrrrr   sssss
   0          00rrrr       10ssss       16         rrrr    ssss
   0          000rrr       110sss       8          rrr     sss
   0          0000rr       1110ss       4          rr      ss
   0          00000r       11110s       2          r       s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t encoding = s;		/* S (imms) in bits <5:0>.  */
  encoding |= r << 6;			/* R (immr) in bits <11:6>.  */
  encoding |= (uint32_t) is64 << 12;	/* N in bit <12>.  */
  return encoding;
}
1130
/* Populate simd_immediates[] with every valid logical immediate and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can use bsearch.  Invoked lazily, on the
   first call to aarch64_logical_immediate_p.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Intentional fallthrough: each case doubles the pattern
	       until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    /* Record the pair; S carries the size-identifying prefix bits
	       (s_mask) required by the standard encoding.  */
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The number of generated entries must match the static table size.  */
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value for the bsearch in
     aarch64_logical_immediate_p.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1196
1197 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1198 be accepted by logical (immediate) instructions
1199 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1200
1201 ESIZE is the number of bytes in the decoded immediate value.
1202 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1203 VALUE will be returned in *ENCODING. */
1204
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the lookup table on first use.  NOTE(review): the lazy init is
     not thread-safe — presumably callers are single-threaded; confirm.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* Two shifts of esize*4 rather than one of esize*8: for esize == 8 a
     single shift by 64 would be undefined behavior in C.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Look the replicated value up in the sorted table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1248
/* If the 64-bit immediate IMM consists of eight bytes that are each either
   all-zeros or all-ones, i.e.
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh"
   with a, b, c, d, e, f, g and h independently 0 or 1, return the 8-bit
   value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int byte_idx;

  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      uint32_t byte = (imm >> (8 * byte_idx)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  ret |= 1 << byte_idx;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-zeros nor all-ones: not shrinkable.  */
	  return -1;
	}
    }
  return ret;
}
1270
1271 /* Utility inline functions for operand_general_constraint_met_p. */
1272
1273 static inline void
1274 set_error (aarch64_operand_error *mismatch_detail,
1275 enum aarch64_operand_error_kind kind, int idx,
1276 const char* error)
1277 {
1278 if (mismatch_detail == NULL)
1279 return;
1280 mismatch_detail->kind = kind;
1281 mismatch_detail->index = idx;
1282 mismatch_detail->error = error;
1283 }
1284
1285 static inline void
1286 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1287 const char* error)
1288 {
1289 if (mismatch_detail == NULL)
1290 return;
1291 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1292 }
1293
1294 static inline void
1295 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1296 int idx, int lower_bound, int upper_bound,
1297 const char* error)
1298 {
1299 if (mismatch_detail == NULL)
1300 return;
1301 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1302 mismatch_detail->data[0] = lower_bound;
1303 mismatch_detail->data[1] = upper_bound;
1304 }
1305
1306 static inline void
1307 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1308 int idx, int lower_bound, int upper_bound)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1313 _("immediate value"));
1314 }
1315
1316 static inline void
1317 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1318 int idx, int lower_bound, int upper_bound)
1319 {
1320 if (mismatch_detail == NULL)
1321 return;
1322 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1323 _("immediate offset"));
1324 }
1325
1326 static inline void
1327 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1333 _("register number"));
1334 }
1335
1336 static inline void
1337 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1338 int idx, int lower_bound, int upper_bound)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1343 _("register element index"));
1344 }
1345
1346 static inline void
1347 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1348 int idx, int lower_bound, int upper_bound)
1349 {
1350 if (mismatch_detail == NULL)
1351 return;
1352 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1353 _("shift amount"));
1354 }
1355
1356 /* Report that the MUL modifier in operand IDX should be in the range
1357 [LOWER_BOUND, UPPER_BOUND]. */
1358 static inline void
1359 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("multiplier"));
1366 }
1367
1368 static inline void
1369 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1370 int alignment)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1375 mismatch_detail->data[0] = alignment;
1376 }
1377
1378 static inline void
1379 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1380 int expected_num)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1385 mismatch_detail->data[0] = expected_num;
1386 }
1387
1388 static inline void
1389 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1390 const char* error)
1391 {
1392 if (mismatch_detail == NULL)
1393 return;
1394 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1395 }
1396
1397 /* General constraint checking based on operand code.
1398
1399 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1400 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1401
1402 This function has to be called after the qualifiers for all operands
1403 have been resolved.
1404
1405 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1406 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1407 of error message during the disassembling where error message is not
1408 wanted. We avoid the dynamic construction of strings of error messages
1409 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1410 use a combination of error code, static string and some integer data to
1411 represent an error. */
1412
1413 static int
1414 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1415 enum aarch64_opnd type,
1416 const aarch64_opcode *opcode,
1417 aarch64_operand_error *mismatch_detail)
1418 {
1419 unsigned num, modifiers, shift;
1420 unsigned char size;
1421 int64_t imm, min_value, max_value;
1422 uint64_t uvalue, mask;
1423 const aarch64_opnd_info *opnd = opnds + idx;
1424 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1425
1426 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1427
1428 switch (aarch64_operands[type].op_class)
1429 {
1430 case AARCH64_OPND_CLASS_INT_REG:
1431 /* Check pair reg constraints for cas* instructions. */
1432 if (type == AARCH64_OPND_PAIRREG)
1433 {
1434 assert (idx == 1 || idx == 3);
1435 if (opnds[idx - 1].reg.regno % 2 != 0)
1436 {
1437 set_syntax_error (mismatch_detail, idx - 1,
1438 _("reg pair must start from even reg"));
1439 return 0;
1440 }
1441 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1442 {
1443 set_syntax_error (mismatch_detail, idx,
1444 _("reg pair must be contiguous"));
1445 return 0;
1446 }
1447 break;
1448 }
1449
1450 /* <Xt> may be optional in some IC and TLBI instructions. */
1451 if (type == AARCH64_OPND_Rt_SYS)
1452 {
1453 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1454 == AARCH64_OPND_CLASS_SYSTEM));
1455 if (opnds[1].present
1456 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1457 {
1458 set_other_error (mismatch_detail, idx, _("extraneous register"));
1459 return 0;
1460 }
1461 if (!opnds[1].present
1462 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1463 {
1464 set_other_error (mismatch_detail, idx, _("missing register"));
1465 return 0;
1466 }
1467 }
1468 switch (qualifier)
1469 {
1470 case AARCH64_OPND_QLF_WSP:
1471 case AARCH64_OPND_QLF_SP:
1472 if (!aarch64_stack_pointer_p (opnd))
1473 {
1474 set_other_error (mismatch_detail, idx,
1475 _("stack pointer register expected"));
1476 return 0;
1477 }
1478 break;
1479 default:
1480 break;
1481 }
1482 break;
1483
1484 case AARCH64_OPND_CLASS_SVE_REG:
1485 switch (type)
1486 {
1487 case AARCH64_OPND_SVE_Zm3_INDEX:
1488 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1489 case AARCH64_OPND_SVE_Zm4_INDEX:
1490 size = get_operand_fields_width (get_operand_from_code (type));
1491 shift = get_operand_specific_data (&aarch64_operands[type]);
1492 mask = (1 << shift) - 1;
1493 if (opnd->reg.regno > mask)
1494 {
1495 assert (mask == 7 || mask == 15);
1496 set_other_error (mismatch_detail, idx,
1497 mask == 15
1498 ? _("z0-z15 expected")
1499 : _("z0-z7 expected"));
1500 return 0;
1501 }
1502 mask = (1 << (size - shift)) - 1;
1503 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1504 {
1505 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1506 return 0;
1507 }
1508 break;
1509
1510 case AARCH64_OPND_SVE_Zn_INDEX:
1511 size = aarch64_get_qualifier_esize (opnd->qualifier);
1512 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1513 {
1514 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1515 0, 64 / size - 1);
1516 return 0;
1517 }
1518 break;
1519
1520 case AARCH64_OPND_SVE_ZnxN:
1521 case AARCH64_OPND_SVE_ZtxN:
1522 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1523 {
1524 set_other_error (mismatch_detail, idx,
1525 _("invalid register list"));
1526 return 0;
1527 }
1528 break;
1529
1530 default:
1531 break;
1532 }
1533 break;
1534
1535 case AARCH64_OPND_CLASS_PRED_REG:
1536 if (opnd->reg.regno >= 8
1537 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1538 {
1539 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1540 return 0;
1541 }
1542 break;
1543
1544 case AARCH64_OPND_CLASS_COND:
1545 if (type == AARCH64_OPND_COND1
1546 && (opnds[idx].cond->value & 0xe) == 0xe)
1547 {
	  /* Do not allow AL or NV.  */
1549 set_syntax_error (mismatch_detail, idx, NULL);
1550 }
1551 break;
1552
1553 case AARCH64_OPND_CLASS_ADDRESS:
1554 /* Check writeback. */
1555 switch (opcode->iclass)
1556 {
1557 case ldst_pos:
1558 case ldst_unscaled:
1559 case ldstnapair_offs:
1560 case ldstpair_off:
1561 case ldst_unpriv:
1562 if (opnd->addr.writeback == 1)
1563 {
1564 set_syntax_error (mismatch_detail, idx,
1565 _("unexpected address writeback"));
1566 return 0;
1567 }
1568 break;
1569 case ldst_imm10:
1570 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1571 {
1572 set_syntax_error (mismatch_detail, idx,
1573 _("unexpected address writeback"));
1574 return 0;
1575 }
1576 break;
1577 case ldst_imm9:
1578 case ldstpair_indexed:
1579 case asisdlsep:
1580 case asisdlsop:
1581 if (opnd->addr.writeback == 0)
1582 {
1583 set_syntax_error (mismatch_detail, idx,
1584 _("address writeback expected"));
1585 return 0;
1586 }
1587 break;
1588 default:
1589 assert (opnd->addr.writeback == 0);
1590 break;
1591 }
1592 switch (type)
1593 {
1594 case AARCH64_OPND_ADDR_SIMM7:
1595 /* Scaled signed 7 bits immediate offset. */
1596 /* Get the size of the data element that is accessed, which may be
1597 different from that of the source register size,
1598 e.g. in strb/ldrb. */
1599 size = aarch64_get_qualifier_esize (opnd->qualifier);
1600 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1601 {
1602 set_offset_out_of_range_error (mismatch_detail, idx,
1603 -64 * size, 63 * size);
1604 return 0;
1605 }
1606 if (!value_aligned_p (opnd->addr.offset.imm, size))
1607 {
1608 set_unaligned_error (mismatch_detail, idx, size);
1609 return 0;
1610 }
1611 break;
1612 case AARCH64_OPND_ADDR_OFFSET:
1613 case AARCH64_OPND_ADDR_SIMM9:
1614 /* Unscaled signed 9 bits immediate offset. */
1615 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1616 {
1617 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1618 return 0;
1619 }
1620 break;
1621
1622 case AARCH64_OPND_ADDR_SIMM9_2:
1623 /* Unscaled signed 9 bits immediate offset, which has to be negative
1624 or unaligned. */
1625 size = aarch64_get_qualifier_esize (qualifier);
1626 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1627 && !value_aligned_p (opnd->addr.offset.imm, size))
1628 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1629 return 1;
1630 set_other_error (mismatch_detail, idx,
1631 _("negative or unaligned offset expected"));
1632 return 0;
1633
1634 case AARCH64_OPND_ADDR_SIMM10:
1635 /* Scaled signed 10 bits immediate offset. */
1636 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1637 {
1638 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1639 return 0;
1640 }
1641 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1642 {
1643 set_unaligned_error (mismatch_detail, idx, 8);
1644 return 0;
1645 }
1646 break;
1647
1648 case AARCH64_OPND_SIMD_ADDR_POST:
1649 /* AdvSIMD load/store multiple structures, post-index. */
1650 assert (idx == 1);
1651 if (opnd->addr.offset.is_reg)
1652 {
1653 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1654 return 1;
1655 else
1656 {
1657 set_other_error (mismatch_detail, idx,
1658 _("invalid register offset"));
1659 return 0;
1660 }
1661 }
1662 else
1663 {
1664 const aarch64_opnd_info *prev = &opnds[idx-1];
1665 unsigned num_bytes; /* total number of bytes transferred. */
1666 /* The opcode dependent area stores the number of elements in
1667 each structure to be loaded/stored. */
1668 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1669 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1670 /* Special handling of loading single structure to all lane. */
1671 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1672 * aarch64_get_qualifier_esize (prev->qualifier);
1673 else
1674 num_bytes = prev->reglist.num_regs
1675 * aarch64_get_qualifier_esize (prev->qualifier)
1676 * aarch64_get_qualifier_nelem (prev->qualifier);
1677 if ((int) num_bytes != opnd->addr.offset.imm)
1678 {
1679 set_other_error (mismatch_detail, idx,
1680 _("invalid post-increment amount"));
1681 return 0;
1682 }
1683 }
1684 break;
1685
1686 case AARCH64_OPND_ADDR_REGOFF:
1687 /* Get the size of the data element that is accessed, which may be
1688 different from that of the source register size,
1689 e.g. in strb/ldrb. */
1690 size = aarch64_get_qualifier_esize (opnd->qualifier);
1691 /* It is either no shift or shift by the binary logarithm of SIZE. */
1692 if (opnd->shifter.amount != 0
1693 && opnd->shifter.amount != (int)get_logsz (size))
1694 {
1695 set_other_error (mismatch_detail, idx,
1696 _("invalid shift amount"));
1697 return 0;
1698 }
1699 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1700 operators. */
1701 switch (opnd->shifter.kind)
1702 {
1703 case AARCH64_MOD_UXTW:
1704 case AARCH64_MOD_LSL:
1705 case AARCH64_MOD_SXTW:
1706 case AARCH64_MOD_SXTX: break;
1707 default:
1708 set_other_error (mismatch_detail, idx,
1709 _("invalid extend/shift operator"));
1710 return 0;
1711 }
1712 break;
1713
1714 case AARCH64_OPND_ADDR_UIMM12:
1715 imm = opnd->addr.offset.imm;
1716 /* Get the size of the data element that is accessed, which may be
1717 different from that of the source register size,
1718 e.g. in strb/ldrb. */
1719 size = aarch64_get_qualifier_esize (qualifier);
1720 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1721 {
1722 set_offset_out_of_range_error (mismatch_detail, idx,
1723 0, 4095 * size);
1724 return 0;
1725 }
1726 if (!value_aligned_p (opnd->addr.offset.imm, size))
1727 {
1728 set_unaligned_error (mismatch_detail, idx, size);
1729 return 0;
1730 }
1731 break;
1732
1733 case AARCH64_OPND_ADDR_PCREL14:
1734 case AARCH64_OPND_ADDR_PCREL19:
1735 case AARCH64_OPND_ADDR_PCREL21:
1736 case AARCH64_OPND_ADDR_PCREL26:
1737 imm = opnd->imm.value;
1738 if (operand_need_shift_by_two (get_operand_from_code (type)))
1739 {
	      /* The offset value in a PC-relative branch instruction is always
		 4-byte aligned and is encoded without the lowest 2 bits.  */
1742 if (!value_aligned_p (imm, 4))
1743 {
1744 set_unaligned_error (mismatch_detail, idx, 4);
1745 return 0;
1746 }
1747 /* Right shift by 2 so that we can carry out the following check
1748 canonically. */
1749 imm >>= 2;
1750 }
1751 size = get_operand_fields_width (get_operand_from_code (type));
1752 if (!value_fit_signed_field_p (imm, size))
1753 {
1754 set_other_error (mismatch_detail, idx,
1755 _("immediate out of range"));
1756 return 0;
1757 }
1758 break;
1759
1760 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1761 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1762 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1763 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1764 min_value = -8;
1765 max_value = 7;
1766 sve_imm_offset_vl:
1767 assert (!opnd->addr.offset.is_reg);
1768 assert (opnd->addr.preind);
1769 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1770 min_value *= num;
1771 max_value *= num;
1772 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1773 || (opnd->shifter.operator_present
1774 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1775 {
1776 set_other_error (mismatch_detail, idx,
1777 _("invalid addressing mode"));
1778 return 0;
1779 }
1780 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1781 {
1782 set_offset_out_of_range_error (mismatch_detail, idx,
1783 min_value, max_value);
1784 return 0;
1785 }
1786 if (!value_aligned_p (opnd->addr.offset.imm, num))
1787 {
1788 set_unaligned_error (mismatch_detail, idx, num);
1789 return 0;
1790 }
1791 break;
1792
1793 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1794 min_value = -32;
1795 max_value = 31;
1796 goto sve_imm_offset_vl;
1797
1798 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1799 min_value = -256;
1800 max_value = 255;
1801 goto sve_imm_offset_vl;
1802
1803 case AARCH64_OPND_SVE_ADDR_RI_U6:
1804 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1805 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1806 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1807 min_value = 0;
1808 max_value = 63;
1809 sve_imm_offset:
1810 assert (!opnd->addr.offset.is_reg);
1811 assert (opnd->addr.preind);
1812 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1813 min_value *= num;
1814 max_value *= num;
1815 if (opnd->shifter.operator_present
1816 || opnd->shifter.amount_present)
1817 {
1818 set_other_error (mismatch_detail, idx,
1819 _("invalid addressing mode"));
1820 return 0;
1821 }
1822 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1823 {
1824 set_offset_out_of_range_error (mismatch_detail, idx,
1825 min_value, max_value);
1826 return 0;
1827 }
1828 if (!value_aligned_p (opnd->addr.offset.imm, num))
1829 {
1830 set_unaligned_error (mismatch_detail, idx, num);
1831 return 0;
1832 }
1833 break;
1834
1835 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1836 min_value = -8;
1837 max_value = 7;
1838 goto sve_imm_offset;
1839
1840 case AARCH64_OPND_SVE_ADDR_R:
1841 case AARCH64_OPND_SVE_ADDR_RR:
1842 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1843 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1844 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1845 case AARCH64_OPND_SVE_ADDR_RX:
1846 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1847 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1848 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1849 case AARCH64_OPND_SVE_ADDR_RZ:
1850 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1851 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1852 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1853 modifiers = 1 << AARCH64_MOD_LSL;
1854 sve_rr_operand:
1855 assert (opnd->addr.offset.is_reg);
1856 assert (opnd->addr.preind);
1857 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1858 && opnd->addr.offset.regno == 31)
1859 {
1860 set_other_error (mismatch_detail, idx,
1861 _("index register xzr is not allowed"));
1862 return 0;
1863 }
1864 if (((1 << opnd->shifter.kind) & modifiers) == 0
1865 || (opnd->shifter.amount
1866 != get_operand_specific_data (&aarch64_operands[type])))
1867 {
1868 set_other_error (mismatch_detail, idx,
1869 _("invalid addressing mode"));
1870 return 0;
1871 }
1872 break;
1873
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1878 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1879 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1880 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1881 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1882 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1883 goto sve_rr_operand;
1884
1885 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1886 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1887 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1888 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1889 min_value = 0;
1890 max_value = 31;
1891 goto sve_imm_offset;
1892
1893 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1894 modifiers = 1 << AARCH64_MOD_LSL;
1895 sve_zz_operand:
1896 assert (opnd->addr.offset.is_reg);
1897 assert (opnd->addr.preind);
1898 if (((1 << opnd->shifter.kind) & modifiers) == 0
1899 || opnd->shifter.amount < 0
1900 || opnd->shifter.amount > 3)
1901 {
1902 set_other_error (mismatch_detail, idx,
1903 _("invalid addressing mode"));
1904 return 0;
1905 }
1906 break;
1907
1908 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1909 modifiers = (1 << AARCH64_MOD_SXTW);
1910 goto sve_zz_operand;
1911
1912 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1913 modifiers = 1 << AARCH64_MOD_UXTW;
1914 goto sve_zz_operand;
1915
1916 default:
1917 break;
1918 }
1919 break;
1920
1921 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1922 if (type == AARCH64_OPND_LEt)
1923 {
1924 /* Get the upper bound for the element index. */
1925 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1926 if (!value_in_range_p (opnd->reglist.index, 0, num))
1927 {
1928 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1929 return 0;
1930 }
1931 }
1932 /* The opcode dependent area stores the number of elements in
1933 each structure to be loaded/stored. */
1934 num = get_opcode_dependent_value (opcode);
1935 switch (type)
1936 {
1937 case AARCH64_OPND_LVt:
1938 assert (num >= 1 && num <= 4);
1939 /* Unless LD1/ST1, the number of registers should be equal to that
1940 of the structure elements. */
1941 if (num != 1 && opnd->reglist.num_regs != num)
1942 {
1943 set_reg_list_error (mismatch_detail, idx, num);
1944 return 0;
1945 }
1946 break;
1947 case AARCH64_OPND_LVt_AL:
1948 case AARCH64_OPND_LEt:
1949 assert (num >= 1 && num <= 4);
1950 /* The number of registers should be equal to that of the structure
1951 elements. */
1952 if (opnd->reglist.num_regs != num)
1953 {
1954 set_reg_list_error (mismatch_detail, idx, num);
1955 return 0;
1956 }
1957 break;
1958 default:
1959 break;
1960 }
1961 break;
1962
1963 case AARCH64_OPND_CLASS_IMMEDIATE:
1964 /* Constraint check on immediate operand. */
1965 imm = opnd->imm.value;
1966 /* E.g. imm_0_31 constrains value to be 0..31. */
1967 if (qualifier_value_in_range_constraint_p (qualifier)
1968 && !value_in_range_p (imm, get_lower_bound (qualifier),
1969 get_upper_bound (qualifier)))
1970 {
1971 set_imm_out_of_range_error (mismatch_detail, idx,
1972 get_lower_bound (qualifier),
1973 get_upper_bound (qualifier));
1974 return 0;
1975 }
1976
1977 switch (type)
1978 {
1979 case AARCH64_OPND_AIMM:
1980 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1981 {
1982 set_other_error (mismatch_detail, idx,
1983 _("invalid shift operator"));
1984 return 0;
1985 }
1986 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1987 {
1988 set_other_error (mismatch_detail, idx,
1989 _("shift amount must be 0 or 12"));
1990 return 0;
1991 }
1992 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1993 {
1994 set_other_error (mismatch_detail, idx,
1995 _("immediate out of range"));
1996 return 0;
1997 }
1998 break;
1999
2000 case AARCH64_OPND_HALF:
2001 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2002 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2003 {
2004 set_other_error (mismatch_detail, idx,
2005 _("invalid shift operator"));
2006 return 0;
2007 }
2008 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2009 if (!value_aligned_p (opnd->shifter.amount, 16))
2010 {
2011 set_other_error (mismatch_detail, idx,
2012 _("shift amount must be a multiple of 16"));
2013 return 0;
2014 }
2015 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2016 {
2017 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2018 0, size * 8 - 16);
2019 return 0;
2020 }
2021 if (opnd->imm.value < 0)
2022 {
2023 set_other_error (mismatch_detail, idx,
2024 _("negative immediate value not allowed"));
2025 return 0;
2026 }
2027 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2028 {
2029 set_other_error (mismatch_detail, idx,
2030 _("immediate out of range"));
2031 return 0;
2032 }
2033 break;
2034
2035 case AARCH64_OPND_IMM_MOV:
2036 {
2037 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2038 imm = opnd->imm.value;
2039 assert (idx == 1);
2040 switch (opcode->op)
2041 {
2042 case OP_MOV_IMM_WIDEN:
2043 imm = ~imm;
2044 /* Fall through. */
2045 case OP_MOV_IMM_WIDE:
2046 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2047 {
2048 set_other_error (mismatch_detail, idx,
2049 _("immediate out of range"));
2050 return 0;
2051 }
2052 break;
2053 case OP_MOV_IMM_LOG:
2054 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2055 {
2056 set_other_error (mismatch_detail, idx,
2057 _("immediate out of range"));
2058 return 0;
2059 }
2060 break;
2061 default:
2062 assert (0);
2063 return 0;
2064 }
2065 }
2066 break;
2067
2068 case AARCH64_OPND_NZCV:
2069 case AARCH64_OPND_CCMP_IMM:
2070 case AARCH64_OPND_EXCEPTION:
2071 case AARCH64_OPND_UIMM4:
2072 case AARCH64_OPND_UIMM7:
2073 case AARCH64_OPND_UIMM3_OP1:
2074 case AARCH64_OPND_UIMM3_OP2:
2075 case AARCH64_OPND_SVE_UIMM3:
2076 case AARCH64_OPND_SVE_UIMM7:
2077 case AARCH64_OPND_SVE_UIMM8:
2078 case AARCH64_OPND_SVE_UIMM8_53:
2079 size = get_operand_fields_width (get_operand_from_code (type));
2080 assert (size < 32);
2081 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2082 {
2083 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2084 (1 << size) - 1);
2085 return 0;
2086 }
2087 break;
2088
2089 case AARCH64_OPND_SIMM5:
2090 case AARCH64_OPND_SVE_SIMM5:
2091 case AARCH64_OPND_SVE_SIMM5B:
2092 case AARCH64_OPND_SVE_SIMM6:
2093 case AARCH64_OPND_SVE_SIMM8:
2094 size = get_operand_fields_width (get_operand_from_code (type));
2095 assert (size < 32);
2096 if (!value_fit_signed_field_p (opnd->imm.value, size))
2097 {
2098 set_imm_out_of_range_error (mismatch_detail, idx,
2099 -(1 << (size - 1)),
2100 (1 << (size - 1)) - 1);
2101 return 0;
2102 }
2103 break;
2104
2105 case AARCH64_OPND_WIDTH:
2106 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2107 && opnds[0].type == AARCH64_OPND_Rd);
2108 size = get_upper_bound (qualifier);
2109 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2110 /* lsb+width <= reg.size */
2111 {
2112 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2113 size - opnds[idx-1].imm.value);
2114 return 0;
2115 }
2116 break;
2117
2118 case AARCH64_OPND_LIMM:
2119 case AARCH64_OPND_SVE_LIMM:
2120 {
2121 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2122 uint64_t uimm = opnd->imm.value;
2123 if (opcode->op == OP_BIC)
2124 uimm = ~uimm;
2125 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2126 {
2127 set_other_error (mismatch_detail, idx,
2128 _("immediate out of range"));
2129 return 0;
2130 }
2131 }
2132 break;
2133
2134 case AARCH64_OPND_IMM0:
2135 case AARCH64_OPND_FPIMM0:
2136 if (opnd->imm.value != 0)
2137 {
2138 set_other_error (mismatch_detail, idx,
2139 _("immediate zero expected"));
2140 return 0;
2141 }
2142 break;
2143
2144 case AARCH64_OPND_IMM_ROT1:
2145 case AARCH64_OPND_IMM_ROT2:
2146 case AARCH64_OPND_SVE_IMM_ROT2:
2147 if (opnd->imm.value != 0
2148 && opnd->imm.value != 90
2149 && opnd->imm.value != 180
2150 && opnd->imm.value != 270)
2151 {
2152 set_other_error (mismatch_detail, idx,
2153 _("rotate expected to be 0, 90, 180 or 270"));
2154 return 0;
2155 }
2156 break;
2157
2158 case AARCH64_OPND_IMM_ROT3:
2159 case AARCH64_OPND_SVE_IMM_ROT1:
2160 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2161 {
2162 set_other_error (mismatch_detail, idx,
2163 _("rotate expected to be 90 or 270"));
2164 return 0;
2165 }
2166 break;
2167
2168 case AARCH64_OPND_SHLL_IMM:
2169 assert (idx == 2);
2170 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2171 if (opnd->imm.value != size)
2172 {
2173 set_other_error (mismatch_detail, idx,
2174 _("invalid shift amount"));
2175 return 0;
2176 }
2177 break;
2178
2179 case AARCH64_OPND_IMM_VLSL:
2180 size = aarch64_get_qualifier_esize (qualifier);
2181 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2182 {
2183 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2184 size * 8 - 1);
2185 return 0;
2186 }
2187 break;
2188
2189 case AARCH64_OPND_IMM_VLSR:
2190 size = aarch64_get_qualifier_esize (qualifier);
2191 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2192 {
2193 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_SIMD_IMM:
2199 case AARCH64_OPND_SIMD_IMM_SFT:
2200 /* Qualifier check. */
2201 switch (qualifier)
2202 {
2203 case AARCH64_OPND_QLF_LSL:
2204 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2205 {
2206 set_other_error (mismatch_detail, idx,
2207 _("invalid shift operator"));
2208 return 0;
2209 }
2210 break;
2211 case AARCH64_OPND_QLF_MSL:
2212 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2213 {
2214 set_other_error (mismatch_detail, idx,
2215 _("invalid shift operator"));
2216 return 0;
2217 }
2218 break;
2219 case AARCH64_OPND_QLF_NIL:
2220 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2221 {
2222 set_other_error (mismatch_detail, idx,
2223 _("shift is not permitted"));
2224 return 0;
2225 }
2226 break;
2227 default:
2228 assert (0);
2229 return 0;
2230 }
2231 /* Is the immediate valid? */
2232 assert (idx == 1);
2233 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2234 {
2235 /* uimm8 or simm8 */
2236 if (!value_in_range_p (opnd->imm.value, -128, 255))
2237 {
2238 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2239 return 0;
2240 }
2241 }
2242 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2243 {
2244 /* uimm64 is not
2245 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2246 ffffffffgggggggghhhhhhhh'. */
2247 set_other_error (mismatch_detail, idx,
2248 _("invalid value for immediate"));
2249 return 0;
2250 }
2251 /* Is the shift amount valid? */
2252 switch (opnd->shifter.kind)
2253 {
2254 case AARCH64_MOD_LSL:
2255 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2256 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2257 {
2258 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2259 (size - 1) * 8);
2260 return 0;
2261 }
2262 if (!value_aligned_p (opnd->shifter.amount, 8))
2263 {
2264 set_unaligned_error (mismatch_detail, idx, 8);
2265 return 0;
2266 }
2267 break;
2268 case AARCH64_MOD_MSL:
2269 /* Only 8 and 16 are valid shift amount. */
2270 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2271 {
2272 set_other_error (mismatch_detail, idx,
2273 _("shift amount must be 0 or 16"));
2274 return 0;
2275 }
2276 break;
2277 default:
2278 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2279 {
2280 set_other_error (mismatch_detail, idx,
2281 _("invalid shift operator"));
2282 return 0;
2283 }
2284 break;
2285 }
2286 break;
2287
2288 case AARCH64_OPND_FPIMM:
2289 case AARCH64_OPND_SIMD_FPIMM:
2290 case AARCH64_OPND_SVE_FPIMM8:
2291 if (opnd->imm.is_fp == 0)
2292 {
2293 set_other_error (mismatch_detail, idx,
2294 _("floating-point immediate expected"));
2295 return 0;
2296 }
2297 /* The value is expected to be an 8-bit floating-point constant with
2298 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2299 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2300 instruction). */
2301 if (!value_in_range_p (opnd->imm.value, 0, 255))
2302 {
2303 set_other_error (mismatch_detail, idx,
2304 _("immediate out of range"));
2305 return 0;
2306 }
2307 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2308 {
2309 set_other_error (mismatch_detail, idx,
2310 _("invalid shift operator"));
2311 return 0;
2312 }
2313 break;
2314
2315 case AARCH64_OPND_SVE_AIMM:
2316 min_value = 0;
2317 sve_aimm:
2318 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2319 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2320 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2321 uvalue = opnd->imm.value;
2322 shift = opnd->shifter.amount;
2323 if (size == 1)
2324 {
2325 if (shift != 0)
2326 {
2327 set_other_error (mismatch_detail, idx,
2328 _("no shift amount allowed for"
2329 " 8-bit constants"));
2330 return 0;
2331 }
2332 }
2333 else
2334 {
2335 if (shift != 0 && shift != 8)
2336 {
2337 set_other_error (mismatch_detail, idx,
2338 _("shift amount must be 0 or 8"));
2339 return 0;
2340 }
2341 if (shift == 0 && (uvalue & 0xff) == 0)
2342 {
2343 shift = 8;
2344 uvalue = (int64_t) uvalue / 256;
2345 }
2346 }
2347 mask >>= shift;
2348 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2349 {
2350 set_other_error (mismatch_detail, idx,
2351 _("immediate too big for element size"));
2352 return 0;
2353 }
2354 uvalue = (uvalue - min_value) & mask;
2355 if (uvalue > 0xff)
2356 {
2357 set_other_error (mismatch_detail, idx,
2358 _("invalid arithmetic immediate"));
2359 return 0;
2360 }
2361 break;
2362
2363 case AARCH64_OPND_SVE_ASIMM:
2364 min_value = -128;
2365 goto sve_aimm;
2366
2367 case AARCH64_OPND_SVE_I1_HALF_ONE:
2368 assert (opnd->imm.is_fp);
2369 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2370 {
2371 set_other_error (mismatch_detail, idx,
2372 _("floating-point value must be 0.5 or 1.0"));
2373 return 0;
2374 }
2375 break;
2376
2377 case AARCH64_OPND_SVE_I1_HALF_TWO:
2378 assert (opnd->imm.is_fp);
2379 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2380 {
2381 set_other_error (mismatch_detail, idx,
2382 _("floating-point value must be 0.5 or 2.0"));
2383 return 0;
2384 }
2385 break;
2386
2387 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2388 assert (opnd->imm.is_fp);
2389 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2390 {
2391 set_other_error (mismatch_detail, idx,
2392 _("floating-point value must be 0.0 or 1.0"));
2393 return 0;
2394 }
2395 break;
2396
2397 case AARCH64_OPND_SVE_INV_LIMM:
2398 {
2399 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2400 uint64_t uimm = ~opnd->imm.value;
2401 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2402 {
2403 set_other_error (mismatch_detail, idx,
2404 _("immediate out of range"));
2405 return 0;
2406 }
2407 }
2408 break;
2409
2410 case AARCH64_OPND_SVE_LIMM_MOV:
2411 {
2412 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2413 uint64_t uimm = opnd->imm.value;
2414 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2415 {
2416 set_other_error (mismatch_detail, idx,
2417 _("immediate out of range"));
2418 return 0;
2419 }
2420 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2421 {
2422 set_other_error (mismatch_detail, idx,
2423 _("invalid replicated MOV immediate"));
2424 return 0;
2425 }
2426 }
2427 break;
2428
2429 case AARCH64_OPND_SVE_PATTERN_SCALED:
2430 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2431 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2432 {
2433 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2434 return 0;
2435 }
2436 break;
2437
2438 case AARCH64_OPND_SVE_SHLIMM_PRED:
2439 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2440 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2441 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2442 {
2443 set_imm_out_of_range_error (mismatch_detail, idx,
2444 0, 8 * size - 1);
2445 return 0;
2446 }
2447 break;
2448
2449 case AARCH64_OPND_SVE_SHRIMM_PRED:
2450 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2451 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2452 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2453 {
2454 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2455 return 0;
2456 }
2457 break;
2458
2459 default:
2460 break;
2461 }
2462 break;
2463
2464 case AARCH64_OPND_CLASS_SYSTEM:
2465 switch (type)
2466 {
2467 case AARCH64_OPND_PSTATEFIELD:
2468 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2469 /* MSR UAO, #uimm4
2470 MSR PAN, #uimm4
2471 The immediate must be #0 or #1. */
2472 if ((opnd->pstatefield == 0x03 /* UAO. */
2473 || opnd->pstatefield == 0x04 /* PAN. */
2474 || opnd->pstatefield == 0x1a) /* DIT. */
2475 && opnds[1].imm.value > 1)
2476 {
2477 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2478 return 0;
2479 }
2480 /* MSR SPSel, #uimm4
2481 Uses uimm4 as a control value to select the stack pointer: if
2482 bit 0 is set it selects the current exception level's stack
2483 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2484 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2485 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2486 {
2487 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2488 return 0;
2489 }
2490 break;
2491 default:
2492 break;
2493 }
2494 break;
2495
2496 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2497 /* Get the upper bound for the element index. */
2498 if (opcode->op == OP_FCMLA_ELEM)
2499 /* FCMLA index range depends on the vector size of other operands
2500 and is halfed because complex numbers take two elements. */
2501 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2502 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2503 else
2504 num = 16;
2505 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2506
2507 /* Index out-of-range. */
2508 if (!value_in_range_p (opnd->reglane.index, 0, num))
2509 {
2510 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2511 return 0;
2512 }
2513 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2514 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2515 number is encoded in "size:M:Rm":
2516 size <Vm>
2517 00 RESERVED
2518 01 0:Rm
2519 10 M:Rm
2520 11 RESERVED */
2521 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2522 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2523 {
2524 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2525 return 0;
2526 }
2527 break;
2528
2529 case AARCH64_OPND_CLASS_MODIFIED_REG:
2530 assert (idx == 1 || idx == 2);
2531 switch (type)
2532 {
2533 case AARCH64_OPND_Rm_EXT:
2534 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2535 && opnd->shifter.kind != AARCH64_MOD_LSL)
2536 {
2537 set_other_error (mismatch_detail, idx,
2538 _("extend operator expected"));
2539 return 0;
2540 }
2541 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2542 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2543 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2544 case. */
2545 if (!aarch64_stack_pointer_p (opnds + 0)
2546 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2547 {
2548 if (!opnd->shifter.operator_present)
2549 {
2550 set_other_error (mismatch_detail, idx,
2551 _("missing extend operator"));
2552 return 0;
2553 }
2554 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2555 {
2556 set_other_error (mismatch_detail, idx,
2557 _("'LSL' operator not allowed"));
2558 return 0;
2559 }
2560 }
2561 assert (opnd->shifter.operator_present /* Default to LSL. */
2562 || opnd->shifter.kind == AARCH64_MOD_LSL);
2563 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2564 {
2565 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2566 return 0;
2567 }
2568 /* In the 64-bit form, the final register operand is written as Wm
2569 for all but the (possibly omitted) UXTX/LSL and SXTX
2570 operators.
2571 N.B. GAS allows X register to be used with any operator as a
2572 programming convenience. */
2573 if (qualifier == AARCH64_OPND_QLF_X
2574 && opnd->shifter.kind != AARCH64_MOD_LSL
2575 && opnd->shifter.kind != AARCH64_MOD_UXTX
2576 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2577 {
2578 set_other_error (mismatch_detail, idx, _("W register expected"));
2579 return 0;
2580 }
2581 break;
2582
2583 case AARCH64_OPND_Rm_SFT:
2584 /* ROR is not available to the shifted register operand in
2585 arithmetic instructions. */
2586 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2587 {
2588 set_other_error (mismatch_detail, idx,
2589 _("shift operator expected"));
2590 return 0;
2591 }
2592 if (opnd->shifter.kind == AARCH64_MOD_ROR
2593 && opcode->iclass != log_shift)
2594 {
2595 set_other_error (mismatch_detail, idx,
2596 _("'ROR' operator not allowed"));
2597 return 0;
2598 }
2599 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2600 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2601 {
2602 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2603 return 0;
2604 }
2605 break;
2606
2607 default:
2608 break;
2609 }
2610 break;
2611
2612 default:
2613 break;
2614 }
2615
2616 return 1;
2617 }
2618
2619 /* Main entrypoint for the operand constraint checking.
2620
2621 Return 1 if operands of *INST meet the constraint applied by the operand
2622 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2623 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2624 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2625 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2626 error kind when it is notified that an instruction does not pass the check).
2627
2628 Un-determined operand qualifiers may get established during the process. */
2629
2630 int
2631 aarch64_match_operands_constraint (aarch64_inst *inst,
2632 aarch64_operand_error *mismatch_detail)
2633 {
2634 int i;
2635
2636 DEBUG_TRACE ("enter");
2637
2638 /* Check for cases where a source register needs to be the same as the
2639 destination register. Do this before matching qualifiers since if
2640 an instruction has both invalid tying and invalid qualifiers,
2641 the error about qualifiers would suggest several alternative
2642 instructions that also have invalid tying. */
2643 i = inst->opcode->tied_operand;
2644 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2645 {
2646 if (mismatch_detail)
2647 {
2648 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2649 mismatch_detail->index = i;
2650 mismatch_detail->error = NULL;
2651 }
2652 return 0;
2653 }
2654
2655 /* Match operands' qualifier.
2656 *INST has already had qualifier establish for some, if not all, of
2657 its operands; we need to find out whether these established
2658 qualifiers match one of the qualifier sequence in
2659 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2660 with the corresponding qualifier in such a sequence.
2661 Only basic operand constraint checking is done here; the more thorough
2662 constraint checking will carried out by operand_general_constraint_met_p,
2663 which has be to called after this in order to get all of the operands'
2664 qualifiers established. */
2665 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2666 {
2667 DEBUG_TRACE ("FAIL on operand qualifier matching");
2668 if (mismatch_detail)
2669 {
2670 /* Return an error type to indicate that it is the qualifier
2671 matching failure; we don't care about which operand as there
2672 are enough information in the opcode table to reproduce it. */
2673 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2674 mismatch_detail->index = -1;
2675 mismatch_detail->error = NULL;
2676 }
2677 return 0;
2678 }
2679
2680 /* Match operands' constraint. */
2681 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2682 {
2683 enum aarch64_opnd type = inst->opcode->operands[i];
2684 if (type == AARCH64_OPND_NIL)
2685 break;
2686 if (inst->operands[i].skip)
2687 {
2688 DEBUG_TRACE ("skip the incomplete operand %d", i);
2689 continue;
2690 }
2691 if (operand_general_constraint_met_p (inst->operands, i, type,
2692 inst->opcode, mismatch_detail) == 0)
2693 {
2694 DEBUG_TRACE ("FAIL on operand %d", i);
2695 return 0;
2696 }
2697 }
2698
2699 DEBUG_TRACE ("PASS");
2700
2701 return 1;
2702 }
2703
2704 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2705 Also updates the TYPE of each INST->OPERANDS with the corresponding
2706 value of OPCODE->OPERANDS.
2707
2708 Note that some operand qualifiers may need to be manually cleared by
2709 the caller before it further calls the aarch64_opcode_encode; by
2710 doing this, it helps the qualifier matching facilities work
2711 properly. */
2712
2713 const aarch64_opcode*
2714 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2715 {
2716 int i;
2717 const aarch64_opcode *old = inst->opcode;
2718
2719 inst->opcode = opcode;
2720
2721 /* Update the operand types. */
2722 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2723 {
2724 inst->operands[i].type = opcode->operands[i];
2725 if (opcode->operands[i] == AARCH64_OPND_NIL)
2726 break;
2727 }
2728
2729 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2730
2731 return old;
2732 }
2733
2734 int
2735 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2736 {
2737 int i;
2738 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2739 if (operands[i] == operand)
2740 return i;
2741 else if (operands[i] == AARCH64_OPND_NIL)
2742 break;
2743 return -1;
2744 }
2745
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names.  Entry 31 is the stack pointer in the first
   bank and the zero register in the second; see get_int_reg_name.
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2777
2778 /* Return the integer register name.
2779 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2780
2781 static inline const char *
2782 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2783 {
2784 const int has_zr = sp_reg_p ? 0 : 1;
2785 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2786 return int_reg[has_zr][is_64][regno];
2787 }
2788
2789 /* Like get_int_reg_name, but IS_64 is always 1. */
2790
2791 static inline const char *
2792 get_64bit_int_reg_name (int regno, int sp_reg_p)
2793 {
2794 const int has_zr = sp_reg_p ? 0 : 1;
2795 return int_reg[has_zr][1][regno];
2796 }
2797
2798 /* Get the name of the integer offset register in OPND, using the shift type
2799 to decide whether it's a word or doubleword. */
2800
2801 static inline const char *
2802 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2803 {
2804 switch (opnd->shifter.kind)
2805 {
2806 case AARCH64_MOD_UXTW:
2807 case AARCH64_MOD_SXTW:
2808 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2809
2810 case AARCH64_MOD_LSL:
2811 case AARCH64_MOD_SXTX:
2812 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2813
2814 default:
2815 abort ();
2816 }
2817 }
2818
2819 /* Get the name of the SVE vector offset register in OPND, using the operand
2820 qualifier to decide whether the suffix should be .S or .D. */
2821
2822 static inline const char *
2823 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2824 {
2825 assert (qualifier == AARCH64_OPND_QLF_S_S
2826 || qualifier == AARCH64_OPND_QLF_S_D);
2827 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2828 }
2829
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union permits reinterpreting the expanded bit pattern as a
   floating-point number of the corresponding width.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), hence the same layout as single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2849
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint64_t sign = (imm8 >> 7) & 1;	/* imm8<7>  */
  const uint64_t bit6 = (imm8 >> 6) & 1;	/* imm8<6>  */
  const uint64_t low7 = imm8 & 0x7f;		/* imm8<6:0>  */

  if (size == 8)
    /* Double precision: sign at bit 63, NOT(imm8<6>) at bit 62,
       imm8<6> replicated over bits 61-55, then imm8<6:0> at bits
       54-48; everything below bit 48 is zero.  */
    return (sign << 63)
	   | ((bit6 ^ 1) << 62)
	   | ((bit6 * 0x7fULL) << 55)
	   | (low7 << 48);

  if (size == 4 || size == 2)
    /* Single precision (also used for half): sign at bit 31,
       NOT(imm8<6>) at bit 30, imm8<6> replicated over bits 29-26,
       then imm8<6:0> at bits 25-19.  */
    return (sign << 31)
	   | ((bit6 ^ 1) << 30)
	   | ((bit6 * 0xfULL) << 26)
	   | (low7 << 19);

  /* An unsupported size.  */
  assert (0);
  return 0;
}
2893
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around after 31 (the & 0x1f below).  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  A list that
     wraps past register 31 (last_reg <= first_reg) always uses the
     enumerated form below.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Enumerated form: list each register explicitly, wrapping the
	 register numbers modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2952
2953 /* Print the register+immediate address in OPND to BUF, which has SIZE
2954 characters. BASE is the name of the base register. */
2955
2956 static void
2957 print_immediate_offset_address (char *buf, size_t size,
2958 const aarch64_opnd_info *opnd,
2959 const char *base)
2960 {
2961 if (opnd->addr.writeback)
2962 {
2963 if (opnd->addr.preind)
2964 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2965 else
2966 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2967 }
2968 else
2969 {
2970 if (opnd->shifter.operator_present)
2971 {
2972 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2973 snprintf (buf, size, "[%s, #%d, mul vl]",
2974 base, opnd->addr.offset.imm);
2975 }
2976 else if (opnd->addr.offset.imm)
2977 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2978 else
2979 snprintf (buf, size, "[%s]", base);
2980 }
2981 }
2982
2983 /* Produce the string representation of the register offset address operand
2984 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2985 the names of the base and offset registers. */
2986 static void
2987 print_register_offset_address (char *buf, size_t size,
2988 const aarch64_opnd_info *opnd,
2989 const char *base, const char *offset)
2990 {
2991 char tb[16]; /* Temporary buffer. */
2992 bfd_boolean print_extend_p = TRUE;
2993 bfd_boolean print_amount_p = TRUE;
2994 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2995
2996 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2997 || !opnd->shifter.amount_present))
2998 {
2999 /* Not print the shift/extend amount when the amount is zero and
3000 when it is not the special case of 8-bit load/store instruction. */
3001 print_amount_p = FALSE;
3002 /* Likewise, no need to print the shift operator LSL in such a
3003 situation. */
3004 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3005 print_extend_p = FALSE;
3006 }
3007
3008 /* Prepare for the extend/shift. */
3009 if (print_extend_p)
3010 {
3011 if (print_amount_p)
3012 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3013 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3014 (opnd->shifter.amount % 100));
3015 else
3016 snprintf (tb, sizeof (tb), ", %s", shift_name);
3017 }
3018 else
3019 tb[0] = '\0';
3020
3021 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3022 }
3023
3024 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3025 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3026 PC, PCREL_P and ADDRESS are used to pass in and return information about
3027 the PC-relative address calculation, where the PC value is passed in
3028 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3029 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3030 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3031
3032 The function serves both the disassembler and the assembler diagnostics
3033 issuer, which is the reason why it lives in this file. */
3034
3035 void
3036 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3037 const aarch64_opcode *opcode,
3038 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3039 bfd_vma *address, char** notes ATTRIBUTE_UNUSED)
3040 {
3041 unsigned int i, num_conds;
3042 const char *name = NULL;
3043 const aarch64_opnd_info *opnd = opnds + idx;
3044 enum aarch64_modifier_kind kind;
3045 uint64_t addr, enum_value;
3046
3047 buf[0] = '\0';
3048 if (pcrel_p)
3049 *pcrel_p = 0;
3050
3051 switch (opnd->type)
3052 {
3053 case AARCH64_OPND_Rd:
3054 case AARCH64_OPND_Rn:
3055 case AARCH64_OPND_Rm:
3056 case AARCH64_OPND_Rt:
3057 case AARCH64_OPND_Rt2:
3058 case AARCH64_OPND_Rs:
3059 case AARCH64_OPND_Ra:
3060 case AARCH64_OPND_Rt_SYS:
3061 case AARCH64_OPND_PAIRREG:
3062 case AARCH64_OPND_SVE_Rm:
3063 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3064 the <ic_op>, therefore we use opnd->present to override the
3065 generic optional-ness information. */
3066 if (opnd->type == AARCH64_OPND_Rt_SYS)
3067 {
3068 if (!opnd->present)
3069 break;
3070 }
3071 /* Omit the operand, e.g. RET. */
3072 else if (optional_operand_p (opcode, idx)
3073 && (opnd->reg.regno
3074 == get_optional_operand_default_value (opcode)))
3075 break;
3076 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3077 || opnd->qualifier == AARCH64_OPND_QLF_X);
3078 snprintf (buf, size, "%s",
3079 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3080 break;
3081
3082 case AARCH64_OPND_Rd_SP:
3083 case AARCH64_OPND_Rn_SP:
3084 case AARCH64_OPND_SVE_Rn_SP:
3085 case AARCH64_OPND_Rm_SP:
3086 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3087 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3088 || opnd->qualifier == AARCH64_OPND_QLF_X
3089 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3090 snprintf (buf, size, "%s",
3091 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3092 break;
3093
3094 case AARCH64_OPND_Rm_EXT:
3095 kind = opnd->shifter.kind;
3096 assert (idx == 1 || idx == 2);
3097 if ((aarch64_stack_pointer_p (opnds)
3098 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3099 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3100 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3101 && kind == AARCH64_MOD_UXTW)
3102 || (opnd->qualifier == AARCH64_OPND_QLF_X
3103 && kind == AARCH64_MOD_UXTX)))
3104 {
3105 /* 'LSL' is the preferred form in this case. */
3106 kind = AARCH64_MOD_LSL;
3107 if (opnd->shifter.amount == 0)
3108 {
3109 /* Shifter omitted. */
3110 snprintf (buf, size, "%s",
3111 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3112 break;
3113 }
3114 }
3115 if (opnd->shifter.amount)
3116 snprintf (buf, size, "%s, %s #%" PRIi64,
3117 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3118 aarch64_operand_modifiers[kind].name,
3119 opnd->shifter.amount);
3120 else
3121 snprintf (buf, size, "%s, %s",
3122 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3123 aarch64_operand_modifiers[kind].name);
3124 break;
3125
3126 case AARCH64_OPND_Rm_SFT:
3127 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3128 || opnd->qualifier == AARCH64_OPND_QLF_X);
3129 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3130 snprintf (buf, size, "%s",
3131 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3132 else
3133 snprintf (buf, size, "%s, %s #%" PRIi64,
3134 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3135 aarch64_operand_modifiers[opnd->shifter.kind].name,
3136 opnd->shifter.amount);
3137 break;
3138
3139 case AARCH64_OPND_Fd:
3140 case AARCH64_OPND_Fn:
3141 case AARCH64_OPND_Fm:
3142 case AARCH64_OPND_Fa:
3143 case AARCH64_OPND_Ft:
3144 case AARCH64_OPND_Ft2:
3145 case AARCH64_OPND_Sd:
3146 case AARCH64_OPND_Sn:
3147 case AARCH64_OPND_Sm:
3148 case AARCH64_OPND_SVE_VZn:
3149 case AARCH64_OPND_SVE_Vd:
3150 case AARCH64_OPND_SVE_Vm:
3151 case AARCH64_OPND_SVE_Vn:
3152 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3153 opnd->reg.regno);
3154 break;
3155
3156 case AARCH64_OPND_Va:
3157 case AARCH64_OPND_Vd:
3158 case AARCH64_OPND_Vn:
3159 case AARCH64_OPND_Vm:
3160 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3161 aarch64_get_qualifier_name (opnd->qualifier));
3162 break;
3163
3164 case AARCH64_OPND_Ed:
3165 case AARCH64_OPND_En:
3166 case AARCH64_OPND_Em:
3167 case AARCH64_OPND_Em16:
3168 case AARCH64_OPND_SM3_IMM2:
3169 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3170 aarch64_get_qualifier_name (opnd->qualifier),
3171 opnd->reglane.index);
3172 break;
3173
3174 case AARCH64_OPND_VdD1:
3175 case AARCH64_OPND_VnD1:
3176 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3177 break;
3178
3179 case AARCH64_OPND_LVn:
3180 case AARCH64_OPND_LVt:
3181 case AARCH64_OPND_LVt_AL:
3182 case AARCH64_OPND_LEt:
3183 print_register_list (buf, size, opnd, "v");
3184 break;
3185
3186 case AARCH64_OPND_SVE_Pd:
3187 case AARCH64_OPND_SVE_Pg3:
3188 case AARCH64_OPND_SVE_Pg4_5:
3189 case AARCH64_OPND_SVE_Pg4_10:
3190 case AARCH64_OPND_SVE_Pg4_16:
3191 case AARCH64_OPND_SVE_Pm:
3192 case AARCH64_OPND_SVE_Pn:
3193 case AARCH64_OPND_SVE_Pt:
3194 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3195 snprintf (buf, size, "p%d", opnd->reg.regno);
3196 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3197 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3198 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3199 aarch64_get_qualifier_name (opnd->qualifier));
3200 else
3201 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3202 aarch64_get_qualifier_name (opnd->qualifier));
3203 break;
3204
3205 case AARCH64_OPND_SVE_Za_5:
3206 case AARCH64_OPND_SVE_Za_16:
3207 case AARCH64_OPND_SVE_Zd:
3208 case AARCH64_OPND_SVE_Zm_5:
3209 case AARCH64_OPND_SVE_Zm_16:
3210 case AARCH64_OPND_SVE_Zn:
3211 case AARCH64_OPND_SVE_Zt:
3212 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3213 snprintf (buf, size, "z%d", opnd->reg.regno);
3214 else
3215 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3216 aarch64_get_qualifier_name (opnd->qualifier));
3217 break;
3218
3219 case AARCH64_OPND_SVE_ZnxN:
3220 case AARCH64_OPND_SVE_ZtxN:
3221 print_register_list (buf, size, opnd, "z");
3222 break;
3223
3224 case AARCH64_OPND_SVE_Zm3_INDEX:
3225 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3226 case AARCH64_OPND_SVE_Zm4_INDEX:
3227 case AARCH64_OPND_SVE_Zn_INDEX:
3228 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3229 aarch64_get_qualifier_name (opnd->qualifier),
3230 opnd->reglane.index);
3231 break;
3232
3233 case AARCH64_OPND_CRn:
3234 case AARCH64_OPND_CRm:
3235 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3236 break;
3237
3238 case AARCH64_OPND_IDX:
3239 case AARCH64_OPND_MASK:
3240 case AARCH64_OPND_IMM:
3241 case AARCH64_OPND_IMM_2:
3242 case AARCH64_OPND_WIDTH:
3243 case AARCH64_OPND_UIMM3_OP1:
3244 case AARCH64_OPND_UIMM3_OP2:
3245 case AARCH64_OPND_BIT_NUM:
3246 case AARCH64_OPND_IMM_VLSL:
3247 case AARCH64_OPND_IMM_VLSR:
3248 case AARCH64_OPND_SHLL_IMM:
3249 case AARCH64_OPND_IMM0:
3250 case AARCH64_OPND_IMMR:
3251 case AARCH64_OPND_IMMS:
3252 case AARCH64_OPND_FBITS:
3253 case AARCH64_OPND_SIMM5:
3254 case AARCH64_OPND_SVE_SHLIMM_PRED:
3255 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3256 case AARCH64_OPND_SVE_SHRIMM_PRED:
3257 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3258 case AARCH64_OPND_SVE_SIMM5:
3259 case AARCH64_OPND_SVE_SIMM5B:
3260 case AARCH64_OPND_SVE_SIMM6:
3261 case AARCH64_OPND_SVE_SIMM8:
3262 case AARCH64_OPND_SVE_UIMM3:
3263 case AARCH64_OPND_SVE_UIMM7:
3264 case AARCH64_OPND_SVE_UIMM8:
3265 case AARCH64_OPND_SVE_UIMM8_53:
3266 case AARCH64_OPND_IMM_ROT1:
3267 case AARCH64_OPND_IMM_ROT2:
3268 case AARCH64_OPND_IMM_ROT3:
3269 case AARCH64_OPND_SVE_IMM_ROT1:
3270 case AARCH64_OPND_SVE_IMM_ROT2:
3271 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3272 break;
3273
3274 case AARCH64_OPND_SVE_I1_HALF_ONE:
3275 case AARCH64_OPND_SVE_I1_HALF_TWO:
3276 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3277 {
3278 single_conv_t c;
3279 c.i = opnd->imm.value;
3280 snprintf (buf, size, "#%.1f", c.f);
3281 break;
3282 }
3283
3284 case AARCH64_OPND_SVE_PATTERN:
3285 if (optional_operand_p (opcode, idx)
3286 && opnd->imm.value == get_optional_operand_default_value (opcode))
3287 break;
3288 enum_value = opnd->imm.value;
3289 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3290 if (aarch64_sve_pattern_array[enum_value])
3291 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3292 else
3293 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3294 break;
3295
3296 case AARCH64_OPND_SVE_PATTERN_SCALED:
3297 if (optional_operand_p (opcode, idx)
3298 && !opnd->shifter.operator_present
3299 && opnd->imm.value == get_optional_operand_default_value (opcode))
3300 break;
3301 enum_value = opnd->imm.value;
3302 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3303 if (aarch64_sve_pattern_array[opnd->imm.value])
3304 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3305 else
3306 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3307 if (opnd->shifter.operator_present)
3308 {
3309 size_t len = strlen (buf);
3310 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3311 aarch64_operand_modifiers[opnd->shifter.kind].name,
3312 opnd->shifter.amount);
3313 }
3314 break;
3315
3316 case AARCH64_OPND_SVE_PRFOP:
3317 enum_value = opnd->imm.value;
3318 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3319 if (aarch64_sve_prfop_array[enum_value])
3320 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3321 else
3322 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3323 break;
3324
3325 case AARCH64_OPND_IMM_MOV:
3326 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3327 {
3328 case 4: /* e.g. MOV Wd, #<imm32>. */
3329 {
3330 int imm32 = opnd->imm.value;
3331 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3332 }
3333 break;
3334 case 8: /* e.g. MOV Xd, #<imm64>. */
3335 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3336 opnd->imm.value, opnd->imm.value);
3337 break;
3338 default: assert (0);
3339 }
3340 break;
3341
3342 case AARCH64_OPND_FPIMM0:
3343 snprintf (buf, size, "#0.0");
3344 break;
3345
3346 case AARCH64_OPND_LIMM:
3347 case AARCH64_OPND_AIMM:
3348 case AARCH64_OPND_HALF:
3349 case AARCH64_OPND_SVE_INV_LIMM:
3350 case AARCH64_OPND_SVE_LIMM:
3351 case AARCH64_OPND_SVE_LIMM_MOV:
3352 if (opnd->shifter.amount)
3353 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3354 opnd->shifter.amount);
3355 else
3356 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3357 break;
3358
3359 case AARCH64_OPND_SIMD_IMM:
3360 case AARCH64_OPND_SIMD_IMM_SFT:
3361 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3362 || opnd->shifter.kind == AARCH64_MOD_NONE)
3363 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3364 else
3365 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3366 aarch64_operand_modifiers[opnd->shifter.kind].name,
3367 opnd->shifter.amount);
3368 break;
3369
3370 case AARCH64_OPND_SVE_AIMM:
3371 case AARCH64_OPND_SVE_ASIMM:
3372 if (opnd->shifter.amount)
3373 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3374 opnd->shifter.amount);
3375 else
3376 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3377 break;
3378
3379 case AARCH64_OPND_FPIMM:
3380 case AARCH64_OPND_SIMD_FPIMM:
3381 case AARCH64_OPND_SVE_FPIMM8:
3382 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3383 {
3384 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3385 {
3386 half_conv_t c;
3387 c.i = expand_fp_imm (2, opnd->imm.value);
3388 snprintf (buf, size, "#%.18e", c.f);
3389 }
3390 break;
3391 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3392 {
3393 single_conv_t c;
3394 c.i = expand_fp_imm (4, opnd->imm.value);
3395 snprintf (buf, size, "#%.18e", c.f);
3396 }
3397 break;
 3398 	case 8: /* e.g. FMOV <Dd>, #<imm>. */
3399 {
3400 double_conv_t c;
3401 c.i = expand_fp_imm (8, opnd->imm.value);
3402 snprintf (buf, size, "#%.18e", c.d);
3403 }
3404 break;
3405 default: assert (0);
3406 }
3407 break;
3408
3409 case AARCH64_OPND_CCMP_IMM:
3410 case AARCH64_OPND_NZCV:
3411 case AARCH64_OPND_EXCEPTION:
3412 case AARCH64_OPND_UIMM4:
3413 case AARCH64_OPND_UIMM7:
3414 if (optional_operand_p (opcode, idx) == TRUE
3415 && (opnd->imm.value ==
3416 (int64_t) get_optional_operand_default_value (opcode)))
3417 /* Omit the operand, e.g. DCPS1. */
3418 break;
3419 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3420 break;
3421
3422 case AARCH64_OPND_COND:
3423 case AARCH64_OPND_COND1:
3424 snprintf (buf, size, "%s", opnd->cond->names[0]);
3425 num_conds = ARRAY_SIZE (opnd->cond->names);
3426 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3427 {
3428 size_t len = strlen (buf);
3429 if (i == 1)
3430 snprintf (buf + len, size - len, " // %s = %s",
3431 opnd->cond->names[0], opnd->cond->names[i]);
3432 else
3433 snprintf (buf + len, size - len, ", %s",
3434 opnd->cond->names[i]);
3435 }
3436 break;
3437
3438 case AARCH64_OPND_ADDR_ADRP:
3439 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3440 + opnd->imm.value;
3441 if (pcrel_p)
3442 *pcrel_p = 1;
3443 if (address)
3444 *address = addr;
3445 /* This is not necessary during the disassembling, as print_address_func
3446 in the disassemble_info will take care of the printing. But some
3447 other callers may be still interested in getting the string in *STR,
3448 so here we do snprintf regardless. */
3449 snprintf (buf, size, "#0x%" PRIx64, addr);
3450 break;
3451
3452 case AARCH64_OPND_ADDR_PCREL14:
3453 case AARCH64_OPND_ADDR_PCREL19:
3454 case AARCH64_OPND_ADDR_PCREL21:
3455 case AARCH64_OPND_ADDR_PCREL26:
3456 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3457 if (pcrel_p)
3458 *pcrel_p = 1;
3459 if (address)
3460 *address = addr;
3461 /* This is not necessary during the disassembling, as print_address_func
3462 in the disassemble_info will take care of the printing. But some
3463 other callers may be still interested in getting the string in *STR,
3464 so here we do snprintf regardless. */
3465 snprintf (buf, size, "#0x%" PRIx64, addr);
3466 break;
3467
3468 case AARCH64_OPND_ADDR_SIMPLE:
3469 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3470 case AARCH64_OPND_SIMD_ADDR_POST:
3471 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3472 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3473 {
3474 if (opnd->addr.offset.is_reg)
3475 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3476 else
3477 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3478 }
3479 else
3480 snprintf (buf, size, "[%s]", name);
3481 break;
3482
3483 case AARCH64_OPND_ADDR_REGOFF:
3484 case AARCH64_OPND_SVE_ADDR_R:
3485 case AARCH64_OPND_SVE_ADDR_RR:
3486 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3487 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3488 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3489 case AARCH64_OPND_SVE_ADDR_RX:
3490 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3491 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3492 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3493 print_register_offset_address
3494 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3495 get_offset_int_reg_name (opnd));
3496 break;
3497
3498 case AARCH64_OPND_SVE_ADDR_RZ:
3499 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3500 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3501 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3503 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3504 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3505 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3506 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3507 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3508 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3509 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3510 print_register_offset_address
3511 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3512 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3513 break;
3514
3515 case AARCH64_OPND_ADDR_SIMM7:
3516 case AARCH64_OPND_ADDR_SIMM9:
3517 case AARCH64_OPND_ADDR_SIMM9_2:
3518 case AARCH64_OPND_ADDR_SIMM10:
3519 case AARCH64_OPND_ADDR_OFFSET:
3520 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3521 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3522 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3523 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3524 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3525 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3526 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3527 case AARCH64_OPND_SVE_ADDR_RI_U6:
3528 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3529 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3530 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3531 print_immediate_offset_address
3532 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3533 break;
3534
3535 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3536 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3537 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3538 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3539 print_immediate_offset_address
3540 (buf, size, opnd,
3541 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3542 break;
3543
3544 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3545 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3546 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3547 print_register_offset_address
3548 (buf, size, opnd,
3549 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3550 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3551 break;
3552
3553 case AARCH64_OPND_ADDR_UIMM12:
3554 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3555 if (opnd->addr.offset.imm)
3556 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3557 else
3558 snprintf (buf, size, "[%s]", name);
3559 break;
3560
3561 case AARCH64_OPND_SYSREG:
3562 for (i = 0; aarch64_sys_regs[i].name; ++i)
3563 {
3564 bfd_boolean exact_match
3565 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3566 == opnd->sysreg.flags;
3567
 3568 	  /* Try to find an exact match, but if that fails, return the first
 3569 	     partial match that was found. */
3570 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3571 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3572 && (name == NULL || exact_match))
3573 {
3574 name = aarch64_sys_regs[i].name;
3575 if (exact_match)
3576 {
3577 if (notes)
3578 *notes = NULL;
3579 break;
3580 }
3581
 3582 	      /* If we didn't match exactly, that means the presence of a flag
3583 indicates what we didn't want for this instruction. e.g. If
3584 F_REG_READ is there, that means we were looking for a write
3585 register. See aarch64_ext_sysreg. */
3586 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3587 *notes = _("reading from a write-only register.");
3588 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3589 *notes = _("writing to a read-only register.");
3590 }
3591 }
3592
3593 if (name)
3594 snprintf (buf, size, "%s", name);
3595 else
3596 {
3597 /* Implementation defined system register. */
3598 unsigned int value = opnd->sysreg.value;
3599 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3600 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3601 value & 0x7);
3602 }
3603 break;
3604
3605 case AARCH64_OPND_PSTATEFIELD:
3606 for (i = 0; aarch64_pstatefields[i].name; ++i)
3607 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3608 break;
3609 assert (aarch64_pstatefields[i].name);
3610 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3611 break;
3612
3613 case AARCH64_OPND_SYSREG_AT:
3614 case AARCH64_OPND_SYSREG_DC:
3615 case AARCH64_OPND_SYSREG_IC:
3616 case AARCH64_OPND_SYSREG_TLBI:
3617 snprintf (buf, size, "%s", opnd->sysins_op->name);
3618 break;
3619
3620 case AARCH64_OPND_BARRIER:
3621 snprintf (buf, size, "%s", opnd->barrier->name);
3622 break;
3623
3624 case AARCH64_OPND_BARRIER_ISB:
3625 /* Operand can be omitted, e.g. in DCPS1. */
3626 if (! optional_operand_p (opcode, idx)
3627 || (opnd->barrier->value
3628 != get_optional_operand_default_value (opcode)))
3629 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3630 break;
3631
3632 case AARCH64_OPND_PRFOP:
3633 if (opnd->prfop->name != NULL)
3634 snprintf (buf, size, "%s", opnd->prfop->name);
3635 else
3636 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3637 break;
3638
3639 case AARCH64_OPND_BARRIER_PSB:
3640 snprintf (buf, size, "%s", opnd->hint_option->name);
3641 break;
3642
3643 default:
3644 assert (0);
3645 }
3646 }
3647
/* Pack a system-register encoding into a single value laid out as
   op0 << 14 | op1 << 11 | CRn << 7 | CRm << 3 | op2 (op0 is 2 bits,
   op1 and op2 are 3 bits, CRn and CRm are 4 bits).  The shift-left
   then shift-right-by-5 preserves the historical field positions while
   dropping the low 5 bits.  This layout matches the
   "s%u_%u_c%u_c%u_%u" unpacking used above when printing an
   implementation-defined system register.  */
 3648 #define CPENC(op0,op1,crn,crm,op2) \
 3650   ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
 3651 /* for 3.9.3 Instructions for Accessing Special Purpose Registers
    (op0 == 3, CRn == 4).  */
 3652 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
 3653 /* for 3.9.10 System Instructions (op0 == 1).  */
 3654 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3655
/* Shorthand names for the CRn/CRm register-number arguments of the
   CPENC/CPEN_/CPENS macros in the system-register tables below.  */
 3656 #define C0 0
 3657 #define C1 1
 3658 #define C2 2
 3659 #define C3 3
 3660 #define C4 4
 3661 #define C5 5
 3662 #define C6 6
 3663 #define C7 7
 3664 #define C8 8
 3665 #define C9 9
 3666 #define C10 10
 3667 #define C11 11
 3668 #define C12 12
 3669 #define C13 13
 3670 #define C14 14
 3671 #define C15 15
3672
 3673 /* TODO: one more issue still needs to be resolved:
 3674    1. handle CPU-implementation-defined system registers. */
3675 const aarch64_sys_reg aarch64_sys_regs [] =
3676 {
3677 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3678 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3679 { "elr_el1", CPEN_(0,C0,1), 0 },
3680 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3681 { "sp_el0", CPEN_(0,C1,0), 0 },
3682 { "spsel", CPEN_(0,C2,0), 0 },
3683 { "daif", CPEN_(3,C2,1), 0 },
3684 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3685 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3686 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3687 { "nzcv", CPEN_(3,C2,0), 0 },
3688 { "fpcr", CPEN_(3,C4,0), 0 },
3689 { "fpsr", CPEN_(3,C4,1), 0 },
3690 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3691 { "dlr_el0", CPEN_(3,C5,1), 0 },
3692 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3693 { "elr_el2", CPEN_(4,C0,1), 0 },
3694 { "sp_el1", CPEN_(4,C1,0), 0 },
3695 { "spsr_irq", CPEN_(4,C3,0), 0 },
3696 { "spsr_abt", CPEN_(4,C3,1), 0 },
3697 { "spsr_und", CPEN_(4,C3,2), 0 },
3698 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3699 { "spsr_el3", CPEN_(6,C0,0), 0 },
3700 { "elr_el3", CPEN_(6,C0,1), 0 },
3701 { "sp_el2", CPEN_(6,C1,0), 0 },
3702 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3703 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3704 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3705 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3706 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3707 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3708 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3709 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3710 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3711 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3712 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3713 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3714 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3715 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3716 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3717 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3718 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3719 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3720 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3721 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3722 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3723 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3724 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3725 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3726 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3727 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3728 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3729 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3730 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3731 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3732 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3733 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3734 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3735 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3736 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3737 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3738 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3739 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3740 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3741 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3742 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3743 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3744 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3745 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3746 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3747 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3748 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3749 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3750 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3751 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3752 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3753 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3754 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3755 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3756 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3757 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3758 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3759 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3760 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3761 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3762 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3763 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3764 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3765 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3766 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3767 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3768 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3769 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3770 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3771 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3772 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3773 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3774 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3775 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3776 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3777 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3778 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3779 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3780 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3781 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3782 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3783 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3784 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3785 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3786 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3787 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3788 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3789 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3790 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3791 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3792 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3793 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3794 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3795 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3796 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3797 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3798 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3799 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3800 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3801 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3802 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3803 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3804 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3805 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3806 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3807 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3808 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3809 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3810 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3811 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3812 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3813 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3814 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3815 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3816 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3817 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3818 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3819 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3820 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3821 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3822 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3823 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3824 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3825 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3826 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3827 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3828 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3829 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3830 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3831 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3832 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3833 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3834 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3835 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3836 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3837 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3838 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3839 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3840 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3841 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3842 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3843 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3844 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3845 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3846 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3847 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3848 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3849 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3850 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3851 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3852 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3853 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3854 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3855 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3856 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3857 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3858 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3859 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3860 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3861 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3862 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3863 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3864 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3865 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3866 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3867 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3868 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3869 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3870 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3871 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3872 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3873 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3874 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3875 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3876 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3877 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3878 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3879 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3880 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3881 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3882 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3883 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3884 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3885 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3886 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
3887 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
3888 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3889 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3890 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3891 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3892 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3893 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3894 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3895 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3896 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3897 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3898 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3899 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3900 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3901 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3902 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3903 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3904 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3905 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3906 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3907 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3908 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3909 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3910 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3911 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3912 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3913 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3914 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3915 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3916 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3917 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3918 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3919 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3920 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3921 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3922 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3923 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3924 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3925 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3926 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3927 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3928 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3929 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3930 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3931 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3932 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3933 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3934 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3935 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3936 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3937 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3938 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3939 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3940 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3941 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3942 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3943 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3944 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3945 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3946 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3947 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3948 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3949 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3950 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3951 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3952 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3953 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3954 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
3955 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
3956 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
3957 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3958 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3959 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3960 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3961 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
3962 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3963 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3964 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3965 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
3966 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3967 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3968 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3969 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3970 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3971 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3972 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
3973 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3974 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3975 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3976 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3977 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3978 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3979 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
3980 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3981 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
3982 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
3983 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3984 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3985 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3986 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3987 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3988 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3989 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3990 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3991 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3992 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3993 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3994 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3995 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3996 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3997 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3998 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3999 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4000 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4001 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4002 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4003 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4004 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4005 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4006 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4007 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4008 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4009 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4010 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4011 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4012 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4013 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4014 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4015 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4016 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4017 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4018 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4019 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4020 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4021 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4022 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4023 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4024 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4025 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4026 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4027 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4028 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4029 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4030 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4031 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4032 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4033 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4034 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4035 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4036 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4037 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4038 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4039 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4040 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4041 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4042 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4043 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4044 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4045 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4046 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4047 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4048 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4049 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4050 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4051 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4052 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4053
4054 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4055 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4056 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4057 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4058 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4059 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4060 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4061 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4062 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4063 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4064 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4065 { 0, CPENC(0,0,0,0,0), 0 },
4066 };
4067
4068 bfd_boolean
4069 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4070 {
4071 return (reg->flags & F_DEPRECATED) != 0;
4072 }
4073
/* Return TRUE if system register REG is usable with the architectural
   feature set FEATURES.  Registers without F_ARCHEXT are always usable;
   otherwise REG->value is matched against groups of encodings, each
   gated on one architecture-extension feature bit.  The encodings below
   mirror entries in aarch64_sys_regs above.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* No extension requirement recorded: always supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  Encodings correspond to the F_ARCHEXT
     entries in aarch64_sys_regs_tlbi below.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* No gating group matched: the extension requirement is satisfied.  */
  return TRUE;
}
4275
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2. CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  /* F_ARCHEXT entries are gated by aarch64_pstatefield_supported_p below
     (PAN, ARMv8.2 and ARMv8.4 respectively).  */
  { "pan",     0x04, F_ARCHEXT },
  { "uao",     0x03, F_ARCHEXT },
  { "dit",     0x1a, F_ARCHEXT },
  { 0,	  CPENC(0,0,0,0,0), 0 },	/* End-of-table sentinel.  */
};
4293
4294 bfd_boolean
4295 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4296 const aarch64_sys_reg *reg)
4297 {
4298 if (!(reg->flags & F_ARCHEXT))
4299 return TRUE;
4300
4301 /* PAN. Values are from aarch64_pstatefields. */
4302 if (reg->value == 0x04
4303 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4304 return FALSE;
4305
4306 /* UAO. Values are from aarch64_pstatefields. */
4307 if (reg->value == 0x03
4308 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4309 return FALSE;
4310
4311 /* DIT. Values are from aarch64_pstatefields. */
4312 if (reg->value == 0x1a
4313 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4314 return FALSE;
4315
4316 return TRUE;
4317 }
4318
/* Instruction-cache maintenance (IC) operations.  F_HASXT marks entries
   that take an Xt register operand (see aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* End-of-table sentinel.  */
};
4326
/* Data-cache maintenance (DC) operations.  F_HASXT marks entries that
   take an Xt register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",	CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",	CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",	CPENS (3, C7, C11, 1), F_HASXT },
    /* DC CVAP requires ARMv8.2; gated in aarch64_sys_ins_reg_supported_p.  */
    { "cvap",	CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",	CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",	CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* End-of-table sentinel.  */
};
4340
/* Address-translation (AT) operations.  All take an Xt register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    /* AT S1E1RP/S1E1WP require ARMv8.2; gated in
       aarch64_sys_ins_reg_supported_p.  */
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* End-of-table sentinel.  */
};
4359
/* TLB maintenance (TLBI) operations.  F_HASXT marks entries that take an
   Xt register operand; F_ARCHEXT entries are gated on ARMv8.4 via the
   "ARMv8.4 TLB instructions" check in aarch64_sys_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* "*os" variants (ARMv8.4).  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",    CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",  CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",   CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",   CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",  CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",    CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",   CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",    CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",   CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",   CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",   CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",   CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* "r*" variants (ARMv8.4).  */
    { "rvae1",     CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",    CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",    CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",   CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",   CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",  CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",  CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",   CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",  CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",  CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",  CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os",CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",     CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",    CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",   CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",  CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",   CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",  CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",     CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",    CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",   CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",  CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",   CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",  CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }	/* End-of-table sentinel.  */
};
4445
4446 bfd_boolean
4447 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4448 {
4449 return (sys_ins_reg->flags & F_HASXT) != 0;
4450 }
4451
4452 extern bfd_boolean
4453 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4454 const aarch64_sys_ins_reg *reg)
4455 {
4456 if (!(reg->flags & F_ARCHEXT))
4457 return TRUE;
4458
4459 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4460 if (reg->value == CPENS (3, C7, C12, 1)
4461 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4462 return FALSE;
4463
4464 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4465 if ((reg->value == CPENS (0, C7, C9, 0)
4466 || reg->value == CPENS (0, C7, C9, 1))
4467 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4468 return FALSE;
4469
4470 return TRUE;
4471 }
4472
/* The CRn/CRm shorthand macros are only needed by the tables above;
   retire them here.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract bits HI down to LO (inclusive) of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4492
4493 static bfd_boolean
4494 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4495 const aarch64_insn insn)
4496 {
4497 int t = BITS (insn, 4, 0);
4498 int n = BITS (insn, 9, 5);
4499 int t2 = BITS (insn, 14, 10);
4500
4501 if (BIT (insn, 23))
4502 {
4503 /* Write back enabled. */
4504 if ((t == n || t2 == n) && n != 31)
4505 return FALSE;
4506 }
4507
4508 if (BIT (insn, 22))
4509 {
4510 /* Load */
4511 if (t == t2)
4512 return FALSE;
4513 }
4514
4515 return TRUE;
4516 }
4517
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE bytes.  The shift is split in
     two so that ESIZE == 8 performs two well-defined 32-bit shifts
     (yielding 0) instead of a single undefined shift by 64.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* UVALUE must be either zero-extended or sign-extended from its low
     ESIZE bytes; anything else is not a valid element immediate.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE step by step to the smallest element width whose
     replication reproduces UVALUE, so the range test below sees the
     value DUP would actually have to encode.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  /* A value that replicates down to a single byte is always
	     representable by DUP, so DUPM is not called for.  */
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP's immediate is an 8-bit signed value, optionally shifted left
     by 8 bits; strip the shift before the range test.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* Out of DUP's signed 8-bit range: DUP cannot do it, DUPM should.  */
  return svalue < -128 || svalue >= 128;
}
4544
4545 /* Include the opcode description table as well as the operand description
4546 table. */
4547 #define VERIFIER(x) verify_##x
4548 #include "aarch64-tbl.h"
4549