/* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
#ifdef DEBUG_AARCH64
/* When non-zero, the matching routines dump qualifier sequences via
   dump_match_qualifiers / dump_qualifier_sequence.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
/* Classification of an instruction's qualifier-sequence shape; used by
   aarch64_select_operand_for_sizeq_field_coding to pick the operand
   that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
64
/* Indexed by enum data_pattern; gives the operand index whose qualifier
   determines the size:Q encoding for that pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
136
/* Instruction bit-field descriptors, one per field kind.
   NOTE(review): the two numbers per entry appear to be { lsb, width } and
   the entry order must match the field-kind enumeration declared in the
   header -- confirm against aarch64-opc.h before reordering.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
};
204
205 enum aarch64_operand_class
206 aarch64_get_operand_class (enum aarch64_opnd type)
207 {
208 return aarch64_operands[type].op_class;
209 }
210
211 const char *
212 aarch64_get_operand_name (enum aarch64_opnd type)
213 {
214 return aarch64_operands[type].name;
215 }
216
217 /* Get operand description string.
218 This is usually for the diagnosis purpose. */
219 const char *
220 aarch64_get_operand_desc (enum aarch64_opnd type)
221 {
222 return aarch64_operands[type].desc;
223 }
224
/* Table of all conditional affixes.  Indexed by (and equal to) the 4-bit
   condition encoding; each entry lists the accepted mnemonic spellings
   (e.g. "cs"/"hs" are aliases) followed by the encoding value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
245
246 const aarch64_cond *
247 get_cond_from_value (aarch64_insn value)
248 {
249 assert (value < 16);
250 return &aarch64_conds[(unsigned int) value];
251 }
252
253 const aarch64_cond *
254 get_inverted_cond (const aarch64_cond *cond)
255 {
256 return &aarch64_conds[cond->value ^ 0x1];
257 }
258
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   Terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {NULL, 0},
};
282
283 enum aarch64_modifier_kind
284 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
285 {
286 return desc - aarch64_operand_modifiers;
287 }
288
289 aarch64_insn
290 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
291 {
292 return aarch64_operand_modifiers[kind].value;
293 }
294
295 enum aarch64_modifier_kind
296 aarch64_get_operand_modifier_from_value (aarch64_insn value,
297 bfd_boolean extend_p)
298 {
299 if (extend_p == TRUE)
300 return AARCH64_MOD_UXTB + value;
301 else
302 return AARCH64_MOD_LSL - value;
303 }
304
305 bfd_boolean
306 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
307 {
308 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
309 ? TRUE : FALSE;
310 }
311
312 static inline bfd_boolean
313 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
314 {
315 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
316 ? TRUE : FALSE;
317 }
318
/* Barrier (DMB/DSB) option names, indexed by (and equal to) the 4-bit
   CRm encoding; unnamed encodings are spelled as "#0xNN" immediates.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
338
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is
   terminated by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
351
/* Prefetch-operation names, indexed by (and equal to) the 5-bit prfop
   encoding built by B():
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   NULL names mark unallocated encodings, which are kept so the index
   still equals the encoding value.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
392
393 /* Utilities on value constraint. */
395
/* Return non-zero iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return value >= low && value <= high;
}
401
/* Return non-zero iff VALUE is a multiple of ALIGN.
   NOTE(review): the mask trick assumes ALIGN is a power of two --
   confirm all callers pass one.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value & (align - 1)) == 0;
}
407
/* Return non-zero iff the signed VALUE fits in a signed bit-field that
   is WIDTH bits wide, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  int64_t limit;

  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  limit = (int64_t) 1 << (width - 1);
  return value >= -limit && value < limit;
}
421
/* Return non-zero iff VALUE fits in an unsigned bit-field that is
   WIDTH bits wide, i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  return value >= 0 && value < ((int64_t) 1 << width);
}
435
436 /* Return 1 if OPERAND is SP or WSP. */
437 int
438 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
439 {
440 return ((aarch64_get_operand_class (operand->type)
441 == AARCH64_OPND_CLASS_INT_REG)
442 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
443 && operand->reg.regno == 31);
444 }
445
446 /* Return 1 if OPERAND is XZR or WZP. */
447 int
448 aarch64_zero_register_p (const aarch64_opnd_info *operand)
449 {
450 return ((aarch64_get_operand_class (operand->type)
451 == AARCH64_OPND_CLASS_INT_REG)
452 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
453 && operand->reg.regno == 31);
454 }
455
456 /* Return true if the operand *OPERAND that has the operand code
457 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
458 qualified by the qualifier TARGET. */
459
460 static inline int
461 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
462 aarch64_opnd_qualifier_t target)
463 {
464 switch (operand->qualifier)
465 {
466 case AARCH64_OPND_QLF_W:
467 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
468 return 1;
469 break;
470 case AARCH64_OPND_QLF_X:
471 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
472 return 1;
473 break;
474 case AARCH64_OPND_QLF_WSP:
475 if (target == AARCH64_OPND_QLF_W
476 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
477 return 1;
478 break;
479 case AARCH64_OPND_QLF_SP:
480 if (target == AARCH64_OPND_QLF_X
481 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
482 return 1;
483 break;
484 default:
485 break;
486 }
487
488 return 0;
489 }
490
491 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
492 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
493
494 Return NIL if more than one expected qualifiers are found. */
495
496 aarch64_opnd_qualifier_t
497 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
498 int idx,
499 const aarch64_opnd_qualifier_t known_qlf,
500 int known_idx)
501 {
502 int i, saved_i;
503
504 /* Special case.
505
506 When the known qualifier is NIL, we have to assume that there is only
507 one qualifier sequence in the *QSEQ_LIST and return the corresponding
508 qualifier directly. One scenario is that for instruction
509 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
510 which has only one possible valid qualifier sequence
511 NIL, S_D
512 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
513 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
514
515 Because the qualifier NIL has dual roles in the qualifier sequence:
516 it can mean no qualifier for the operand, or the qualifer sequence is
517 not in use (when all qualifiers in the sequence are NILs), we have to
518 handle this special case here. */
519 if (known_qlf == AARCH64_OPND_NIL)
520 {
521 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
522 return qseq_list[0][idx];
523 }
524
525 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
526 {
527 if (qseq_list[i][known_idx] == known_qlf)
528 {
529 if (saved_i != -1)
530 /* More than one sequences are found to have KNOWN_QLF at
531 KNOWN_IDX. */
532 return AARCH64_OPND_NIL;
533 saved_i = i;
534 }
535 }
536
537 return qseq_list[saved_i][idx];
538 }
539
/* Kind of an operand qualifier; selects how the data0..data2 fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* Register/vector operand variant.  */
  OQK_VALUE_IN_RANGE,	/* Immediate constrained to a value range.  */
  OQK_MISC,		/* Miscellaneous (e.g. shift operator names).  */
};
547
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are the
     lower bound, upper bound and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
560
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): kind is the literal 0 (== OQK_NIL) rather than
     OQK_MISC; benign for the predicates in this file, which only test
     OQK_OPD_VARIANT and OQK_VALUE_IN_RANGE -- confirm before changing.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
612
613 static inline bfd_boolean
614 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
615 {
616 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
617 ? TRUE : FALSE;
618 }
619
620 static inline bfd_boolean
621 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
622 {
623 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
624 ? TRUE : FALSE;
625 }
626
627 const char*
628 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
629 {
630 return aarch64_opnd_qualifiers[qualifier].desc;
631 }
632
633 /* Given an operand qualifier, return the expected data element size
634 of a qualified operand. */
635 unsigned char
636 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
637 {
638 assert (operand_variant_qualifier_p (qualifier) == TRUE);
639 return aarch64_opnd_qualifiers[qualifier].data0;
640 }
641
642 unsigned char
643 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
644 {
645 assert (operand_variant_qualifier_p (qualifier) == TRUE);
646 return aarch64_opnd_qualifiers[qualifier].data1;
647 }
648
649 aarch64_insn
650 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
651 {
652 assert (operand_variant_qualifier_p (qualifier) == TRUE);
653 return aarch64_opnd_qualifiers[qualifier].data2;
654 }
655
656 static int
657 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
658 {
659 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
660 return aarch64_opnd_qualifiers[qualifier].data0;
661 }
662
663 static int
664 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
665 {
666 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
667 return aarch64_opnd_qualifiers[qualifier].data1;
668 }
669
670 #ifdef DEBUG_AARCH64
/* printf-style debug helper: emit "#### " followed by the formatted
   message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  printf ("#### ");
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
681
682 static inline void
683 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
684 {
685 int i;
686 printf ("#### \t");
687 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
688 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
689 printf ("\n");
690 }
691
692 static void
693 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
694 const aarch64_opnd_qualifier_t *qualifier)
695 {
696 int i;
697 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
698
699 aarch64_verbose ("dump_match_qualifiers:");
700 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
701 curr[i] = opnd[i].qualifier;
702 dump_qualifier_sequence (curr);
703 aarch64_verbose ("against");
704 dump_qualifier_sequence (qualifier);
705 }
706 #endif /* DEBUG_AARCH64 */
707
708 /* TODO improve this, we can have an extra field at the runtime to
709 store the number of operands rather than calculating it every time. */
710
711 int
712 aarch64_num_of_operands (const aarch64_opcode *opcode)
713 {
714 int i = 0;
715 const enum aarch64_opnd *opnds = opcode->operands;
716 while (opnds[i++] != AARCH64_OPND_NIL)
717 ;
718 --i;
719 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
720 return i;
721 }
722
723 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
724 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
725
726 N.B. on the entry, it is very likely that only some operands in *INST
727 have had their qualifiers been established.
728
729 If STOP_AT is not -1, the function will only try to match
730 the qualifier sequence for operands before and including the operand
731 of index STOP_AT; and on success *RET will only be filled with the first
732 (STOP_AT+1) qualifiers.
733
734 A couple examples of the matching algorithm:
735
736 X,W,NIL should match
737 X,W,NIL
738
739 NIL,NIL should match
740 X ,NIL
741
742 Apart from serving the main encoding routine, this can also be called
743 during or after the operand decoding. */
744
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* No operands to match; *RET is deliberately left untouched.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT so the inner loop never runs past the real operands.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL first sequence means the opcode takes no
	     qualifiers at all, which counts as a match.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence that matched, as the loop above broke without
	 advancing it further.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
847
848 /* Operand qualifier matching and resolving.
849
850 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
851 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
852
853 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
854 succeeds. */
855
856 static int
857 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
858 {
859 int i;
860 aarch64_opnd_qualifier_seq_t qualifiers;
861
862 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
863 qualifiers))
864 {
865 DEBUG_TRACE ("matching FAIL");
866 return 0;
867 }
868
869 /* Update the qualifiers. */
870 if (update_p == TRUE)
871 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
872 {
873 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
874 break;
875 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
876 "update %s with %s for operand %d",
877 aarch64_get_qualifier_name (inst->operands[i].qualifier),
878 aarch64_get_qualifier_name (qualifiers[i]), i);
879 inst->operands[i].qualifier = qualifiers[i];
880 }
881
882 DEBUG_TRACE ("matching SUCCESS");
883 return 1;
884 }
885
886 /* Return TRUE if VALUE is a wide constant that can be moved into a general
887 register by MOVZ.
888
889 IS32 indicates whether value is a 32-bit immediate or not.
890 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
891 amount will be returned in *SHIFT_AMOUNT. */
892
893 bfd_boolean
894 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
895 {
896 int amount;
897
898 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
899
900 if (is32)
901 {
902 /* Allow all zeros or all ones in top 32-bits, so that
903 32-bit constant expressions like ~0x80000000 are
904 permitted. */
905 uint64_t ext = value;
906 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
907 /* Immediate out of range. */
908 return FALSE;
909 value &= (int64_t) 0xffffffff;
910 }
911
912 /* first, try movz then movn */
913 amount = -1;
914 if ((value & ((int64_t) 0xffff << 0)) == value)
915 amount = 0;
916 else if ((value & ((int64_t) 0xffff << 16)) == value)
917 amount = 16;
918 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
919 amount = 32;
920 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
921 amount = 48;
922
923 if (amount == -1)
924 {
925 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
926 return FALSE;
927 }
928
929 if (shift_amount != NULL)
930 *shift_amount = amount;
931
932 DEBUG_TRACE ("exit TRUE with amount %d", amount);
933
934 return TRUE;
935 }
936
937 /* Build the accepted values for immediate logical SIMD instructions.
938
939 The standard encodings of the immediate value are:
940 N imms immr SIMD size R S
941 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
942 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
943 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
944 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
945 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
946 0 11110s 00000r 2 UInt(r) UInt(s)
947 where all-ones value of S is reserved.
948
949 Let's call E the SIMD size.
950
951 The immediate value is: S+1 bits '1' rotated to the right by R.
952
953 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
954 (remember S != E - 1). */
955
/* Number of distinct valid logical-immediate encodings; see the
   derivation in the comment above (64*63 + 32*31 + ... + 2*1).  */
#define TOTAL_IMM_NB  5334

/* One (immediate value, standard encoding) pair; the table below is
   sorted by imm so it can be bsearch'ed.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lazily filled and sorted by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
965
966 static int
967 simd_imm_encoding_cmp(const void *i1, const void *i2)
968 {
969 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
970 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
971
972 if (imm1->imm < imm2->imm)
973 return -1;
974 if (imm1->imm > imm2->imm)
975 return +1;
976 return 0;
977 }
978
979 /* immediate bitfield standard encoding
980 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
981 1 ssssss rrrrrr 64 rrrrrr ssssss
982 0 0sssss 0rrrrr 32 rrrrr sssss
983 0 10ssss 00rrrr 16 rrrr ssss
984 0 110sss 000rrr 8 rrr sss
985 0 1110ss 0000rr 4 rr ss
986 0 11110s 00000r 2 r s */
/* Pack IS64, R and S into the 13-bit N:immr:imms layout shown in the
   table above: bit 12 = N, bits 11:6 = immr, bits 5:0 = imms.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
992
/* Populate simd_immediates with every valid logical-immediate value and
   its standard encoding, then sort it by value for later bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63).  */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r.  */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the e-bit element across 64 bits.  Each case
	       doubles the pattern width and deliberately falls through
	       to the next.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1053
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g.	ORR <Xd|SP>, <Xn>, #<imm>.

   IS32 indicates whether or not VALUE is a 32-bit immediate.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Lazily built on first call.  NOTE(review): this lazy initialization is
     not thread-safe — confirm callers are single-threaded.  */
  static bfd_boolean initialized = FALSE;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
	       value, is32);

  if (initialized == FALSE)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 constant expressions like ~1 are permitted.  */
      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
	return FALSE;

      /* Replicate the 32 lower bits to the 32 upper bits.  */
      /* The table only stores full 64-bit patterns, so a 32-bit immediate
	 is looked up in its replicated 64-bit form.  */
      value &= 0xffffffff;
      value |= value << 32;
    }

  /* Binary search in the pre-sorted immediate table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1104
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, result;

  result = 0;
  for (i = 0; i < 8; i++)
    {
      /* Examine byte I (little-endian order); each byte must be either
	 all-zeros or all-ones for IMM to be shrinkable.  */
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      if (byte == 0x00)
	continue;
      if (byte != 0xff)
	return -1;
      result |= 1 << i;
    }
  return result;
}
1126
1127 /* Utility inline functions for operand_general_constraint_met_p. */
1128
1129 static inline void
1130 set_error (aarch64_operand_error *mismatch_detail,
1131 enum aarch64_operand_error_kind kind, int idx,
1132 const char* error)
1133 {
1134 if (mismatch_detail == NULL)
1135 return;
1136 mismatch_detail->kind = kind;
1137 mismatch_detail->index = idx;
1138 mismatch_detail->error = error;
1139 }
1140
1141 static inline void
1142 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1143 const char* error)
1144 {
1145 if (mismatch_detail == NULL)
1146 return;
1147 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1148 }
1149
1150 static inline void
1151 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1152 int idx, int lower_bound, int upper_bound,
1153 const char* error)
1154 {
1155 if (mismatch_detail == NULL)
1156 return;
1157 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1158 mismatch_detail->data[0] = lower_bound;
1159 mismatch_detail->data[1] = upper_bound;
1160 }
1161
1162 static inline void
1163 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1164 int idx, int lower_bound, int upper_bound)
1165 {
1166 if (mismatch_detail == NULL)
1167 return;
1168 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1169 _("immediate value"));
1170 }
1171
1172 static inline void
1173 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1174 int idx, int lower_bound, int upper_bound)
1175 {
1176 if (mismatch_detail == NULL)
1177 return;
1178 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1179 _("immediate offset"));
1180 }
1181
1182 static inline void
1183 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1184 int idx, int lower_bound, int upper_bound)
1185 {
1186 if (mismatch_detail == NULL)
1187 return;
1188 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1189 _("register number"));
1190 }
1191
1192 static inline void
1193 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1194 int idx, int lower_bound, int upper_bound)
1195 {
1196 if (mismatch_detail == NULL)
1197 return;
1198 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1199 _("register element index"));
1200 }
1201
1202 static inline void
1203 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1204 int idx, int lower_bound, int upper_bound)
1205 {
1206 if (mismatch_detail == NULL)
1207 return;
1208 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1209 _("shift amount"));
1210 }
1211
1212 static inline void
1213 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1214 int alignment)
1215 {
1216 if (mismatch_detail == NULL)
1217 return;
1218 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1219 mismatch_detail->data[0] = alignment;
1220 }
1221
1222 static inline void
1223 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1224 int expected_num)
1225 {
1226 if (mismatch_detail == NULL)
1227 return;
1228 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1229 mismatch_detail->data[0] = expected_num;
1230 }
1231
1232 static inline void
1233 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1234 const char* error)
1235 {
1236 if (mismatch_detail == NULL)
1237 return;
1238 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1239 }
1240
1241 /* General constraint checking based on operand code.
1242
1243 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1244 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1245
1246 This function has to be called after the qualifiers for all operands
1247 have been resolved.
1248
1249 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1250 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1251 of error message during the disassembling where error message is not
1252 wanted. We avoid the dynamic construction of strings of error messages
1253 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1254 use a combination of error code, static string and some integer data to
1255 represent an error. */
1256
1257 static int
1258 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1259 enum aarch64_opnd type,
1260 const aarch64_opcode *opcode,
1261 aarch64_operand_error *mismatch_detail)
1262 {
1263 unsigned num;
1264 unsigned char size;
1265 int64_t imm;
1266 const aarch64_opnd_info *opnd = opnds + idx;
1267 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1268
1269 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1270
1271 switch (aarch64_operands[type].op_class)
1272 {
1273 case AARCH64_OPND_CLASS_INT_REG:
1274 /* Check pair reg constraints for cas* instructions. */
1275 if (type == AARCH64_OPND_PAIRREG)
1276 {
1277 assert (idx == 1 || idx == 3);
1278 if (opnds[idx - 1].reg.regno % 2 != 0)
1279 {
1280 set_syntax_error (mismatch_detail, idx - 1,
1281 _("reg pair must start from even reg"));
1282 return 0;
1283 }
1284 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1285 {
1286 set_syntax_error (mismatch_detail, idx,
1287 _("reg pair must be contiguous"));
1288 return 0;
1289 }
1290 break;
1291 }
1292
1293 /* <Xt> may be optional in some IC and TLBI instructions. */
1294 if (type == AARCH64_OPND_Rt_SYS)
1295 {
1296 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1297 == AARCH64_OPND_CLASS_SYSTEM));
1298 if (opnds[1].present
1299 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1300 {
1301 set_other_error (mismatch_detail, idx, _("extraneous register"));
1302 return 0;
1303 }
1304 if (!opnds[1].present
1305 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1306 {
1307 set_other_error (mismatch_detail, idx, _("missing register"));
1308 return 0;
1309 }
1310 }
1311 switch (qualifier)
1312 {
1313 case AARCH64_OPND_QLF_WSP:
1314 case AARCH64_OPND_QLF_SP:
1315 if (!aarch64_stack_pointer_p (opnd))
1316 {
1317 set_other_error (mismatch_detail, idx,
1318 _("stack pointer register expected"));
1319 return 0;
1320 }
1321 break;
1322 default:
1323 break;
1324 }
1325 break;
1326
1327 case AARCH64_OPND_CLASS_COND:
1328 if (type == AARCH64_OPND_COND1
1329 && (opnds[idx].cond->value & 0xe) == 0xe)
1330 {
1331 /* Not allow AL or NV. */
1332 set_syntax_error (mismatch_detail, idx, NULL);
1333 }
1334 break;
1335
1336 case AARCH64_OPND_CLASS_ADDRESS:
1337 /* Check writeback. */
1338 switch (opcode->iclass)
1339 {
1340 case ldst_pos:
1341 case ldst_unscaled:
1342 case ldstnapair_offs:
1343 case ldstpair_off:
1344 case ldst_unpriv:
1345 if (opnd->addr.writeback == 1)
1346 {
1347 set_syntax_error (mismatch_detail, idx,
1348 _("unexpected address writeback"));
1349 return 0;
1350 }
1351 break;
1352 case ldst_imm9:
1353 case ldstpair_indexed:
1354 case asisdlsep:
1355 case asisdlsop:
1356 if (opnd->addr.writeback == 0)
1357 {
1358 set_syntax_error (mismatch_detail, idx,
1359 _("address writeback expected"));
1360 return 0;
1361 }
1362 break;
1363 default:
1364 assert (opnd->addr.writeback == 0);
1365 break;
1366 }
1367 switch (type)
1368 {
1369 case AARCH64_OPND_ADDR_SIMM7:
1370 /* Scaled signed 7 bits immediate offset. */
1371 /* Get the size of the data element that is accessed, which may be
1372 different from that of the source register size,
1373 e.g. in strb/ldrb. */
1374 size = aarch64_get_qualifier_esize (opnd->qualifier);
1375 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1376 {
1377 set_offset_out_of_range_error (mismatch_detail, idx,
1378 -64 * size, 63 * size);
1379 return 0;
1380 }
1381 if (!value_aligned_p (opnd->addr.offset.imm, size))
1382 {
1383 set_unaligned_error (mismatch_detail, idx, size);
1384 return 0;
1385 }
1386 break;
1387 case AARCH64_OPND_ADDR_SIMM9:
1388 /* Unscaled signed 9 bits immediate offset. */
1389 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1390 {
1391 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1392 return 0;
1393 }
1394 break;
1395
1396 case AARCH64_OPND_ADDR_SIMM9_2:
1397 /* Unscaled signed 9 bits immediate offset, which has to be negative
1398 or unaligned. */
1399 size = aarch64_get_qualifier_esize (qualifier);
1400 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1401 && !value_aligned_p (opnd->addr.offset.imm, size))
1402 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1403 return 1;
1404 set_other_error (mismatch_detail, idx,
1405 _("negative or unaligned offset expected"));
1406 return 0;
1407
1408 case AARCH64_OPND_SIMD_ADDR_POST:
1409 /* AdvSIMD load/store multiple structures, post-index. */
1410 assert (idx == 1);
1411 if (opnd->addr.offset.is_reg)
1412 {
1413 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1414 return 1;
1415 else
1416 {
1417 set_other_error (mismatch_detail, idx,
1418 _("invalid register offset"));
1419 return 0;
1420 }
1421 }
1422 else
1423 {
1424 const aarch64_opnd_info *prev = &opnds[idx-1];
1425 unsigned num_bytes; /* total number of bytes transferred. */
1426 /* The opcode dependent area stores the number of elements in
1427 each structure to be loaded/stored. */
1428 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1429 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1430 /* Special handling of loading single structure to all lane. */
1431 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1432 * aarch64_get_qualifier_esize (prev->qualifier);
1433 else
1434 num_bytes = prev->reglist.num_regs
1435 * aarch64_get_qualifier_esize (prev->qualifier)
1436 * aarch64_get_qualifier_nelem (prev->qualifier);
1437 if ((int) num_bytes != opnd->addr.offset.imm)
1438 {
1439 set_other_error (mismatch_detail, idx,
1440 _("invalid post-increment amount"));
1441 return 0;
1442 }
1443 }
1444 break;
1445
1446 case AARCH64_OPND_ADDR_REGOFF:
1447 /* Get the size of the data element that is accessed, which may be
1448 different from that of the source register size,
1449 e.g. in strb/ldrb. */
1450 size = aarch64_get_qualifier_esize (opnd->qualifier);
1451 /* It is either no shift or shift by the binary logarithm of SIZE. */
1452 if (opnd->shifter.amount != 0
1453 && opnd->shifter.amount != (int)get_logsz (size))
1454 {
1455 set_other_error (mismatch_detail, idx,
1456 _("invalid shift amount"));
1457 return 0;
1458 }
1459 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1460 operators. */
1461 switch (opnd->shifter.kind)
1462 {
1463 case AARCH64_MOD_UXTW:
1464 case AARCH64_MOD_LSL:
1465 case AARCH64_MOD_SXTW:
1466 case AARCH64_MOD_SXTX: break;
1467 default:
1468 set_other_error (mismatch_detail, idx,
1469 _("invalid extend/shift operator"));
1470 return 0;
1471 }
1472 break;
1473
1474 case AARCH64_OPND_ADDR_UIMM12:
1475 imm = opnd->addr.offset.imm;
1476 /* Get the size of the data element that is accessed, which may be
1477 different from that of the source register size,
1478 e.g. in strb/ldrb. */
1479 size = aarch64_get_qualifier_esize (qualifier);
1480 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1481 {
1482 set_offset_out_of_range_error (mismatch_detail, idx,
1483 0, 4095 * size);
1484 return 0;
1485 }
1486 if (!value_aligned_p (opnd->addr.offset.imm, size))
1487 {
1488 set_unaligned_error (mismatch_detail, idx, size);
1489 return 0;
1490 }
1491 break;
1492
1493 case AARCH64_OPND_ADDR_PCREL14:
1494 case AARCH64_OPND_ADDR_PCREL19:
1495 case AARCH64_OPND_ADDR_PCREL21:
1496 case AARCH64_OPND_ADDR_PCREL26:
1497 imm = opnd->imm.value;
1498 if (operand_need_shift_by_two (get_operand_from_code (type)))
1499 {
1500 /* The offset value in a PC-relative branch instruction is alway
1501 4-byte aligned and is encoded without the lowest 2 bits. */
1502 if (!value_aligned_p (imm, 4))
1503 {
1504 set_unaligned_error (mismatch_detail, idx, 4);
1505 return 0;
1506 }
1507 /* Right shift by 2 so that we can carry out the following check
1508 canonically. */
1509 imm >>= 2;
1510 }
1511 size = get_operand_fields_width (get_operand_from_code (type));
1512 if (!value_fit_signed_field_p (imm, size))
1513 {
1514 set_other_error (mismatch_detail, idx,
1515 _("immediate out of range"));
1516 return 0;
1517 }
1518 break;
1519
1520 default:
1521 break;
1522 }
1523 break;
1524
1525 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1526 /* The opcode dependent area stores the number of elements in
1527 each structure to be loaded/stored. */
1528 num = get_opcode_dependent_value (opcode);
1529 switch (type)
1530 {
1531 case AARCH64_OPND_LVt:
1532 assert (num >= 1 && num <= 4);
1533 /* Unless LD1/ST1, the number of registers should be equal to that
1534 of the structure elements. */
1535 if (num != 1 && opnd->reglist.num_regs != num)
1536 {
1537 set_reg_list_error (mismatch_detail, idx, num);
1538 return 0;
1539 }
1540 break;
1541 case AARCH64_OPND_LVt_AL:
1542 case AARCH64_OPND_LEt:
1543 assert (num >= 1 && num <= 4);
1544 /* The number of registers should be equal to that of the structure
1545 elements. */
1546 if (opnd->reglist.num_regs != num)
1547 {
1548 set_reg_list_error (mismatch_detail, idx, num);
1549 return 0;
1550 }
1551 break;
1552 default:
1553 break;
1554 }
1555 break;
1556
1557 case AARCH64_OPND_CLASS_IMMEDIATE:
1558 /* Constraint check on immediate operand. */
1559 imm = opnd->imm.value;
1560 /* E.g. imm_0_31 constrains value to be 0..31. */
1561 if (qualifier_value_in_range_constraint_p (qualifier)
1562 && !value_in_range_p (imm, get_lower_bound (qualifier),
1563 get_upper_bound (qualifier)))
1564 {
1565 set_imm_out_of_range_error (mismatch_detail, idx,
1566 get_lower_bound (qualifier),
1567 get_upper_bound (qualifier));
1568 return 0;
1569 }
1570
1571 switch (type)
1572 {
1573 case AARCH64_OPND_AIMM:
1574 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1575 {
1576 set_other_error (mismatch_detail, idx,
1577 _("invalid shift operator"));
1578 return 0;
1579 }
1580 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1581 {
1582 set_other_error (mismatch_detail, idx,
1583 _("shift amount expected to be 0 or 12"));
1584 return 0;
1585 }
1586 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1587 {
1588 set_other_error (mismatch_detail, idx,
1589 _("immediate out of range"));
1590 return 0;
1591 }
1592 break;
1593
1594 case AARCH64_OPND_HALF:
1595 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1596 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1597 {
1598 set_other_error (mismatch_detail, idx,
1599 _("invalid shift operator"));
1600 return 0;
1601 }
1602 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1603 if (!value_aligned_p (opnd->shifter.amount, 16))
1604 {
1605 set_other_error (mismatch_detail, idx,
1606 _("shift amount should be a multiple of 16"));
1607 return 0;
1608 }
1609 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1610 {
1611 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1612 0, size * 8 - 16);
1613 return 0;
1614 }
1615 if (opnd->imm.value < 0)
1616 {
1617 set_other_error (mismatch_detail, idx,
1618 _("negative immediate value not allowed"));
1619 return 0;
1620 }
1621 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1622 {
1623 set_other_error (mismatch_detail, idx,
1624 _("immediate out of range"));
1625 return 0;
1626 }
1627 break;
1628
1629 case AARCH64_OPND_IMM_MOV:
1630 {
1631 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1632 imm = opnd->imm.value;
1633 assert (idx == 1);
1634 switch (opcode->op)
1635 {
1636 case OP_MOV_IMM_WIDEN:
1637 imm = ~imm;
1638 /* Fall through... */
1639 case OP_MOV_IMM_WIDE:
1640 if (!aarch64_wide_constant_p (imm, is32, NULL))
1641 {
1642 set_other_error (mismatch_detail, idx,
1643 _("immediate out of range"));
1644 return 0;
1645 }
1646 break;
1647 case OP_MOV_IMM_LOG:
1648 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1649 {
1650 set_other_error (mismatch_detail, idx,
1651 _("immediate out of range"));
1652 return 0;
1653 }
1654 break;
1655 default:
1656 assert (0);
1657 return 0;
1658 }
1659 }
1660 break;
1661
1662 case AARCH64_OPND_NZCV:
1663 case AARCH64_OPND_CCMP_IMM:
1664 case AARCH64_OPND_EXCEPTION:
1665 case AARCH64_OPND_UIMM4:
1666 case AARCH64_OPND_UIMM7:
1667 case AARCH64_OPND_UIMM3_OP1:
1668 case AARCH64_OPND_UIMM3_OP2:
1669 size = get_operand_fields_width (get_operand_from_code (type));
1670 assert (size < 32);
1671 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1672 {
1673 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1674 (1 << size) - 1);
1675 return 0;
1676 }
1677 break;
1678
1679 case AARCH64_OPND_WIDTH:
1680 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1681 && opnds[0].type == AARCH64_OPND_Rd);
1682 size = get_upper_bound (qualifier);
1683 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1684 /* lsb+width <= reg.size */
1685 {
1686 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1687 size - opnds[idx-1].imm.value);
1688 return 0;
1689 }
1690 break;
1691
1692 case AARCH64_OPND_LIMM:
1693 {
1694 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1695 uint64_t uimm = opnd->imm.value;
1696 if (opcode->op == OP_BIC)
1697 uimm = ~uimm;
1698 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1699 {
1700 set_other_error (mismatch_detail, idx,
1701 _("immediate out of range"));
1702 return 0;
1703 }
1704 }
1705 break;
1706
1707 case AARCH64_OPND_IMM0:
1708 case AARCH64_OPND_FPIMM0:
1709 if (opnd->imm.value != 0)
1710 {
1711 set_other_error (mismatch_detail, idx,
1712 _("immediate zero expected"));
1713 return 0;
1714 }
1715 break;
1716
1717 case AARCH64_OPND_SHLL_IMM:
1718 assert (idx == 2);
1719 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1720 if (opnd->imm.value != size)
1721 {
1722 set_other_error (mismatch_detail, idx,
1723 _("invalid shift amount"));
1724 return 0;
1725 }
1726 break;
1727
1728 case AARCH64_OPND_IMM_VLSL:
1729 size = aarch64_get_qualifier_esize (qualifier);
1730 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1731 {
1732 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1733 size * 8 - 1);
1734 return 0;
1735 }
1736 break;
1737
1738 case AARCH64_OPND_IMM_VLSR:
1739 size = aarch64_get_qualifier_esize (qualifier);
1740 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1741 {
1742 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1743 return 0;
1744 }
1745 break;
1746
1747 case AARCH64_OPND_SIMD_IMM:
1748 case AARCH64_OPND_SIMD_IMM_SFT:
1749 /* Qualifier check. */
1750 switch (qualifier)
1751 {
1752 case AARCH64_OPND_QLF_LSL:
1753 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1754 {
1755 set_other_error (mismatch_detail, idx,
1756 _("invalid shift operator"));
1757 return 0;
1758 }
1759 break;
1760 case AARCH64_OPND_QLF_MSL:
1761 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1762 {
1763 set_other_error (mismatch_detail, idx,
1764 _("invalid shift operator"));
1765 return 0;
1766 }
1767 break;
1768 case AARCH64_OPND_QLF_NIL:
1769 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1770 {
1771 set_other_error (mismatch_detail, idx,
1772 _("shift is not permitted"));
1773 return 0;
1774 }
1775 break;
1776 default:
1777 assert (0);
1778 return 0;
1779 }
1780 /* Is the immediate valid? */
1781 assert (idx == 1);
1782 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1783 {
1784 /* uimm8 or simm8 */
1785 if (!value_in_range_p (opnd->imm.value, -128, 255))
1786 {
1787 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1788 return 0;
1789 }
1790 }
1791 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1792 {
1793 /* uimm64 is not
1794 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1795 ffffffffgggggggghhhhhhhh'. */
1796 set_other_error (mismatch_detail, idx,
1797 _("invalid value for immediate"));
1798 return 0;
1799 }
1800 /* Is the shift amount valid? */
1801 switch (opnd->shifter.kind)
1802 {
1803 case AARCH64_MOD_LSL:
1804 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1805 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1806 {
1807 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1808 (size - 1) * 8);
1809 return 0;
1810 }
1811 if (!value_aligned_p (opnd->shifter.amount, 8))
1812 {
1813 set_unaligned_error (mismatch_detail, idx, 8);
1814 return 0;
1815 }
1816 break;
1817 case AARCH64_MOD_MSL:
1818 /* Only 8 and 16 are valid shift amount. */
1819 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1820 {
1821 set_other_error (mismatch_detail, idx,
1822 _("shift amount expected to be 0 or 16"));
1823 return 0;
1824 }
1825 break;
1826 default:
1827 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1828 {
1829 set_other_error (mismatch_detail, idx,
1830 _("invalid shift operator"));
1831 return 0;
1832 }
1833 break;
1834 }
1835 break;
1836
1837 case AARCH64_OPND_FPIMM:
1838 case AARCH64_OPND_SIMD_FPIMM:
1839 if (opnd->imm.is_fp == 0)
1840 {
1841 set_other_error (mismatch_detail, idx,
1842 _("floating-point immediate expected"));
1843 return 0;
1844 }
1845 /* The value is expected to be an 8-bit floating-point constant with
1846 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1847 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1848 instruction). */
1849 if (!value_in_range_p (opnd->imm.value, 0, 255))
1850 {
1851 set_other_error (mismatch_detail, idx,
1852 _("immediate out of range"));
1853 return 0;
1854 }
1855 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1856 {
1857 set_other_error (mismatch_detail, idx,
1858 _("invalid shift operator"));
1859 return 0;
1860 }
1861 break;
1862
1863 default:
1864 break;
1865 }
1866 break;
1867
1868 case AARCH64_OPND_CLASS_CP_REG:
1869 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1870 valid range: C0 - C15. */
1871 if (opnd->reg.regno > 15)
1872 {
1873 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1874 return 0;
1875 }
1876 break;
1877
1878 case AARCH64_OPND_CLASS_SYSTEM:
1879 switch (type)
1880 {
1881 case AARCH64_OPND_PSTATEFIELD:
1882 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1883 /* MSR SPSel, #uimm4
1884 Uses uimm4 as a control value to select the stack pointer: if
1885 bit 0 is set it selects the current exception level's stack
1886 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1887 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1888 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1889 {
1890 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1891 return 0;
1892 }
1893 break;
1894 default:
1895 break;
1896 }
1897 break;
1898
1899 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1900 /* Get the upper bound for the element index. */
1901 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1902 /* Index out-of-range. */
1903 if (!value_in_range_p (opnd->reglane.index, 0, num))
1904 {
1905 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1906 return 0;
1907 }
1908 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1909 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1910 number is encoded in "size:M:Rm":
1911 size <Vm>
1912 00 RESERVED
1913 01 0:Rm
1914 10 M:Rm
1915 11 RESERVED */
1916 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1917 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1918 {
1919 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1920 return 0;
1921 }
1922 break;
1923
1924 case AARCH64_OPND_CLASS_MODIFIED_REG:
1925 assert (idx == 1 || idx == 2);
1926 switch (type)
1927 {
1928 case AARCH64_OPND_Rm_EXT:
1929 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1930 && opnd->shifter.kind != AARCH64_MOD_LSL)
1931 {
1932 set_other_error (mismatch_detail, idx,
1933 _("extend operator expected"));
1934 return 0;
1935 }
1936 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1937 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1938 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1939 case. */
1940 if (!aarch64_stack_pointer_p (opnds + 0)
1941 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1942 {
1943 if (!opnd->shifter.operator_present)
1944 {
1945 set_other_error (mismatch_detail, idx,
1946 _("missing extend operator"));
1947 return 0;
1948 }
1949 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1950 {
1951 set_other_error (mismatch_detail, idx,
1952 _("'LSL' operator not allowed"));
1953 return 0;
1954 }
1955 }
1956 assert (opnd->shifter.operator_present /* Default to LSL. */
1957 || opnd->shifter.kind == AARCH64_MOD_LSL);
1958 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1959 {
1960 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1961 return 0;
1962 }
1963 /* In the 64-bit form, the final register operand is written as Wm
1964 for all but the (possibly omitted) UXTX/LSL and SXTX
1965 operators.
1966 N.B. GAS allows X register to be used with any operator as a
1967 programming convenience. */
1968 if (qualifier == AARCH64_OPND_QLF_X
1969 && opnd->shifter.kind != AARCH64_MOD_LSL
1970 && opnd->shifter.kind != AARCH64_MOD_UXTX
1971 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1972 {
1973 set_other_error (mismatch_detail, idx, _("W register expected"));
1974 return 0;
1975 }
1976 break;
1977
1978 case AARCH64_OPND_Rm_SFT:
1979 /* ROR is not available to the shifted register operand in
1980 arithmetic instructions. */
1981 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1982 {
1983 set_other_error (mismatch_detail, idx,
1984 _("shift operator expected"));
1985 return 0;
1986 }
1987 if (opnd->shifter.kind == AARCH64_MOD_ROR
1988 && opcode->iclass != log_shift)
1989 {
1990 set_other_error (mismatch_detail, idx,
1991 _("'ROR' operator not allowed"));
1992 return 0;
1993 }
1994 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1995 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1996 {
1997 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1998 return 0;
1999 }
2000 break;
2001
2002 default:
2003 break;
2004 }
2005 break;
2006
2007 default:
2008 break;
2009 }
2010
2011 return 1;
2012 }
2013
2014 /* Main entrypoint for the operand constraint checking.
2015
2016 Return 1 if operands of *INST meet the constraint applied by the operand
2017 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2018 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2019 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2020 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2021 error kind when it is notified that an instruction does not pass the check).
2022
2023 Un-determined operand qualifiers may get established during the process. */
2024
2025 int
2026 aarch64_match_operands_constraint (aarch64_inst *inst,
2027 aarch64_operand_error *mismatch_detail)
2028 {
2029 int i;
2030
2031 DEBUG_TRACE ("enter");
2032
2033 /* Match operands' qualifier.
2034 *INST has already had qualifier establish for some, if not all, of
2035 its operands; we need to find out whether these established
2036 qualifiers match one of the qualifier sequence in
2037 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2038 with the corresponding qualifier in such a sequence.
2039 Only basic operand constraint checking is done here; the more thorough
2040 constraint checking will carried out by operand_general_constraint_met_p,
2041 which has be to called after this in order to get all of the operands'
2042 qualifiers established. */
2043 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2044 {
2045 DEBUG_TRACE ("FAIL on operand qualifier matching");
2046 if (mismatch_detail)
2047 {
2048 /* Return an error type to indicate that it is the qualifier
2049 matching failure; we don't care about which operand as there
2050 are enough information in the opcode table to reproduce it. */
2051 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2052 mismatch_detail->index = -1;
2053 mismatch_detail->error = NULL;
2054 }
2055 return 0;
2056 }
2057
2058 /* Match operands' constraint. */
2059 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2060 {
2061 enum aarch64_opnd type = inst->opcode->operands[i];
2062 if (type == AARCH64_OPND_NIL)
2063 break;
2064 if (inst->operands[i].skip)
2065 {
2066 DEBUG_TRACE ("skip the incomplete operand %d", i);
2067 continue;
2068 }
2069 if (operand_general_constraint_met_p (inst->operands, i, type,
2070 inst->opcode, mismatch_detail) == 0)
2071 {
2072 DEBUG_TRACE ("FAIL on operand %d", i);
2073 return 0;
2074 }
2075 }
2076
2077 DEBUG_TRACE ("PASS");
2078
2079 return 1;
2080 }
2081
2082 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2083 Also updates the TYPE of each INST->OPERANDS with the corresponding
2084 value of OPCODE->OPERANDS.
2085
2086 Note that some operand qualifiers may need to be manually cleared by
2087 the caller before it further calls the aarch64_opcode_encode; by
2088 doing this, it helps the qualifier matching facilities work
2089 properly. */
2090
2091 const aarch64_opcode*
2092 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2093 {
2094 int i;
2095 const aarch64_opcode *old = inst->opcode;
2096
2097 inst->opcode = opcode;
2098
2099 /* Update the operand types. */
2100 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2101 {
2102 inst->operands[i].type = opcode->operands[i];
2103 if (opcode->operands[i] == AARCH64_OPND_NIL)
2104 break;
2105 }
2106
2107 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2108
2109 return old;
2110 }
2111
2112 int
2113 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2114 {
2115 int i;
2116 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2117 if (operands[i] == operand)
2118 return i;
2119 else if (operands[i] == AARCH64_OPND_NIL)
2120 break;
2121 return -1;
2122 }
2123
/* Integer register name table, indexed as int_reg[has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1
   Register number 31 names either the stack pointer (wsp/sp) or the
   zero register (wzr/xzr) depending on the first index.  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
#undef R64
#undef R32
};
2151
2152 /* Return the integer register name.
2153 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2154
2155 static inline const char *
2156 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2157 {
2158 const int has_zr = sp_reg_p ? 0 : 1;
2159 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2160 return int_reg[has_zr][is_64][regno];
2161 }
2162
2163 /* Like get_int_reg_name, but IS_64 is always 1. */
2164
2165 static inline const char *
2166 get_64bit_int_reg_name (int regno, int sp_reg_p)
2167 {
2168 const int has_zr = sp_reg_p ? 0 : 1;
2169 return int_reg[has_zr][1][regno];
2170 }
2171
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows bit-level reinterpretation between the integer image
   produced by expand_fp_imm and the corresponding IEEE value.  */

/* 64-bit image <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit image <-> single-precision float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to a single-precision image
   (see expand_fp_imm), hence the 32-bit layout here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2191
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  const uint32_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t b6 = frac >> 6;		/* imm8<6> */
  const uint32_t b6_x4 = b6 ? 0xf : 0;		/* Replicate(imm8<6>,4) */
  uint64_t result = 0;

  if (size == 8)
    {
      /* Compose the high 32 bits, then shift into place.  */
      uint32_t hi = (sign << (63 - 32))			/* imm8<7> */
	| ((b6 ^ 1) << (62 - 32))			/* NOT(imm8<6>) */
	| (b6_x4 << (58 - 32))
	| (b6 << (57 - 32)) | (b6 << (56 - 32))
	| (b6 << (55 - 32))				/* Replicate(imm8<6>,7) */
	| (frac << (48 - 32));				/* imm8<6>:imm8<5:0> */
      result = (uint64_t) hi << 32;
    }
  else if (size == 4 || size == 2)
    {
      result = (sign << 31)				/* imm8<7> */
	| ((b6 ^ 1) << 30)				/* NOT(imm8<6>) */
	| (b6_x4 << 26)					/* Replicate(imm8<6>,4) */
	| (frac << 19);					/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
2235
2236 /* Produce the string representation of the register list operand *OPND
2237 in the buffer pointed by BUF of size SIZE. */
2238 static void
2239 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2240 {
2241 const int num_regs = opnd->reglist.num_regs;
2242 const int first_reg = opnd->reglist.first_regno;
2243 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2244 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2245 char tb[8]; /* Temporary buffer. */
2246
2247 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2248 assert (num_regs >= 1 && num_regs <= 4);
2249
2250 /* Prepare the index if any. */
2251 if (opnd->reglist.has_index)
2252 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2253 else
2254 tb[0] = '\0';
2255
2256 /* The hyphenated form is preferred for disassembly if there are
2257 more than two registers in the list, and the register numbers
2258 are monotonically increasing in increments of one. */
2259 if (num_regs > 2 && last_reg > first_reg)
2260 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2261 last_reg, qlf_name, tb);
2262 else
2263 {
2264 const int reg0 = first_reg;
2265 const int reg1 = (first_reg + 1) & 0x1f;
2266 const int reg2 = (first_reg + 2) & 0x1f;
2267 const int reg3 = (first_reg + 3) & 0x1f;
2268
2269 switch (num_regs)
2270 {
2271 case 1:
2272 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2273 break;
2274 case 2:
2275 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2276 reg1, qlf_name, tb);
2277 break;
2278 case 3:
2279 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2280 reg1, qlf_name, reg2, qlf_name, tb);
2281 break;
2282 case 4:
2283 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2284 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2285 reg3, qlf_name, tb);
2286 break;
2287 }
2288 }
2289 }
2290
2291 /* Produce the string representation of the register offset address operand
2292 *OPND in the buffer pointed by BUF of size SIZE. */
2293 static void
2294 print_register_offset_address (char *buf, size_t size,
2295 const aarch64_opnd_info *opnd)
2296 {
2297 const size_t tblen = 16;
2298 char tb[tblen]; /* Temporary buffer. */
2299 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2300 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2301 bfd_boolean print_extend_p = TRUE;
2302 bfd_boolean print_amount_p = TRUE;
2303 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2304
2305 switch (opnd->shifter.kind)
2306 {
2307 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2308 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2309 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2310 case AARCH64_MOD_SXTX: break;
2311 default: assert (0);
2312 }
2313
2314 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2315 || !opnd->shifter.amount_present))
2316 {
2317 /* Not print the shift/extend amount when the amount is zero and
2318 when it is not the special case of 8-bit load/store instruction. */
2319 print_amount_p = FALSE;
2320 /* Likewise, no need to print the shift operator LSL in such a
2321 situation. */
2322 if (lsl_p)
2323 print_extend_p = FALSE;
2324 }
2325
2326 /* Prepare for the extend/shift. */
2327 if (print_extend_p)
2328 {
2329 if (print_amount_p)
2330 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2331 else
2332 snprintf (tb, tblen, ",%s", shift_name);
2333 }
2334 else
2335 tb[0] = '\0';
2336
2337 snprintf (buf, size, "[%s,%s%s]",
2338 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2339 get_int_reg_name (opnd->addr.offset.regno,
2340 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2341 0 /* sp_reg_p */),
2342 tb);
2343 }
2344
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr;

  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    /* General-purpose registers; R31 is the zero register here.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    /* General-purpose registers where R31 names the stack pointer.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* Omit a zero LSL, the canonical no-shift form.  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* FP/SIMD scalar registers; the qualifier name supplies the
       width prefix (e.g. "s", "d").  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    /* AdvSIMD vector registers, e.g. "v0.4s".  */
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* AdvSIMD vector element, e.g. "v0.s[1]".  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    /* Vector register lists, e.g. "{v0.16b-v3.16b}".  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    /* Plain signed immediates printed in decimal.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* The width of the destination (operand 0) decides how the
	 immediate is displayed: hex plus a decimal comment.  */
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      /* Expand the 8-bit encoded immediate to the element size of the
	 destination (operand 0) and print it as a decimal float.  */
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	  {
	    half_conv_t c;
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      /* ADRP targets a 4KiB page: mask the low 12 bits of the PC.  */
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address (buf, size, opnd);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.writeback)
	{
	  /* Pre-indexed ("[Xn,#imm]!") vs post-indexed ("[Xn],#imm").  */
	  if (opnd->addr.preind)
	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
	}
      else
	{
	  if (opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s]", name);
	}
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      /* Look for a named, non-deprecated entry in the table; fall back
	 to the generic s<op0>_<op1>_c<Cn>_c<Cm>_<op2> spelling.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      /* Named prefetch operations print their name; unallocated
	 encodings fall back to the raw value.  */
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
2739
/* Pack a system register encoding op0:op1:CRn:CRm:op2 into one value.
   After the final >> 5 the layout is: op0 in bits [15:14], op1 in bits
   [13:11], CRn in bits [10:7], CRm in bits [6:3] and op2 in bits [2:0] --
   matching the field extraction done when printing AARCH64_OPND_SYSREG.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm coprocessor register numbers used in
   the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1	/* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2		/* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4		/* System instruction register <Xt>
				   operand.  */
2781
2782 /* TODO there are two more issues need to be resolved
2783 1. handle read-only and write-only system registers
2784 2. handle cpu-implementation-defined system registers. */
2785 const aarch64_sys_reg aarch64_sys_regs [] =
2786 {
2787 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2788 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
2789 { "elr_el1", CPEN_(0,C0,1), 0 },
2790 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
2791 { "sp_el0", CPEN_(0,C1,0), 0 },
2792 { "spsel", CPEN_(0,C2,0), 0 },
2793 { "daif", CPEN_(3,C2,1), 0 },
2794 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2795 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
2796 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
2797 { "nzcv", CPEN_(3,C2,0), 0 },
2798 { "fpcr", CPEN_(3,C4,0), 0 },
2799 { "fpsr", CPEN_(3,C4,1), 0 },
2800 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2801 { "dlr_el0", CPEN_(3,C5,1), 0 },
2802 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2803 { "elr_el2", CPEN_(4,C0,1), 0 },
2804 { "sp_el1", CPEN_(4,C1,0), 0 },
2805 { "spsr_irq", CPEN_(4,C3,0), 0 },
2806 { "spsr_abt", CPEN_(4,C3,1), 0 },
2807 { "spsr_und", CPEN_(4,C3,2), 0 },
2808 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2809 { "spsr_el3", CPEN_(6,C0,0), 0 },
2810 { "elr_el3", CPEN_(6,C0,1), 0 },
2811 { "sp_el2", CPEN_(6,C1,0), 0 },
2812 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2813 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2814 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2815 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2816 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2817 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2818 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2819 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2820 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2821 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2822 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2823 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2824 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2825 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2826 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2827 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2828 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
2829 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2830 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2831 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2832 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2833 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2834 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2835 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2836 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2837 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2838 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2839 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2840 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2841 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2842 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2843 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2844 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2845 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2846 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
2847 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
2848 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2849 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2850 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2851 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2852 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2853 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2854 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2855 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2856 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
2857 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
2858 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2859 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2860 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2861 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
2862 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
2863 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2864 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2865 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2866 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2867 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2868 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2869 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2870 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2871 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2872 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2873 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
2874 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
2875 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
2876 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
2877 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
2878 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2879 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2880 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2881 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
2882 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
2883 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2884 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2885 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2886 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2887 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2888 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
2889 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
2890 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
2891 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
2892 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2893 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2894 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
2895 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
2896 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
2897 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
2898 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
2899 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
2900 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
2901 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
2902 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
2903 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
2904 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
2905 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
2906 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2907 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2908 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
2909 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
2910 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2911 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2912 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2913 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2914 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
2915 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
2916 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2917 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2918 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
2919 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
2920 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2921 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2922 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
2923 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
2924 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2925 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2926 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2927 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2928 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2929 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2930 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
2931 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
2932 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
2933 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
2934 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
2935 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
2936 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2937 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2938 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2939 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2940 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2941 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2942 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2943 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2944 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2945 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2946 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
2947 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
2948 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2949 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
2950 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
2951 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
2952 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
2953 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
2954 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
2955 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
2956 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
2957 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
2958 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
2959 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
2960 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
2961 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2962 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2963 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2964 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2965 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2966 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
2967 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
2968 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
2969 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
2970 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2971 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
2972 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
2973 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
2974 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
2975 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
2976 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
2977 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
2978 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
2979 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
2980 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
2981 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
2982 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
2983 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
2984 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
2985 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
2986 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
2987 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
2988 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
2989 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
2990 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
2991 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
2992 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
2993 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
2994 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
2995 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
2996 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
2997 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
2998 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
2999 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3000 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3001 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3002 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3003 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3004 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3005 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3006 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3007 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3008 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3009 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3010 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3011 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3012 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3013 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3014 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3015 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3016 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3017 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3018 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3019 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3020 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3021 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3022 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3023 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3024 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3025 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3026 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3027 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3028 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3029 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3030 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3031 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3032 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3033 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3034 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3035 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3036 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3037 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3038 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3039 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3040 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3041 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3042 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3043 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3044 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3045 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3046 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3047 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3048 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3049 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3050 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3051 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3052 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3053 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3054 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3055 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3056 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3057 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3058 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3059 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3060 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3061 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3062 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3063 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3064 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3065 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3066 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3067 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3068 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3069 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3070 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3071 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3072 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3073 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3074 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3075 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3076 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3077 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3078 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3079 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3080 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3081 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3082 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3083 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3084 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3085 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3086 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3087 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3088 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3089 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3090 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3091 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3092 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3093 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3094 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3095 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3096 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3097 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3098 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3099 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3100 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3101 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3102 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3103 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3104 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3105 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3106 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3107 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3108 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3109 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3110 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3111 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3112 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3113 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3114 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3115 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3116 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3117 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3118 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3119 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3120 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3121 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3122 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3123 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3124 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3125 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3126 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3127 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3128 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3129 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3130 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3131 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3132 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3133 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3134 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3135 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3136 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3137 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3138 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3139 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3140 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3141 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3142 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3143 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3144 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3145 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3146 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3147 { 0, CPENC(0,0,0,0,0), 0 },
3148 };
3149
3150 bfd_boolean
3151 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3152 {
3153 return (reg->flags & F_DEPRECATED) != 0;
3154 }
3155
3156 bfd_boolean
3157 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3158 const aarch64_sys_reg *reg)
3159 {
3160 if (!(reg->flags & F_ARCHEXT))
3161 return TRUE;
3162
3163 /* PAN. Values are from aarch64_sys_regs. */
3164 if (reg->value == CPEN_(0,C2,3)
3165 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3166 return FALSE;
3167
3168 /* Virtualization host extensions: system registers. */
3169 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3170 || reg->value == CPENC (3, 4, C13, C0, 1)
3171 || reg->value == CPENC (3, 4, C14, C3, 0)
3172 || reg->value == CPENC (3, 4, C14, C3, 1)
3173 || reg->value == CPENC (3, 4, C14, C3, 2))
3174 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3175 return FALSE;
3176
3177 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3178 if ((reg->value == CPEN_ (5, C0, 0)
3179 || reg->value == CPEN_ (5, C0, 1)
3180 || reg->value == CPENC (3, 5, C1, C0, 0)
3181 || reg->value == CPENC (3, 5, C1, C0, 2)
3182 || reg->value == CPENC (3, 5, C2, C0, 0)
3183 || reg->value == CPENC (3, 5, C2, C0, 1)
3184 || reg->value == CPENC (3, 5, C2, C0, 2)
3185 || reg->value == CPENC (3, 5, C5, C1, 0)
3186 || reg->value == CPENC (3, 5, C5, C1, 1)
3187 || reg->value == CPENC (3, 5, C5, C2, 0)
3188 || reg->value == CPENC (3, 5, C6, C0, 0)
3189 || reg->value == CPENC (3, 5, C10, C2, 0)
3190 || reg->value == CPENC (3, 5, C10, C3, 0)
3191 || reg->value == CPENC (3, 5, C12, C0, 0)
3192 || reg->value == CPENC (3, 5, C13, C0, 1)
3193 || reg->value == CPENC (3, 5, C14, C1, 0))
3194 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3195 return FALSE;
3196
3197 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3198 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3199 || reg->value == CPENC (3, 5, C14, C2, 1)
3200 || reg->value == CPENC (3, 5, C14, C2, 2)
3201 || reg->value == CPENC (3, 5, C14, C3, 0)
3202 || reg->value == CPENC (3, 5, C14, C3, 1)
3203 || reg->value == CPENC (3, 5, C14, C3, 2))
3204 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3205 return FALSE;
3206
3207 /* ARMv8.2 features. */
3208
3209 /* ID_AA64MMFR2_EL1. */
3210 if (reg->value == CPENC (3, 0, C0, C7, 2)
3211 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3212 return FALSE;
3213
3214 /* PSTATE.UAO. */
3215 if (reg->value == CPEN_ (0, C2, 4)
3216 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3217 return FALSE;
3218
3219 /* RAS extension. */
3220
3221 /* ERRIDR_EL1 and ERRSELR_EL1. */
3222 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3223 || reg->value == CPENC (3, 0, C5, C3, 1))
3224 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3225 return FALSE;
3226
3227 /* ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1, ERXMISC0_EL1 AND
3228 ERXMISC1_EL1. */
3229 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3230 || reg->value == CPENC (3, 0, C5, C3 ,1)
3231 || reg->value == CPENC (3, 0, C5, C3, 2)
3232 || reg->value == CPENC (3, 0, C5, C3, 3)
3233 || reg->value == CPENC (3, 0, C5, C5, 0)
3234 || reg->value == CPENC (3, 0, C5, C5, 1))
3235 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3236 return FALSE;
3237
3238 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3239 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3240 || reg->value == CPENC (3, 0, C12, C1, 1)
3241 || reg->value == CPENC (3, 4, C12, C1, 1))
3242 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3243 return FALSE;
3244
3245 /* Statistical Profiling extension. */
3246 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3247 || reg->value == CPENC (3, 0, C9, C10, 1)
3248 || reg->value == CPENC (3, 0, C9, C10, 3)
3249 || reg->value == CPENC (3, 0, C9, C10, 7)
3250 || reg->value == CPENC (3, 0, C9, C9, 0)
3251 || reg->value == CPENC (3, 0, C9, C9, 2)
3252 || reg->value == CPENC (3, 0, C9, C9, 3)
3253 || reg->value == CPENC (3, 0, C9, C9, 4)
3254 || reg->value == CPENC (3, 0, C9, C9, 5)
3255 || reg->value == CPENC (3, 0, C9, C9, 6)
3256 || reg->value == CPENC (3, 0, C9, C9, 7)
3257 || reg->value == CPENC (3, 4, C9, C9, 0)
3258 || reg->value == CPENC (3, 5, C9, C9, 0))
3259 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3260 return FALSE;
3261
3262 return TRUE;
3263 }
3264
/* PSTATE field operands accepted by the MSR (immediate) instruction.
   The value is the field's encoding; entries flagged F_ARCHEXT are only
   valid when the matching architecture extension is enabled (checked by
   aarch64_pstatefield_supported_p).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },	/* Requires AARCH64_FEATURE_PAN.  */
  { "uao", 0x03, F_ARCHEXT },	/* Requires AARCH64_FEATURE_V8_2.  */
  { 0, CPENC(0,0,0,0,0), 0 },	/* Sentinel terminating the table.  */
};
3274
3275 bfd_boolean
3276 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3277 const aarch64_sys_reg *reg)
3278 {
3279 if (!(reg->flags & F_ARCHEXT))
3280 return TRUE;
3281
3282 /* PAN. Values are from aarch64_pstatefields. */
3283 if (reg->value == 0x04
3284 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3285 return FALSE;
3286
3287 /* UAO. Values are from aarch64_pstatefields. */
3288 if (reg->value == 0x03
3289 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3290 return FALSE;
3291
3292 return TRUE;
3293 }
3294
/* Operands for the IC (instruction cache maintenance) system
   instruction.  F_HASXT marks operations that take an Xt register
   operand (see aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel terminating the table.  */
};
3302
/* Operands for the DC (data cache maintenance) system instruction.
   All take an Xt register operand (F_HASXT); "cvap" additionally needs
   an architecture extension (gated on AARCH64_FEATURE_V8_2 in
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel terminating the table.  */
};
3316
/* Operands for the AT (address translation) system instruction.  All
   take an Xt register operand (F_HASXT); "s1e1rp"/"s1e1wp" additionally
   need an architecture extension (gated on AARCH64_FEATURE_V8_2 in
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel terminating the table.  */
};
3335
/* Operands for the TLBI (TLB invalidate) system instruction.  Entries
   flagged F_HASXT take an Xt register operand; the others take none.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel terminating the table.  */
};
3372
3373 bfd_boolean
3374 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3375 {
3376 return (sys_ins_reg->flags & F_HASXT) != 0;
3377 }
3378
3379 extern bfd_boolean
3380 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3381 const aarch64_sys_ins_reg *reg)
3382 {
3383 if (!(reg->flags & F_ARCHEXT))
3384 return TRUE;
3385
3386 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3387 if (reg->value == CPENS (3, C7, C12, 1)
3388 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3389 return FALSE;
3390
3391 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3392 if ((reg->value == CPENS (0, C7, C9, 0)
3393 || reg->value == CPENS (0, C7, C9, 1))
3394 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3395 return FALSE;
3396
3397 return TRUE;
3398 }
3399
3400 #undef C0
3401 #undef C1
3402 #undef C2
3403 #undef C3
3404 #undef C4
3405 #undef C5
3406 #undef C6
3407 #undef C7
3408 #undef C8
3409 #undef C9
3410 #undef C10
3411 #undef C11
3412 #undef C12
3413 #undef C13
3414 #undef C14
3415 #undef C15
3416
3417 /* Include the opcode description table as well as the operand description
3418 table. */
3419 #include "aarch64-tbl.h"
3420