aarch64-opc.c revision 1.1 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
#ifdef DEBUG_AARCH64
/* Non-zero to enable verbose tracing via DEBUG_TRACE / dump_* helpers.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
/* Categories of AdvSIMD operand-qualifier sequences; used (via
   significant_operand_index) to pick the operand whose qualifier
   encodes/decodes the size:Q fields.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
64
/* For each data pattern, the index of the operand that carries the
   significant size:Q information.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
73
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.
   Note the order of the checks matters: DP_VECTOR_3SAME must be tried
   before DP_VECTOR_LONG/WIDE since its conditions are the most specific.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.
	 Destination element size is twice the source element size.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.
	 First two operands match; the third is half the element size.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>: scalar destination, vector source.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
136
/* Instruction bit-field descriptions as { lsb, width } pairs.
   Presumably indexed by the field-kind enumeration declared in the
   companion header — TODO confirm the index order against that enum.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
};
203
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type)
206 {
207 return aarch64_operands[type].op_class;
208 }
209
210 const char*
211 aarch64_get_operand_name (enum aarch64_opnd type)
212 {
213 return aarch64_operands[type].name;
214 }
215
216 /* Get operand description string.
217 This is usually for the diagnosis purpose. */
218 const char*
219 aarch64_get_operand_desc (enum aarch64_opnd type)
220 {
221 return aarch64_operands[type].desc;
222 }
223
/* Table of all conditional affixes; indexed by the 4-bit condition code.
   Entries with several names are aliases for the same encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
244
245 const aarch64_cond*
246 get_cond_from_value (aarch64_insn value)
247 {
248 assert (value < 16);
249 return &aarch64_conds[(unsigned int) value];
250 }
251
252 const aarch64_cond*
253 get_inverted_cond (const aarch64_cond *cond)
254 {
255 return &aarch64_conds[cond->value ^ 0x1];
256 }
257
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   N.B. the relative ordering of entries matters:
   aarch64_get_operand_modifier_from_value computes kinds as offsets from
   AARCH64_MOD_LSL / AARCH64_MOD_UXTB.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    /* Sentinel terminating the table.  */
    {NULL, 0},
};
281
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284 {
285 return desc - aarch64_operand_modifiers;
286 }
287
288 aarch64_insn
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290 {
291 return aarch64_operand_modifiers[kind].value;
292 }
293
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value,
296 bfd_boolean extend_p)
297 {
298 if (extend_p == TRUE)
299 return AARCH64_MOD_UXTB + value;
300 else
301 return AARCH64_MOD_LSL - value;
302 }
303
304 bfd_boolean
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 {
307 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308 ? TRUE : FALSE;
309 }
310
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 {
314 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315 ? TRUE : FALSE;
316 }
317
/* Memory barrier option names, indexed by their 4-bit encoded value.
   The "#0x.." entries are reserved encodings with no symbolic name.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
337
/* Prefetch operation names, indexed by their 5-bit encoded value.
   NULL entries mark unallocated encodings.

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
378
379 /* Utilities on value constraint. */
381
/* Return 1 iff LOW <= VALUE <= HIGH; 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high;
}
387
/* Return 1 iff VALUE is a multiple of ALIGN.
   ALIGN is assumed to be a power of two (the check uses a bit mask).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t mask = align - 1;

  return (value & mask) == 0;
}
393
/* Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).
   WIDTH must be below 32 (and at least 1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << (width - 1);

    return -lim <= value && value < lim;
  }
}
407
/* Return 1 iff VALUE is representable as a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << width;

    return 0 <= value && value < lim;
  }
}
421
422 /* Return 1 if OPERAND is SP or WSP. */
423 int
424 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
425 {
426 return ((aarch64_get_operand_class (operand->type)
427 == AARCH64_OPND_CLASS_INT_REG)
428 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
429 && operand->reg.regno == 31);
430 }
431
432 /* Return 1 if OPERAND is XZR or WZP. */
433 int
434 aarch64_zero_register_p (const aarch64_opnd_info *operand)
435 {
436 return ((aarch64_get_operand_class (operand->type)
437 == AARCH64_OPND_CLASS_INT_REG)
438 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
439 && operand->reg.regno == 31);
440 }
441
442 /* Return true if the operand *OPERAND that has the operand code
443 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
444 qualified by the qualifier TARGET. */
445
446 static inline int
447 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
448 aarch64_opnd_qualifier_t target)
449 {
450 switch (operand->qualifier)
451 {
452 case AARCH64_OPND_QLF_W:
453 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
454 return 1;
455 break;
456 case AARCH64_OPND_QLF_X:
457 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
458 return 1;
459 break;
460 case AARCH64_OPND_QLF_WSP:
461 if (target == AARCH64_OPND_QLF_W
462 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
463 return 1;
464 break;
465 case AARCH64_OPND_QLF_SP:
466 if (target == AARCH64_OPND_QLF_X
467 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
468 return 1;
469 break;
470 default:
471 break;
472 }
473
474 return 0;
475 }
476
477 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
478 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
479
480 Return NIL if more than one expected qualifiers are found. */
481
482 aarch64_opnd_qualifier_t
483 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
484 int idx,
485 const aarch64_opnd_qualifier_t known_qlf,
486 int known_idx)
487 {
488 int i, saved_i;
489
490 /* Special case.
491
492 When the known qualifier is NIL, we have to assume that there is only
493 one qualifier sequence in the *QSEQ_LIST and return the corresponding
494 qualifier directly. One scenario is that for instruction
495 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
496 which has only one possible valid qualifier sequence
497 NIL, S_D
498 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
499 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
500
501 Because the qualifier NIL has dual roles in the qualifier sequence:
502 it can mean no qualifier for the operand, or the qualifer sequence is
503 not in use (when all qualifiers in the sequence are NILs), we have to
504 handle this special case here. */
505 if (known_qlf == AARCH64_OPND_NIL)
506 {
507 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
508 return qseq_list[0][idx];
509 }
510
511 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
512 {
513 if (qseq_list[i][known_idx] == known_qlf)
514 {
515 if (saved_i != -1)
516 /* More than one sequences are found to have KNOWN_QLF at
517 KNOWN_IDX. */
518 return AARCH64_OPND_NIL;
519 saved_i = i;
520 }
521 }
522
523 return qseq_list[saved_i][idx];
524 }
525
/* Broad categories of operand qualifiers; stored in the `kind' field of
   struct operand_qualifier_data and tested by the *_qualifier_p helpers.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};
533
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT:    element size, number of elements, encoding value;
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused;
     otherwise:          all three unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
546
547 /* Indexed by the operand qualifier enumerators. */
548 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
549 {
550 {0, 0, 0, "NIL", OQK_NIL},
551
552 /* Operand variant qualifiers.
553 First 3 fields:
554 element size, number of elements and common value for encoding. */
555
556 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
558 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
559 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
560
561 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
562 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
563 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
564 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
565 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
566
567 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
568 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
569 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
570 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
571 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
572 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
573 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
574 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
575 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
576
577 /* Qualifiers constraining the value range.
578 First 3 fields:
579 Lower bound, higher bound, unused. */
580
581 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
582 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
583 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
584 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
585 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
586 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
587
588 /* Qualifiers for miscellaneous purpose.
589 First 3 fields:
590 unused, unused and unused. */
591
592 {0, 0, 0, "lsl", 0},
593 {0, 0, 0, "msl", 0},
594
595 {0, 0, 0, "retrieving", 0},
596 };
597
598 static inline bfd_boolean
599 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
600 {
601 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
602 ? TRUE : FALSE;
603 }
604
605 static inline bfd_boolean
606 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
607 {
608 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
609 ? TRUE : FALSE;
610 }
611
612 const char*
613 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
614 {
615 return aarch64_opnd_qualifiers[qualifier].desc;
616 }
617
618 /* Given an operand qualifier, return the expected data element size
619 of a qualified operand. */
620 unsigned char
621 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
622 {
623 assert (operand_variant_qualifier_p (qualifier) == TRUE);
624 return aarch64_opnd_qualifiers[qualifier].data0;
625 }
626
627 unsigned char
628 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
629 {
630 assert (operand_variant_qualifier_p (qualifier) == TRUE);
631 return aarch64_opnd_qualifiers[qualifier].data1;
632 }
633
634 aarch64_insn
635 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
636 {
637 assert (operand_variant_qualifier_p (qualifier) == TRUE);
638 return aarch64_opnd_qualifiers[qualifier].data2;
639 }
640
641 static int
642 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
643 {
644 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
645 return aarch64_opnd_qualifiers[qualifier].data0;
646 }
647
648 static int
649 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
650 {
651 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
652 return aarch64_opnd_qualifiers[qualifier].data1;
653 }
654
655 #ifdef DEBUG_AARCH64
/* printf-style debug trace; prefixes "#### " and appends a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;

  printf ("#### ");
  va_start (ap, str);
  vprintf (str, ap);
  va_end (ap);
  printf ("\n");
}
666
667 static inline void
668 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
669 {
670 int i;
671 printf ("#### \t");
672 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
673 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
674 printf ("\n");
675 }
676
677 static void
678 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
679 const aarch64_opnd_qualifier_t *qualifier)
680 {
681 int i;
682 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
683
684 aarch64_verbose ("dump_match_qualifiers:");
685 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
686 curr[i] = opnd[i].qualifier;
687 dump_qualifier_sequence (curr);
688 aarch64_verbose ("against");
689 dump_qualifier_sequence (qualifier);
690 }
691 #endif /* DEBUG_AARCH64 */
692
693 /* TODO improve this, we can have an extra field at the runtime to
694 store the number of operands rather than calculating it every time. */
695
696 int
697 aarch64_num_of_operands (const aarch64_opcode *opcode)
698 {
699 int i = 0;
700 const enum aarch64_opnd *opnds = opcode->operands;
701 while (opnds[i++] != AARCH64_OPND_NIL)
702 ;
703 --i;
704 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
705 return i;
706 }
707
708 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
709 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
710
711 N.B. on the entry, it is very likely that only some operands in *INST
712 have had their qualifiers been established.
713
714 If STOP_AT is not -1, the function will only try to match
715 the qualifier sequence for operands before and including the operand
716 of index STOP_AT; and on success *RET will only be filled with the first
717 (STOP_AT+1) qualifiers.
718
719 A couple examples of the matching algorithm:
720
721 X,W,NIL should match
722 X,W,NIL
723
724 NIL,NIL should match
725 X ,NIL
726
727 Apart from serving the main encoding routine, this can also be called
728 during or after the operand decoding. */
729
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* N.B. *RET is left untouched in this case; callers are expected
	 not to consume it — TODO confirm against call sites.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.
     Note QUALIFIERS_LIST is advanced together with I so that, after the
     loop breaks with FOUND == 1, *QUALIFIERS_LIST is the matched
     sequence — the fill-in code below relies on this.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An empty sequence at position 0 means the opcode takes no
	     qualifiers at all — that counts as a successful match.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET: the matched qualifiers up to STOP_AT,
	 then NIL padding to AARCH64_MAX_OPND_NUM.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
832
833 /* Operand qualifier matching and resolving.
834
835 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
836 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
837
838 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
839 succeeds. */
840
841 static int
842 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
843 {
844 int i;
845 aarch64_opnd_qualifier_seq_t qualifiers;
846
847 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
848 qualifiers))
849 {
850 DEBUG_TRACE ("matching FAIL");
851 return 0;
852 }
853
854 /* Update the qualifiers. */
855 if (update_p == TRUE)
856 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
857 {
858 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
859 break;
860 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
861 "update %s with %s for operand %d",
862 aarch64_get_qualifier_name (inst->operands[i].qualifier),
863 aarch64_get_qualifier_name (qualifiers[i]), i);
864 inst->operands[i].qualifier = qualifiers[i];
865 }
866
867 DEBUG_TRACE ("matching SUCCESS");
868 return 1;
869 }
870
871 /* Return TRUE if VALUE is a wide constant that can be moved into a general
872 register by MOVZ.
873
874 IS32 indicates whether value is a 32-bit immediate or not.
875 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
876 amount will be returned in *SHIFT_AMOUNT. */
877
878 bfd_boolean
879 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
880 {
881 int amount;
882
883 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
884
885 if (is32)
886 {
887 /* Allow all zeros or all ones in top 32-bits, so that
888 32-bit constant expressions like ~0x80000000 are
889 permitted. */
890 uint64_t ext = value;
891 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
892 /* Immediate out of range. */
893 return FALSE;
894 value &= (int64_t) 0xffffffff;
895 }
896
897 /* first, try movz then movn */
898 amount = -1;
899 if ((value & ((int64_t) 0xffff << 0)) == value)
900 amount = 0;
901 else if ((value & ((int64_t) 0xffff << 16)) == value)
902 amount = 16;
903 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
904 amount = 32;
905 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
906 amount = 48;
907
908 if (amount == -1)
909 {
910 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
911 return FALSE;
912 }
913
914 if (shift_amount != NULL)
915 *shift_amount = amount;
916
917 DEBUG_TRACE ("exit TRUE with amount %d", amount);
918
919 return TRUE;
920 }
921
922 /* Build the accepted values for immediate logical SIMD instructions.
923
924 The standard encodings of the immediate value are:
925 N imms immr SIMD size R S
926 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
927 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
928 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
929 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
930 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
931 0 11110s 00000r 2 UInt(r) UInt(s)
932 where all-ones value of S is reserved.
933
934 Let's call E the SIMD size.
935
936 The immediate value is: S+1 bits '1' rotated to the right by R.
937
938 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
939 (remember S != E - 1). */
940
/* Total count of valid logical immediates: 64*63 + 32*31 + ... + 2*1
   (see the derivation in the comment above).  */
#define TOTAL_IMM_NB  5334

/* One (immediate value, standard encoding) pair.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lazily built by build_immediate_table, sorted by IMM for bsearch.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
950
951 static int
952 simd_imm_encoding_cmp(const void *i1, const void *i2)
953 {
954 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
955 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
956
957 if (imm1->imm < imm2->imm)
958 return -1;
959 if (imm1->imm > imm2->imm)
960 return +1;
961 return 0;
962 }
963
/* Pack the logical-immediate fields into the 13-bit standard encoding:
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
   1         ssssss     rrrrrr       64        rrrrrr ssssss
   0         0sssss     0rrrrr       32        rrrrr  sssss
   0         10ssss     00rrrr       16        rrrr   ssss
   0         110sss     000rrr        8        rrr    sss
   0         1110ss     0000rr        4        rr     ss
   0         11110s     00000r        2        r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;

  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
977
/* Fill simd_immediates[] with every valid logical-immediate bitmask and
   its standard encoding, then sort by immediate value so that
   aarch64_logical_immediate_p can bsearch the table.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size
	       N.B. this cascade deliberately falls through: each case
	       doubles the pattern until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1038
1039 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1040 be accepted by logical (immediate) instructions
1041 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1042
1043 IS32 indicates whether or not VALUE is a 32-bit immediate.
1044 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1045 VALUE will be returned in *ENCODING. */
1046
1047 bfd_boolean
1048 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1049 {
1050 simd_imm_encoding imm_enc;
1051 const simd_imm_encoding *imm_encoding;
1052 static bfd_boolean initialized = FALSE;
1053
1054 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1055 value, is32);
1056
1057 if (initialized == FALSE)
1058 {
1059 build_immediate_table ();
1060 initialized = TRUE;
1061 }
1062
1063 if (is32)
1064 {
1065 /* Allow all zeros or all ones in top 32-bits, so that
1066 constant expressions like ~1 are permitted. */
1067 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1068 return 0xffffffff;
1069 /* Replicate the 32 lower bits to the 32 upper bits. */
1070 value &= 0xffffffff;
1071 value |= value << 32;
1072 }
1073
1074 imm_enc.imm = value;
1075 imm_encoding = (const simd_imm_encoding *)
1076 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1077 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1078 if (imm_encoding == NULL)
1079 {
1080 DEBUG_TRACE ("exit with FALSE");
1081 return FALSE;
1082 }
1083 if (encoding != NULL)
1084 *encoding = imm_encoding->encoding;
1085 DEBUG_TRACE ("exit with TRUE");
1086 return TRUE;
1087 }
1088
1089 /* If 64-bit immediate IMM is in the format of
1090 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1091 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1092 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  /* Collapse each all-ones byte of IMM into a single result bit; any
     byte that is neither 0x00 nor 0xff makes IMM invalid.  */
  int result, bit;
  uint32_t field;

  result = 0;
  for (bit = 0; bit < 8; bit++)
    {
      field = (uint32_t) (imm >> (bit * 8)) & 0xff;
      if (field != 0x00 && field != 0xff)
	return -1;
      if (field == 0xff)
	result |= 1 << bit;
    }
  return result;
}
1110
1111 /* Utility inline functions for operand_general_constraint_met_p. */
1112
1113 static inline void
1114 set_error (aarch64_operand_error *mismatch_detail,
1115 enum aarch64_operand_error_kind kind, int idx,
1116 const char* error)
1117 {
1118 if (mismatch_detail == NULL)
1119 return;
1120 mismatch_detail->kind = kind;
1121 mismatch_detail->index = idx;
1122 mismatch_detail->error = error;
1123 }
1124
1125 static inline void
1126 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1127 int idx, int lower_bound, int upper_bound,
1128 const char* error)
1129 {
1130 if (mismatch_detail == NULL)
1131 return;
1132 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1133 mismatch_detail->data[0] = lower_bound;
1134 mismatch_detail->data[1] = upper_bound;
1135 }
1136
1137 static inline void
1138 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1139 int idx, int lower_bound, int upper_bound)
1140 {
1141 if (mismatch_detail == NULL)
1142 return;
1143 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1144 _("immediate value"));
1145 }
1146
1147 static inline void
1148 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1149 int idx, int lower_bound, int upper_bound)
1150 {
1151 if (mismatch_detail == NULL)
1152 return;
1153 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1154 _("immediate offset"));
1155 }
1156
1157 static inline void
1158 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1159 int idx, int lower_bound, int upper_bound)
1160 {
1161 if (mismatch_detail == NULL)
1162 return;
1163 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1164 _("register number"));
1165 }
1166
1167 static inline void
1168 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1169 int idx, int lower_bound, int upper_bound)
1170 {
1171 if (mismatch_detail == NULL)
1172 return;
1173 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1174 _("register element index"));
1175 }
1176
1177 static inline void
1178 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1179 int idx, int lower_bound, int upper_bound)
1180 {
1181 if (mismatch_detail == NULL)
1182 return;
1183 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1184 _("shift amount"));
1185 }
1186
1187 static inline void
1188 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1189 int alignment)
1190 {
1191 if (mismatch_detail == NULL)
1192 return;
1193 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1194 mismatch_detail->data[0] = alignment;
1195 }
1196
1197 static inline void
1198 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1199 int expected_num)
1200 {
1201 if (mismatch_detail == NULL)
1202 return;
1203 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1204 mismatch_detail->data[0] = expected_num;
1205 }
1206
1207 static inline void
1208 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1209 const char* error)
1210 {
1211 if (mismatch_detail == NULL)
1212 return;
1213 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1214 }
1215
1216 /* General constraint checking based on operand code.
1217
1218 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1219 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1220
1221 This function has to be called after the qualifiers for all operands
1222 have been resolved.
1223
1224 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1225 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1226 of error message during the disassembling where error message is not
1227 wanted. We avoid the dynamic construction of strings of error messages
1228 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1229 use a combination of error code, static string and some integer data to
1230 represent an error. */
1231
1232 static int
1233 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1234 enum aarch64_opnd type,
1235 const aarch64_opcode *opcode,
1236 aarch64_operand_error *mismatch_detail)
1237 {
1238 unsigned num;
1239 unsigned char size;
1240 int64_t imm;
1241 const aarch64_opnd_info *opnd = opnds + idx;
1242 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1243
1244 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1245
1246 switch (aarch64_operands[type].op_class)
1247 {
1248 case AARCH64_OPND_CLASS_INT_REG:
1249 /* <Xt> may be optional in some IC and TLBI instructions. */
1250 if (type == AARCH64_OPND_Rt_SYS)
1251 {
1252 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1253 == AARCH64_OPND_CLASS_SYSTEM));
1254 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1255 {
1256 set_other_error (mismatch_detail, idx, _("extraneous register"));
1257 return 0;
1258 }
1259 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1260 {
1261 set_other_error (mismatch_detail, idx, _("missing register"));
1262 return 0;
1263 }
1264 }
1265 switch (qualifier)
1266 {
1267 case AARCH64_OPND_QLF_WSP:
1268 case AARCH64_OPND_QLF_SP:
1269 if (!aarch64_stack_pointer_p (opnd))
1270 {
1271 set_other_error (mismatch_detail, idx,
1272 _("stack pointer register expected"));
1273 return 0;
1274 }
1275 break;
1276 default:
1277 break;
1278 }
1279 break;
1280
1281 case AARCH64_OPND_CLASS_ADDRESS:
1282 /* Check writeback. */
1283 switch (opcode->iclass)
1284 {
1285 case ldst_pos:
1286 case ldst_unscaled:
1287 case ldstnapair_offs:
1288 case ldstpair_off:
1289 case ldst_unpriv:
1290 if (opnd->addr.writeback == 1)
1291 {
1292 set_other_error (mismatch_detail, idx,
1293 _("unexpected address writeback"));
1294 return 0;
1295 }
1296 break;
1297 case ldst_imm9:
1298 case ldstpair_indexed:
1299 case asisdlsep:
1300 case asisdlsop:
1301 if (opnd->addr.writeback == 0)
1302 {
1303 set_other_error (mismatch_detail, idx,
1304 _("address writeback expected"));
1305 return 0;
1306 }
1307 break;
1308 default:
1309 assert (opnd->addr.writeback == 0);
1310 break;
1311 }
1312 switch (type)
1313 {
1314 case AARCH64_OPND_ADDR_SIMM7:
1315 /* Scaled signed 7 bits immediate offset. */
1316 /* Get the size of the data element that is accessed, which may be
1317 different from that of the source register size,
1318 e.g. in strb/ldrb. */
1319 size = aarch64_get_qualifier_esize (opnd->qualifier);
1320 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1321 {
1322 set_offset_out_of_range_error (mismatch_detail, idx,
1323 -64 * size, 63 * size);
1324 return 0;
1325 }
1326 if (!value_aligned_p (opnd->addr.offset.imm, size))
1327 {
1328 set_unaligned_error (mismatch_detail, idx, size);
1329 return 0;
1330 }
1331 break;
1332 case AARCH64_OPND_ADDR_SIMM9:
1333 /* Unscaled signed 9 bits immediate offset. */
1334 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1335 {
1336 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1337 return 0;
1338 }
1339 break;
1340
1341 case AARCH64_OPND_ADDR_SIMM9_2:
1342 /* Unscaled signed 9 bits immediate offset, which has to be negative
1343 or unaligned. */
1344 size = aarch64_get_qualifier_esize (qualifier);
1345 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1346 && !value_aligned_p (opnd->addr.offset.imm, size))
1347 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1348 return 1;
1349 set_other_error (mismatch_detail, idx,
1350 _("negative or unaligned offset expected"));
1351 return 0;
1352
1353 case AARCH64_OPND_SIMD_ADDR_POST:
1354 /* AdvSIMD load/store multiple structures, post-index. */
1355 assert (idx == 1);
1356 if (opnd->addr.offset.is_reg)
1357 {
1358 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1359 return 1;
1360 else
1361 {
1362 set_other_error (mismatch_detail, idx,
1363 _("invalid register offset"));
1364 return 0;
1365 }
1366 }
1367 else
1368 {
1369 const aarch64_opnd_info *prev = &opnds[idx-1];
1370 unsigned num_bytes; /* total number of bytes transferred. */
1371 /* The opcode dependent area stores the number of elements in
1372 each structure to be loaded/stored. */
1373 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1374 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1375 /* Special handling of loading single structure to all lane. */
1376 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1377 * aarch64_get_qualifier_esize (prev->qualifier);
1378 else
1379 num_bytes = prev->reglist.num_regs
1380 * aarch64_get_qualifier_esize (prev->qualifier)
1381 * aarch64_get_qualifier_nelem (prev->qualifier);
1382 if ((int) num_bytes != opnd->addr.offset.imm)
1383 {
1384 set_other_error (mismatch_detail, idx,
1385 _("invalid post-increment amount"));
1386 return 0;
1387 }
1388 }
1389 break;
1390
1391 case AARCH64_OPND_ADDR_REGOFF:
1392 /* Get the size of the data element that is accessed, which may be
1393 different from that of the source register size,
1394 e.g. in strb/ldrb. */
1395 size = aarch64_get_qualifier_esize (opnd->qualifier);
1396 /* It is either no shift or shift by the binary logarithm of SIZE. */
1397 if (opnd->shifter.amount != 0
1398 && opnd->shifter.amount != (int)get_logsz (size))
1399 {
1400 set_other_error (mismatch_detail, idx,
1401 _("invalid shift amount"));
1402 return 0;
1403 }
1404 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1405 operators. */
1406 switch (opnd->shifter.kind)
1407 {
1408 case AARCH64_MOD_UXTW:
1409 case AARCH64_MOD_LSL:
1410 case AARCH64_MOD_SXTW:
1411 case AARCH64_MOD_SXTX: break;
1412 default:
1413 set_other_error (mismatch_detail, idx,
1414 _("invalid extend/shift operator"));
1415 return 0;
1416 }
1417 break;
1418
1419 case AARCH64_OPND_ADDR_UIMM12:
1420 imm = opnd->addr.offset.imm;
1421 /* Get the size of the data element that is accessed, which may be
1422 different from that of the source register size,
1423 e.g. in strb/ldrb. */
1424 size = aarch64_get_qualifier_esize (qualifier);
1425 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1426 {
1427 set_offset_out_of_range_error (mismatch_detail, idx,
1428 0, 4095 * size);
1429 return 0;
1430 }
1431 if (!value_aligned_p (opnd->addr.offset.imm, size))
1432 {
1433 set_unaligned_error (mismatch_detail, idx, size);
1434 return 0;
1435 }
1436 break;
1437
1438 case AARCH64_OPND_ADDR_PCREL14:
1439 case AARCH64_OPND_ADDR_PCREL19:
1440 case AARCH64_OPND_ADDR_PCREL21:
1441 case AARCH64_OPND_ADDR_PCREL26:
1442 imm = opnd->imm.value;
1443 if (operand_need_shift_by_two (get_operand_from_code (type)))
1444 {
1445 /* The offset value in a PC-relative branch instruction is alway
1446 4-byte aligned and is encoded without the lowest 2 bits. */
1447 if (!value_aligned_p (imm, 4))
1448 {
1449 set_unaligned_error (mismatch_detail, idx, 4);
1450 return 0;
1451 }
1452 /* Right shift by 2 so that we can carry out the following check
1453 canonically. */
1454 imm >>= 2;
1455 }
1456 size = get_operand_fields_width (get_operand_from_code (type));
1457 if (!value_fit_signed_field_p (imm, size))
1458 {
1459 set_other_error (mismatch_detail, idx,
1460 _("immediate out of range"));
1461 return 0;
1462 }
1463 break;
1464
1465 default:
1466 break;
1467 }
1468 break;
1469
1470 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1471 /* The opcode dependent area stores the number of elements in
1472 each structure to be loaded/stored. */
1473 num = get_opcode_dependent_value (opcode);
1474 switch (type)
1475 {
1476 case AARCH64_OPND_LVt:
1477 assert (num >= 1 && num <= 4);
1478 /* Unless LD1/ST1, the number of registers should be equal to that
1479 of the structure elements. */
1480 if (num != 1 && opnd->reglist.num_regs != num)
1481 {
1482 set_reg_list_error (mismatch_detail, idx, num);
1483 return 0;
1484 }
1485 break;
1486 case AARCH64_OPND_LVt_AL:
1487 case AARCH64_OPND_LEt:
1488 assert (num >= 1 && num <= 4);
1489 /* The number of registers should be equal to that of the structure
1490 elements. */
1491 if (opnd->reglist.num_regs != num)
1492 {
1493 set_reg_list_error (mismatch_detail, idx, num);
1494 return 0;
1495 }
1496 break;
1497 default:
1498 break;
1499 }
1500 break;
1501
1502 case AARCH64_OPND_CLASS_IMMEDIATE:
1503 /* Constraint check on immediate operand. */
1504 imm = opnd->imm.value;
1505 /* E.g. imm_0_31 constrains value to be 0..31. */
1506 if (qualifier_value_in_range_constraint_p (qualifier)
1507 && !value_in_range_p (imm, get_lower_bound (qualifier),
1508 get_upper_bound (qualifier)))
1509 {
1510 set_imm_out_of_range_error (mismatch_detail, idx,
1511 get_lower_bound (qualifier),
1512 get_upper_bound (qualifier));
1513 return 0;
1514 }
1515
1516 switch (type)
1517 {
1518 case AARCH64_OPND_AIMM:
1519 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1520 {
1521 set_other_error (mismatch_detail, idx,
1522 _("invalid shift operator"));
1523 return 0;
1524 }
1525 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1526 {
1527 set_other_error (mismatch_detail, idx,
1528 _("shift amount expected to be 0 or 12"));
1529 return 0;
1530 }
1531 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1532 {
1533 set_other_error (mismatch_detail, idx,
1534 _("immediate out of range"));
1535 return 0;
1536 }
1537 break;
1538
1539 case AARCH64_OPND_HALF:
1540 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1541 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1542 {
1543 set_other_error (mismatch_detail, idx,
1544 _("invalid shift operator"));
1545 return 0;
1546 }
1547 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1548 if (!value_aligned_p (opnd->shifter.amount, 16))
1549 {
1550 set_other_error (mismatch_detail, idx,
1551 _("shift amount should be a multiple of 16"));
1552 return 0;
1553 }
1554 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1555 {
1556 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1557 0, size * 8 - 16);
1558 return 0;
1559 }
1560 if (opnd->imm.value < 0)
1561 {
1562 set_other_error (mismatch_detail, idx,
1563 _("negative immediate value not allowed"));
1564 return 0;
1565 }
1566 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1567 {
1568 set_other_error (mismatch_detail, idx,
1569 _("immediate out of range"));
1570 return 0;
1571 }
1572 break;
1573
1574 case AARCH64_OPND_IMM_MOV:
1575 {
1576 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1577 imm = opnd->imm.value;
1578 assert (idx == 1);
1579 switch (opcode->op)
1580 {
1581 case OP_MOV_IMM_WIDEN:
1582 imm = ~imm;
1583 /* Fall through... */
1584 case OP_MOV_IMM_WIDE:
1585 if (!aarch64_wide_constant_p (imm, is32, NULL))
1586 {
1587 set_other_error (mismatch_detail, idx,
1588 _("immediate out of range"));
1589 return 0;
1590 }
1591 break;
1592 case OP_MOV_IMM_LOG:
1593 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1594 {
1595 set_other_error (mismatch_detail, idx,
1596 _("immediate out of range"));
1597 return 0;
1598 }
1599 break;
1600 default:
1601 assert (0);
1602 return 0;
1603 }
1604 }
1605 break;
1606
1607 case AARCH64_OPND_NZCV:
1608 case AARCH64_OPND_CCMP_IMM:
1609 case AARCH64_OPND_EXCEPTION:
1610 case AARCH64_OPND_UIMM4:
1611 case AARCH64_OPND_UIMM7:
1612 case AARCH64_OPND_UIMM3_OP1:
1613 case AARCH64_OPND_UIMM3_OP2:
1614 size = get_operand_fields_width (get_operand_from_code (type));
1615 assert (size < 32);
1616 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1617 {
1618 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1619 (1 << size) - 1);
1620 return 0;
1621 }
1622 break;
1623
1624 case AARCH64_OPND_WIDTH:
1625 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1626 && opnds[0].type == AARCH64_OPND_Rd);
1627 size = get_upper_bound (qualifier);
1628 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1629 /* lsb+width <= reg.size */
1630 {
1631 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1632 size - opnds[idx-1].imm.value);
1633 return 0;
1634 }
1635 break;
1636
1637 case AARCH64_OPND_LIMM:
1638 {
1639 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1640 uint64_t uimm = opnd->imm.value;
1641 if (opcode->op == OP_BIC)
1642 uimm = ~uimm;
1643 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1644 {
1645 set_other_error (mismatch_detail, idx,
1646 _("immediate out of range"));
1647 return 0;
1648 }
1649 }
1650 break;
1651
1652 case AARCH64_OPND_IMM0:
1653 case AARCH64_OPND_FPIMM0:
1654 if (opnd->imm.value != 0)
1655 {
1656 set_other_error (mismatch_detail, idx,
1657 _("immediate zero expected"));
1658 return 0;
1659 }
1660 break;
1661
1662 case AARCH64_OPND_SHLL_IMM:
1663 assert (idx == 2);
1664 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1665 if (opnd->imm.value != size)
1666 {
1667 set_other_error (mismatch_detail, idx,
1668 _("invalid shift amount"));
1669 return 0;
1670 }
1671 break;
1672
1673 case AARCH64_OPND_IMM_VLSL:
1674 size = aarch64_get_qualifier_esize (qualifier);
1675 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1676 {
1677 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1678 size * 8 - 1);
1679 return 0;
1680 }
1681 break;
1682
1683 case AARCH64_OPND_IMM_VLSR:
1684 size = aarch64_get_qualifier_esize (qualifier);
1685 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1686 {
1687 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1688 return 0;
1689 }
1690 break;
1691
1692 case AARCH64_OPND_SIMD_IMM:
1693 case AARCH64_OPND_SIMD_IMM_SFT:
1694 /* Qualifier check. */
1695 switch (qualifier)
1696 {
1697 case AARCH64_OPND_QLF_LSL:
1698 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1699 {
1700 set_other_error (mismatch_detail, idx,
1701 _("invalid shift operator"));
1702 return 0;
1703 }
1704 break;
1705 case AARCH64_OPND_QLF_MSL:
1706 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1707 {
1708 set_other_error (mismatch_detail, idx,
1709 _("invalid shift operator"));
1710 return 0;
1711 }
1712 break;
1713 case AARCH64_OPND_QLF_NIL:
1714 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1715 {
1716 set_other_error (mismatch_detail, idx,
1717 _("shift is not permitted"));
1718 return 0;
1719 }
1720 break;
1721 default:
1722 assert (0);
1723 return 0;
1724 }
1725 /* Is the immediate valid? */
1726 assert (idx == 1);
1727 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1728 {
1729 /* uimm8 */
1730 if (!value_in_range_p (opnd->imm.value, 0, 255))
1731 {
1732 set_imm_out_of_range_error (mismatch_detail, idx, 0, 255);
1733 return 0;
1734 }
1735 }
1736 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1737 {
1738 /* uimm64 is not
1739 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1740 ffffffffgggggggghhhhhhhh'. */
1741 set_other_error (mismatch_detail, idx,
1742 _("invalid value for immediate"));
1743 return 0;
1744 }
1745 /* Is the shift amount valid? */
1746 switch (opnd->shifter.kind)
1747 {
1748 case AARCH64_MOD_LSL:
1749 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1750 if (!value_aligned_p (opnd->shifter.amount, 8))
1751 {
1752 set_unaligned_error (mismatch_detail, idx, 8);
1753 return 0;
1754 }
1755 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1756 {
1757 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1758 (size - 1) * 8);
1759 return 0;
1760 }
1761 break;
1762 case AARCH64_MOD_MSL:
1763 /* Only 8 and 16 are valid shift amount. */
1764 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1765 {
1766 set_other_error (mismatch_detail, idx,
1767 _("shift amount expected to be 0 or 16"));
1768 return 0;
1769 }
1770 break;
1771 default:
1772 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1773 {
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid shift operator"));
1776 return 0;
1777 }
1778 break;
1779 }
1780 break;
1781
1782 case AARCH64_OPND_FPIMM:
1783 case AARCH64_OPND_SIMD_FPIMM:
1784 if (opnd->imm.is_fp == 0)
1785 {
1786 set_other_error (mismatch_detail, idx,
1787 _("floating-point immediate expected"));
1788 return 0;
1789 }
1790 /* The value is expected to be an 8-bit floating-point constant with
1791 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1792 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1793 instruction). */
1794 if (!value_in_range_p (opnd->imm.value, 0, 255))
1795 {
1796 set_other_error (mismatch_detail, idx,
1797 _("immediate out of range"));
1798 return 0;
1799 }
1800 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1801 {
1802 set_other_error (mismatch_detail, idx,
1803 _("invalid shift operator"));
1804 return 0;
1805 }
1806 break;
1807
1808 default:
1809 break;
1810 }
1811 break;
1812
1813 case AARCH64_OPND_CLASS_CP_REG:
1814 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1815 valid range: C0 - C15. */
1816 if (opnd->reg.regno > 15)
1817 {
1818 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1819 return 0;
1820 }
1821 break;
1822
1823 case AARCH64_OPND_CLASS_SYSTEM:
1824 switch (type)
1825 {
1826 case AARCH64_OPND_PSTATEFIELD:
1827 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1828 /* MSR SPSel, #uimm4
1829 Uses uimm4 as a control value to select the stack pointer: if
1830 bit 0 is set it selects the current exception level's stack
1831 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1832 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1833 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1834 {
1835 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1836 return 0;
1837 }
1838 break;
1839 default:
1840 break;
1841 }
1842 break;
1843
1844 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1845 /* Get the upper bound for the element index. */
1846 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1847 /* Index out-of-range. */
1848 if (!value_in_range_p (opnd->reglane.index, 0, num))
1849 {
1850 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1851 return 0;
1852 }
1853 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1854 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1855 number is encoded in "size:M:Rm":
1856 size <Vm>
1857 00 RESERVED
1858 01 0:Rm
1859 10 M:Rm
1860 11 RESERVED */
1861 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1862 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1863 {
1864 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1865 return 0;
1866 }
1867 break;
1868
1869 case AARCH64_OPND_CLASS_MODIFIED_REG:
1870 assert (idx == 1 || idx == 2);
1871 switch (type)
1872 {
1873 case AARCH64_OPND_Rm_EXT:
1874 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1875 && opnd->shifter.kind != AARCH64_MOD_LSL)
1876 {
1877 set_other_error (mismatch_detail, idx,
1878 _("extend operator expected"));
1879 return 0;
1880 }
1881 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1882 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1883 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1884 case. */
1885 if (!aarch64_stack_pointer_p (opnds + 0)
1886 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1887 {
1888 if (!opnd->shifter.operator_present)
1889 {
1890 set_other_error (mismatch_detail, idx,
1891 _("missing extend operator"));
1892 return 0;
1893 }
1894 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1895 {
1896 set_other_error (mismatch_detail, idx,
1897 _("'LSL' operator not allowed"));
1898 return 0;
1899 }
1900 }
1901 assert (opnd->shifter.operator_present /* Default to LSL. */
1902 || opnd->shifter.kind == AARCH64_MOD_LSL);
1903 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1904 {
1905 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1906 return 0;
1907 }
1908 /* In the 64-bit form, the final register operand is written as Wm
1909 for all but the (possibly omitted) UXTX/LSL and SXTX
1910 operators.
1911 N.B. GAS allows X register to be used with any operator as a
1912 programming convenience. */
1913 if (qualifier == AARCH64_OPND_QLF_X
1914 && opnd->shifter.kind != AARCH64_MOD_LSL
1915 && opnd->shifter.kind != AARCH64_MOD_UXTX
1916 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1917 {
1918 set_other_error (mismatch_detail, idx, _("W register expected"));
1919 return 0;
1920 }
1921 break;
1922
1923 case AARCH64_OPND_Rm_SFT:
1924 /* ROR is not available to the shifted register operand in
1925 arithmetic instructions. */
1926 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1927 {
1928 set_other_error (mismatch_detail, idx,
1929 _("shift operator expected"));
1930 return 0;
1931 }
1932 if (opnd->shifter.kind == AARCH64_MOD_ROR
1933 && opcode->iclass != log_shift)
1934 {
1935 set_other_error (mismatch_detail, idx,
1936 _("'ROR' operator not allowed"));
1937 return 0;
1938 }
1939 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1940 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1941 {
1942 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1943 return 0;
1944 }
1945 break;
1946
1947 default:
1948 break;
1949 }
1950 break;
1951
1952 default:
1953 break;
1954 }
1955
1956 return 1;
1957 }
1958
1959 /* Main entrypoint for the operand constraint checking.
1960
1961 Return 1 if operands of *INST meet the constraint applied by the operand
1962 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1963 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1964 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1965 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1966 error kind when it is notified that an instruction does not pass the check).
1967
1968 Un-determined operand qualifiers may get established during the process. */
1969
1970 int
1971 aarch64_match_operands_constraint (aarch64_inst *inst,
1972 aarch64_operand_error *mismatch_detail)
1973 {
1974 int i;
1975
1976 DEBUG_TRACE ("enter");
1977
1978 /* Match operands' qualifier.
1979 *INST has already had qualifier establish for some, if not all, of
1980 its operands; we need to find out whether these established
1981 qualifiers match one of the qualifier sequence in
1982 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
1983 with the corresponding qualifier in such a sequence.
1984 Only basic operand constraint checking is done here; the more thorough
1985 constraint checking will carried out by operand_general_constraint_met_p,
1986 which has be to called after this in order to get all of the operands'
1987 qualifiers established. */
1988 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
1989 {
1990 DEBUG_TRACE ("FAIL on operand qualifier matching");
1991 if (mismatch_detail)
1992 {
1993 /* Return an error type to indicate that it is the qualifier
1994 matching failure; we don't care about which operand as there
1995 are enough information in the opcode table to reproduce it. */
1996 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
1997 mismatch_detail->index = -1;
1998 mismatch_detail->error = NULL;
1999 }
2000 return 0;
2001 }
2002
2003 /* Match operands' constraint. */
2004 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2005 {
2006 enum aarch64_opnd type = inst->opcode->operands[i];
2007 if (type == AARCH64_OPND_NIL)
2008 break;
2009 if (inst->operands[i].skip)
2010 {
2011 DEBUG_TRACE ("skip the incomplete operand %d", i);
2012 continue;
2013 }
2014 if (operand_general_constraint_met_p (inst->operands, i, type,
2015 inst->opcode, mismatch_detail) == 0)
2016 {
2017 DEBUG_TRACE ("FAIL on operand %d", i);
2018 return 0;
2019 }
2020 }
2021
2022 DEBUG_TRACE ("PASS");
2023
2024 return 1;
2025 }
2026
2027 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2028 Also updates the TYPE of each INST->OPERANDS with the corresponding
2029 value of OPCODE->OPERANDS.
2030
2031 Note that some operand qualifiers may need to be manually cleared by
2032 the caller before it further calls the aarch64_opcode_encode; by
2033 doing this, it helps the qualifier matching facilities work
2034 properly. */
2035
2036 const aarch64_opcode*
2037 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2038 {
2039 int i;
2040 const aarch64_opcode *old = inst->opcode;
2041
2042 inst->opcode = opcode;
2043
2044 /* Update the operand types. */
2045 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2046 {
2047 inst->operands[i].type = opcode->operands[i];
2048 if (opcode->operands[i] == AARCH64_OPND_NIL)
2049 break;
2050 }
2051
2052 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2053
2054 return old;
2055 }
2056
2057 int
2058 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2059 {
2060 int i;
2061 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2062 if (operands[i] == operand)
2063 return i;
2064 else if (operands[i] == AARCH64_OPND_NIL)
2065 break;
2066 return -1;
2067 }
2068
/* Integer register name table, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1
   i.e. entry 31 is "wsp"/"sp" in the first plane and "wzr"/"xzr" in the
   second.  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
#undef R64
#undef R32
};
2096
2097 /* Return the integer register name.
2098 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2099
2100 static inline const char *
2101 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2102 {
2103 const int has_zr = sp_reg_p ? 0 : 1;
2104 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2105 return int_reg[has_zr][is_64][regno];
2106 }
2107
2108 /* Like get_int_reg_name, but IS_64 is always 1. */
2109
2110 static inline const char *
2111 get_64bit_int_reg_name (int regno, int sp_reg_p)
2112 {
2113 const int has_zr = sp_reg_p ? 0 : 1;
2114 return int_reg[has_zr][1][regno];
2115 }
2116
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Bit-for-bit view of a 64-bit pattern as a double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Bit-for-bit view of a 32-bit pattern as a float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;
2130
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  Expand it to the IEEE bit
   pattern of a single-precision value (IS_DP == 0) or a double-precision
   value (IS_DP == 1) and return that pattern.  */

static uint64_t
expand_fp_imm (int is_dp, uint32_t imm8)
{
  uint64_t result;
  uint32_t sign, frac, exp_top, exp_top_repl4;

  sign = (imm8 >> 7) & 0x01;		/* imm8<7> */
  frac = imm8 & 0x7f;			/* imm8<6:0> */
  exp_top = frac >> 6;			/* imm8<6> */
  exp_top_repl4 = exp_top * 0xf;	/* Replicate(imm8<6>,4) */

  if (is_dp)
    /* Sign, NOT(imm8<6>), imm8<6> replicated through the exponent, then
       imm8<6>:imm8<5:0> as the top fraction bits (rest zero).  */
    result = ((uint64_t) sign << 63)
      | ((uint64_t) (exp_top ^ 1) << 62)		/* NOT(imm8<6>) */
      | ((uint64_t) exp_top_repl4 << 58)
      | ((uint64_t) exp_top << 57)
      | ((uint64_t) exp_top << 56)
      | ((uint64_t) exp_top << 55)			/* Replicate(imm8<6>,7) */
      | ((uint64_t) frac << 48);			/* imm8<6>:imm8<5:0> */
  else
    /* Same layout scaled down to the single-precision format.  */
    result = (sign << 31)				/* imm8<7> */
      | ((exp_top ^ 1) << 30)				/* NOT(imm8<6>) */
      | (exp_top_repl4 << 26)				/* Replicate(imm8<6>,4) */
      | (frac << 19);					/* imm8<6>:imm8<5:0> */

  return result;
}
2167
2168 /* Produce the string representation of the register list operand *OPND
2169 in the buffer pointed by BUF of size SIZE. */
2170 static void
2171 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2172 {
2173 const int num_regs = opnd->reglist.num_regs;
2174 const int first_reg = opnd->reglist.first_regno;
2175 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2176 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2177 char tb[8]; /* Temporary buffer. */
2178
2179 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2180 assert (num_regs >= 1 && num_regs <= 4);
2181
2182 /* Prepare the index if any. */
2183 if (opnd->reglist.has_index)
2184 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2185 else
2186 tb[0] = '\0';
2187
2188 /* The hyphenated form is preferred for disassembly if there are
2189 more than two registers in the list, and the register numbers
2190 are monotonically increasing in increments of one. */
2191 if (num_regs > 2 && last_reg > first_reg)
2192 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2193 last_reg, qlf_name, tb);
2194 else
2195 {
2196 const int reg0 = first_reg;
2197 const int reg1 = (first_reg + 1) & 0x1f;
2198 const int reg2 = (first_reg + 2) & 0x1f;
2199 const int reg3 = (first_reg + 3) & 0x1f;
2200
2201 switch (num_regs)
2202 {
2203 case 1:
2204 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2205 break;
2206 case 2:
2207 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2208 reg1, qlf_name, tb);
2209 break;
2210 case 3:
2211 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2212 reg1, qlf_name, reg2, qlf_name, tb);
2213 break;
2214 case 4:
2215 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2216 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2217 reg3, qlf_name, tb);
2218 break;
2219 }
2220 }
2221 }
2222
2223 /* Produce the string representation of the register offset address operand
2224 *OPND in the buffer pointed by BUF of size SIZE. */
2225 static void
2226 print_register_offset_address (char *buf, size_t size,
2227 const aarch64_opnd_info *opnd)
2228 {
2229 const size_t tblen = 16;
2230 char tb[tblen]; /* Temporary buffer. */
2231 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2232 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2233 bfd_boolean print_extend_p = TRUE;
2234 bfd_boolean print_amount_p = TRUE;
2235 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2236
2237 switch (opnd->shifter.kind)
2238 {
2239 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2240 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2241 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2242 case AARCH64_MOD_SXTX: break;
2243 default: assert (0);
2244 }
2245
2246 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2247 || !opnd->shifter.amount_present))
2248 {
2249 /* Not print the shift/extend amount when the amount is zero and
2250 when it is not the special case of 8-bit load/store instruction. */
2251 print_amount_p = FALSE;
2252 /* Likewise, no need to print the shift operator LSL in such a
2253 situation. */
2254 if (lsl_p)
2255 print_extend_p = FALSE;
2256 }
2257
2258 /* Prepare for the extend/shift. */
2259 if (print_extend_p)
2260 {
2261 if (print_amount_p)
2262 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2263 else
2264 snprintf (tb, tblen, ",%s", shift_name);
2265 }
2266 else
2267 tb[0] = '\0';
2268
2269 snprintf (buf, size, "[%s,%c%d%s]",
2270 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2271 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
2272 }
2273
2274 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2275 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2276 PC, PCREL_P and ADDRESS are used to pass in and return information about
2277 the PC-relative address calculation, where the PC value is passed in
2278 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2279 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2280 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2281
2282 The function serves both the disassembler and the assembler diagnostics
2283 issuer, which is the reason why it lives in this file. */
2284
void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr;

  /* Start with an empty string; cases that legitimately omit an optional
     operand simply leave it that way.  */
  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    /* General-purpose registers, printed as Wn or Xn.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    /* General-purpose registers where R31 means SP rather than ZR.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    /* Rm with an extend operator, e.g. "x1, uxtw #2".  */
    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    /* Rm with a shift operator; a zero-amount LSL is omitted entirely.  */
    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* Scalar FP/SIMD registers; the qualifier name supplies the size
       prefix (b/h/s/d/q).  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    /* AdvSIMD vector registers, e.g. "v0.4s".  */
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* AdvSIMD vector element, e.g. "v0.s[1]".  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    /* The D[1] element, used by e.g. DUP/MOV of the upper half.  */
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    /* Vector register lists.  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd);
      break;

    /* Coprocessor-style register names used by SYS.  */
    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    /* Plain decimal immediates.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    /* MOV alias immediate, printed in hex with the decimal value in a
       trailing comment; width follows the destination register.  */
    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    /* Hex immediates with an optional LSL shifter.  */
    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    /* AdvSIMD modified immediates with an optional shifter (LSL/MSL).  */
    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* 8-bit FP immediates, expanded to the full value for display; the
       precision follows the destination register/vector.  */
    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (0, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (1, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    /* Small hex immediates; may be omitted when optional and equal to the
       default.  */
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    /* ADRP target: page-aligned PC plus the (page-scaled) immediate.  */
    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    /* PC-relative branch/literal targets.  */
    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    /* [base] addresses; the SIMD post-index form appends ", Xm" or
       ", #imm".  */
    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address (buf, size, opnd);
      break;

    /* [base,#imm] with optional pre-/post-index writeback.  */
    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.writeback)
	{
	  if (opnd->addr.preind)
	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
	}
      else
	{
	  if (opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s]", name);
	}
      break;

    /* [base{,#imm}] unsigned-offset form; a zero offset is omitted.  */
    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    /* System registers: look up the name table, falling back to the
       generic s<op0>_<op1>_c<Cn>_c<Cm>_<op2> form.  */
    case AARCH64_OPND_SYSREG:
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg)
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      /* Unlike SYSREG above, an unknown PSTATE field is a bug.  */
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    /* System instruction (AT/DC/IC/TLBI) operation names.  */
    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->template);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    /* Prefetch operation: named form if it has one, raw value otherwise.  */
    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    default:
      assert (0);
    }
}
2654
/* Pack a system register/instruction encoding op0:op1:CRn:CRm:op2 into a
   single value: op0 in bits <15:14>, op1 in <13:11>, CRn in <10:7>, CRm
   in <6:3> and op2 in <2:0> (cf. the decoding of implementation-defined
   registers in the AARCH64_OPND_SYSREG case of aarch64_print_operand).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers
   (op0 = 3, CRn = 4 fixed).  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For 3.9.10 System Instructions (op0 = 1 fixed).  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthands for the CRn/CRm register numbers used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
2679
/* System registers accessible via MRS/MSR, terminated by a null name.
   TODO: there are two more issues that need to be resolved:
     1. handle read-only and write-only system registers;
     2. handle cpu-implementation-defined system registers.
   Entries marked RO / r / w note the access restriction; duplicated
   encodings (e.g. spsr_svc = spsr_el1) provide alias names.  */
const struct aarch64_name_value_pair aarch64_sys_regs [] =
{
  { "spsr_el1", CPEN_(0,C0,0) }, /* = spsr_svc */
  { "elr_el1", CPEN_(0,C0,1) },
  { "sp_el0", CPEN_(0,C1,0) },
  { "spsel", CPEN_(0,C2,0) },
  { "daif", CPEN_(3,C2,1) },
  { "currentel", CPEN_(0,C2,2) }, /* RO */
  { "nzcv", CPEN_(3,C2,0) },
  { "fpcr", CPEN_(3,C4,0) },
  { "fpsr", CPEN_(3,C4,1) },
  { "dspsr_el0", CPEN_(3,C5,0) },
  { "dlr_el0", CPEN_(3,C5,1) },
  { "spsr_el2", CPEN_(4,C0,0) }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1) },
  { "sp_el1", CPEN_(4,C1,0) },
  { "spsr_irq", CPEN_(4,C3,0) },
  { "spsr_abt", CPEN_(4,C3,1) },
  { "spsr_und", CPEN_(4,C3,2) },
  { "spsr_fiq", CPEN_(4,C3,3) },
  { "spsr_el3", CPEN_(6,C0,0) },
  { "elr_el3", CPEN_(6,C0,1) },
  { "sp_el2", CPEN_(6,C1,0) },
  { "spsr_svc", CPEN_(0,C0,0) }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0) }, /* = spsr_el2 */
  { "midr_el1", CPENC(3,0,C0,C0,0) }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1) }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5) }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6) }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7) }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7) }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2) }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0) }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1) }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3) }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4) }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5) }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6) }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7) }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0) }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1) }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2) }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3) }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4) }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5) }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0) }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1) }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2) }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0) }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0) }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1) }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0) }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1) }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0) }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1) }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0) }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1) }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4) }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5) }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1) }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0) }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0) },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5) },
  { "sctlr_el1", CPENC(3,0,C1,C0,0) },
  { "sctlr_el2", CPENC(3,4,C1,C0,0) },
  { "sctlr_el3", CPENC(3,6,C1,C0,0) },
  { "actlr_el1", CPENC(3,0,C1,C0,1) },
  { "actlr_el2", CPENC(3,4,C1,C0,1) },
  { "actlr_el3", CPENC(3,6,C1,C0,1) },
  { "cpacr_el1", CPENC(3,0,C1,C0,2) },
  { "cptr_el2", CPENC(3,4,C1,C1,2) },
  { "cptr_el3", CPENC(3,6,C1,C1,2) },
  { "scr_el3", CPENC(3,6,C1,C1,0) },
  { "hcr_el2", CPENC(3,4,C1,C1,0) },
  { "mdcr_el2", CPENC(3,4,C1,C1,1) },
  { "mdcr_el3", CPENC(3,6,C1,C3,1) },
  { "hstr_el2", CPENC(3,4,C1,C1,3) },
  { "hacr_el2", CPENC(3,4,C1,C1,7) },
  { "ttbr0_el1", CPENC(3,0,C2,C0,0) },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1) },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0) },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0) },
  { "vttbr_el2", CPENC(3,4,C2,C1,0) },
  { "tcr_el1", CPENC(3,0,C2,C0,2) },
  { "tcr_el2", CPENC(3,4,C2,C0,2) },
  { "tcr_el3", CPENC(3,6,C2,C0,2) },
  { "vtcr_el2", CPENC(3,4,C2,C1,2) },
  { "afsr0_el1", CPENC(3,0,C5,C1,0) },
  { "afsr1_el1", CPENC(3,0,C5,C1,1) },
  { "afsr0_el2", CPENC(3,4,C5,C1,0) },
  { "afsr1_el2", CPENC(3,4,C5,C1,1) },
  { "afsr0_el3", CPENC(3,6,C5,C1,0) },
  { "afsr1_el3", CPENC(3,6,C5,C1,1) },
  { "esr_el1", CPENC(3,0,C5,C2,0) },
  { "esr_el2", CPENC(3,4,C5,C2,0) },
  { "esr_el3", CPENC(3,6,C5,C2,0) },
  { "fpexc32_el2", CPENC(3,4,C5,C3,0) },
  { "far_el1", CPENC(3,0,C6,C0,0) },
  { "far_el2", CPENC(3,4,C6,C0,0) },
  { "far_el3", CPENC(3,6,C6,C0,0) },
  { "hpfar_el2", CPENC(3,4,C6,C0,4) },
  { "par_el1", CPENC(3,0,C7,C4,0) },
  { "mair_el1", CPENC(3,0,C10,C2,0) },
  { "mair_el2", CPENC(3,4,C10,C2,0) },
  { "mair_el3", CPENC(3,6,C10,C2,0) },
  { "amair_el1", CPENC(3,0,C10,C3,0) },
  { "amair_el2", CPENC(3,4,C10,C3,0) },
  { "amair_el3", CPENC(3,6,C10,C3,0) },
  { "vbar_el1", CPENC(3,0,C12,C0,0) },
  { "vbar_el2", CPENC(3,4,C12,C0,0) },
  { "vbar_el3", CPENC(3,6,C12,C0,0) },
  { "rvbar_el1", CPENC(3,0,C12,C0,1) }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1) }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1) }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2) },
  { "rmr_el2", CPENC(3,4,C12,C0,2) },
  { "rmr_el3", CPENC(3,6,C12,C0,2) },
  { "isr_el1", CPENC(3,0,C12,C1,0) }, /* RO */
  { "contextidr_el1", CPENC(3,0,C13,C0,1) },
  { "tpidr_el0", CPENC(3,3,C13,C0,2) },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3) }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4) },
  { "tpidr_el2", CPENC(3,4,C13,C0,2) },
  { "tpidr_el3", CPENC(3,6,C13,C0,2) },
  { "teecr32_el1", CPENC(2,2,C0, C0,0) }, /* See section 3.9.7.1 */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0) }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1) }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2) }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3) },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0) },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0) },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0) },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1) },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2) },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0) },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1) },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2) },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0) },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1) },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2) },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0) },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1) },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2) },
  { "dacr32_el2", CPENC(3,4,C3,C0,0) },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1) },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0) },
  { "sder32_el3", CPENC(3,6,C1,C1,1) },
  { "mdscr_el1", CPENC(2,0,C0, C2, 2) },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0) }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0) },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0) },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0) }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0) }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2) }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2) }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2) },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0) },
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4) },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4) },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4) },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4) },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4) },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4) },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4) },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4) },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4) },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4) },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4) },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4) },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4) },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4) },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4) },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4) },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5) },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5) },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5) },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5) },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5) },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5) },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5) },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5) },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5) },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5) },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5) },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5) },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5) },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5) },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5) },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5) },
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6) },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6) },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6) },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6) },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6) },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6) },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6) },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6) },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6) },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6) },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6) },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6) },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6) },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6) },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6) },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6) },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7) },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7) },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7) },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7) },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7) },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7) },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7) },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7) },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7) },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7) },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7) },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7) },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7) },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7) },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7) },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7) },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0) }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4) }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4) }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4) },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4) },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6) },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6) },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6) }, /* r */

  { "pmcr_el0", CPENC(3,3,C9,C12, 0) },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1) },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2) },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3) },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4) }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5) },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6) }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7) }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0) },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1) },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2) },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0) },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1) },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2) },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3) },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0) },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1) },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2) },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3) },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4) },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5) },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6) },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7) },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0) },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1) },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2) },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3) },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4) },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5) },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6) },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7) },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0) },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1) },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2) },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3) },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4) },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5) },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6) },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7) },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0) },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1) },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2) },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3) },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4) },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5) },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6) },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0) },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1) },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2) },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3) },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4) },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5) },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6) },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7) },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0) },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1) },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2) },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3) },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4) },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5) },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6) },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7) },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0) },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1) },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2) },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3) },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4) },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5) },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6) },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7) },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0) },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1) },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2) },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3) },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4) },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5) },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6) },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7) },
  { 0, CPENC(0,0,0,0,0) },
};
2993
/* PSTATE field names accepted by the MSR (immediate) instruction, each
   paired with its immediate encoding value.  The table is terminated by
   an entry with a null name; the sentinel's CPENC(0,0,0,0,0) is simply a
   zero value, matching the sentinel convention of the other tables in
   this file.  */
2994 const struct aarch64_name_value_pair aarch64_pstatefields [] =
2995 {
2996 { "spsel", 0x05 },
2997 { "daifset", 0x1e },
2998 { "daifclr", 0x1f },
2999 { 0, CPENC(0,0,0,0,0) },
3000 };
3001
/* Operand names for the IC (instruction cache maintenance) instruction.
   Each entry pairs the operation name with its CPENS encoding; the final
   integer flags whether the operation takes an Xt register operand (only
   "ivau" — invalidate by virtual address — does, since it needs an
   address; the all-cache forms take none).  Null-name entry terminates
   the table.  */
3002 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3003 {
3004 { "ialluis", CPENS(0,C7,C1,0), 0 },
3005 { "iallu", CPENS(0,C7,C5,0), 0 },
3006 { "ivau", CPENS(3,C7,C5,1), 1 },
3007 { 0, CPENS(0,0,0,0), 0 }
3008 };
3009
/* Operand names for the DC (data cache maintenance) instruction.  Every
   DC operation in this table takes an Xt register operand (flag 1):
   the *vac/*vau forms operate by virtual address, the *sw forms by
   set/way, and "zva" zeroes a block at the given address.  Null-name
   entry terminates the table.  */
3010 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3011 {
3012 { "zva", CPENS(3,C7,C4,1), 1 },
3013 { "ivac", CPENS(0,C7,C6,1), 1 },
3014 { "isw", CPENS(0,C7,C6,2), 1 },
3015 { "cvac", CPENS(3,C7,C10,1), 1 },
3016 { "csw", CPENS(0,C7,C10,2), 1 },
3017 { "cvau", CPENS(3,C7,C11,1), 1 },
3018 { "civac", CPENS(3,C7,C14,1), 1 },
3019 { "cisw", CPENS(0,C7,C14,2), 1 },
3020 { 0, CPENS(0,0,0,0), 0 }
3021 };
3022
/* Operand names for the AT (address translation) instruction.  Names
   encode <stage><EL> and read/write intent (e.g. "s1e1r" = stage 1,
   EL1, read; "s12e0w" = stages 1&2, EL0, write).  All AT operations
   take an Xt register operand holding the address to translate
   (flag 1).  The op1 field of CPENS selects the exception level
   (0 = EL1, 4 = EL2, 6 = EL3).  Null-name entry terminates the
   table.  */
3023 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3024 {
3025 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3026 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3027 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3028 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3029 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3030 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3031 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3032 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3033 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3034 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3035 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3036 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3037 { 0, CPENS(0,0,0,0), 0 }
3038 };
3039
/* Operand names for the TLBI (TLB invalidate) instruction.  The final
   integer flags whether the operation takes an Xt register operand:
   by-address and by-ASID forms (vae*, vale*, vaae*, aside1*, ipas2*)
   take one; the whole-TLB "all" forms (vmalle1, alle*, vmalls12e1*)
   take none.  As in the AT table, the op1 field of CPENS selects the
   exception level (0 = EL1, 4 = EL2, 6 = EL3), and the "is"-suffixed
   names are the Inner Shareable variants (CRm C3 / C0 rather than
   C7 / C4).  Null-name entry terminates the table.  */
3040 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3041 {
3042 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3043 { "vae1", CPENS(0,C8,C7,1), 1 },
3044 { "aside1", CPENS(0,C8,C7,2), 1 },
3045 { "vaae1", CPENS(0,C8,C7,3), 1 },
3046 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3047 { "vae1is", CPENS(0,C8,C3,1), 1 },
3048 { "aside1is", CPENS(0,C8,C3,2), 1 },
3049 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3050 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3051 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3052 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3053 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3054 { "vae2", CPENS(4,C8,C7,1), 1 },
3055 { "vae2is", CPENS(4,C8,C3,1), 1 },
3056 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3057 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3058 { "vae3", CPENS(6,C8,C7,1), 1 },
3059 { "vae3is", CPENS(6,C8,C3,1), 1 },
3060 { "alle2", CPENS(4,C8,C7,0), 0 },
3061 { "alle2is", CPENS(4,C8,C3,0), 0 },
3062 { "alle1", CPENS(4,C8,C7,4), 0 },
3063 { "alle1is", CPENS(4,C8,C3,4), 0 },
3064 { "alle3", CPENS(6,C8,C7,0), 0 },
3065 { "alle3is", CPENS(6,C8,C3,0), 0 },
3066 { "vale1is", CPENS(0,C8,C3,5), 1 },
3067 { "vale2is", CPENS(4,C8,C3,5), 1 },
3068 { "vale3is", CPENS(6,C8,C3,5), 1 },
3069 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3070 { "vale1", CPENS(0,C8,C7,5), 1 },
3071 { "vale2", CPENS(4,C8,C7,5), 1 },
3072 { "vale3", CPENS(6,C8,C7,5), 1 },
3073 { "vaale1", CPENS(0,C8,C7,7), 1 },
3074 { 0, CPENS(0,0,0,0), 0 }
3075 };
3076
3077 #undef C0
3078 #undef C1
3079 #undef C2
3080 #undef C3
3081 #undef C4
3082 #undef C5
3083 #undef C6
3084 #undef C7
3085 #undef C8
3086 #undef C9
3087 #undef C10
3088 #undef C11
3089 #undef C12
3090 #undef C13
3091 #undef C14
3092 #undef C15
3093
3094 /* Include the opcode description table as well as the operand description
3095 table. */
3096 #include "aarch64-tbl.h"
3097