aarch64-dis.c revision 1.10 1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2025 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
30
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
33
34 #define INSNLEN 4
35
36 /* This character is used to encode style information within the output
37 buffers. See get_style_text and print_operands for more details. */
38 #define STYLE_MARKER_CHAR '\002'
39
/* Cached mapping symbol state.  Distinguishes whether the region around
   the current address was flagged as code or data by the most recently
   seen mapping symbol.  */
enum map_type
{
  MAP_INSN,	/* Region holds instructions.  */
  MAP_DATA	/* Region holds data.  */
};
46
/* Feature set used to gate disassembly; see select_aarch64_variant.  */
static aarch64_feature_set arch_variant;
/* Cached mapping symbol state (see enum map_type above).  */
static enum map_type last_type;
static int last_mapping_sym = -1;	/* Index of last mapping symbol used; -1 if none.  */
static bfd_vma last_stop_offset = 0;
static bfd_vma last_mapping_addr = 0;

/* Other options.  */
static int no_aliases = 0;	/* If set disassemble as most general inst.  */
static int no_notes = 1;	/* If set do not print disassemble notes in the
				   output as comments.  Notes are disabled by
				   default.  */

/* Currently active instruction sequence.  */
static aarch64_instr_sequence insn_sequence;
61
/* Reset disassembler options to their defaults.  Currently a no-op; kept
   as a hook for the generic disassembler framework.  */
static void
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
{
}
66
67 static void
68 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
69 {
70 /* Try to match options that are simple flags */
71 if (startswith (option, "no-aliases"))
72 {
73 no_aliases = 1;
74 return;
75 }
76
77 if (startswith (option, "aliases"))
78 {
79 no_aliases = 0;
80 return;
81 }
82
83 if (startswith (option, "no-notes"))
84 {
85 no_notes = 1;
86 return;
87 }
88
89 if (startswith (option, "notes"))
90 {
91 no_notes = 0;
92 return;
93 }
94
95 #ifdef DEBUG_AARCH64
96 if (startswith (option, "debug_dump"))
97 {
98 debug_dump = 1;
99 return;
100 }
101 #endif /* DEBUG_AARCH64 */
102
103 /* Invalid option. */
104 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
105 }
106
107 static void
108 parse_aarch64_dis_options (const char *options)
109 {
110 const char *option_end;
111
112 if (options == NULL)
113 return;
114
115 while (*options != '\0')
116 {
117 /* Skip empty options. */
118 if (*options == ',')
119 {
120 options++;
121 continue;
122 }
123
124 /* We know that *options is neither NUL or a comma. */
125 option_end = options + 1;
126 while (*option_end != ',' && *option_end != '\0')
127 option_end++;
128
129 parse_aarch64_dis_option (options, option_end - options);
130
131 /* Go on to the next one. If option_end points to a comma, it
132 will be skipped above. */
133 options = option_end;
134 }
135 }
136
137 /* Functions doing the instruction disassembling. */
139
140 /* The unnamed arguments consist of the number of fields and information about
141 these fields where the VALUE will be extracted from CODE and returned.
142 MASK can be zero or the base mask of the opcode.
143
144 N.B. the fields are required to be in such an order than the most signficant
145 field for VALUE comes the first, e.g. the <index> in
146 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
147 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
148 the order of H, L, M. */
149
150 aarch64_insn
151 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
152 {
153 uint32_t num;
154 const aarch64_field *field;
155 enum aarch64_field_kind kind;
156 va_list va;
157
158 va_start (va, mask);
159 num = va_arg (va, uint32_t);
160 assert (num <= 5);
161 aarch64_insn value = 0x0;
162 while (num--)
163 {
164 kind = va_arg (va, enum aarch64_field_kind);
165 field = &fields[kind];
166 value <<= field->width;
167 value |= extract_field (kind, code, mask);
168 }
169 va_end (va);
170 return value;
171 }
172
173 /* Extract the value of all fields in SELF->fields after START from
174 instruction CODE. The least significant bit comes from the final field. */
175
176 static aarch64_insn
177 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
178 aarch64_insn code)
179 {
180 aarch64_insn value;
181 unsigned int i;
182 enum aarch64_field_kind kind;
183
184 value = 0;
185 for (i = start;
186 i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
187 {
188 kind = self->fields[i];
189 value <<= fields[kind].width;
190 value |= extract_field (kind, code, 0);
191 }
192 return value;
193 }
194
195 /* Extract the value of all fields in SELF->fields from instruction CODE.
196 The least significant bit comes from the final field. */
197
198 static aarch64_insn
199 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
200 {
201 return extract_all_fields_after (self, 0, code);
202 }
203
204 /* Sign-extend bit I of VALUE. */
205 static inline uint64_t
206 sign_extend (aarch64_insn value, unsigned i)
207 {
208 uint64_t ret, sign;
209
210 assert (i < 32);
211 ret = value;
212 sign = (uint64_t) 1 << i;
213 return ((ret & (sign + sign - 1)) ^ sign) - sign;
214 }
215
216 /* N.B. the following inline helpfer functions create a dependency on the
217 order of operand qualifier enumerators. */
218
219 /* Given VALUE, return qualifier for a general purpose register. */
220 static inline enum aarch64_opnd_qualifier
221 get_greg_qualifier_from_value (aarch64_insn value)
222 {
223 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
224 if (value <= 0x1
225 && aarch64_get_qualifier_standard_value (qualifier) == value)
226 return qualifier;
227 return AARCH64_OPND_QLF_ERR;
228 }
229
230 /* Given VALUE, return qualifier for a vector register. This does not support
231 decoding instructions that accept the 2H vector type. */
232
233 static inline enum aarch64_opnd_qualifier
234 get_vreg_qualifier_from_value (aarch64_insn value)
235 {
236 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
237
238 /* Instructions using vector type 2H should not call this function. Skip over
239 the 2H qualifier. */
240 if (qualifier >= AARCH64_OPND_QLF_V_2H)
241 qualifier += 1;
242
243 if (value <= 0x8
244 && aarch64_get_qualifier_standard_value (qualifier) == value)
245 return qualifier;
246 return AARCH64_OPND_QLF_ERR;
247 }
248
249 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
250 static inline enum aarch64_opnd_qualifier
251 get_sreg_qualifier_from_value (aarch64_insn value)
252 {
253 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
254
255 if (value <= 0x4
256 && aarch64_get_qualifier_standard_value (qualifier) == value)
257 return qualifier;
258 return AARCH64_OPND_QLF_ERR;
259 }
260
261 /* Given the instruction in *INST which is probably half way through the
262 decoding and our caller wants to know the expected qualifier for operand
263 I. Return such a qualifier if we can establish it; otherwise return
264 AARCH64_OPND_QLF_NIL. */
265
266 static aarch64_opnd_qualifier_t
267 get_expected_qualifier (const aarch64_inst *inst, int i)
268 {
269 aarch64_opnd_qualifier_seq_t qualifiers;
270 /* Should not be called if the qualifier is known. */
271 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL)
272 {
273 int invalid_count;
274 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
275 i, qualifiers, &invalid_count))
276 return qualifiers[i];
277 else
278 return AARCH64_OPND_QLF_NIL;
279 }
280 else
281 return AARCH64_OPND_QLF_ERR;
282 }
283
284 /* Operand extractors. */
285
/* Extractor for operands that encode nothing in the instruction word;
   trivially succeeds.  */
bool
aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  const aarch64_insn code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}
295
296 bool
297 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
298 const aarch64_insn code,
299 const aarch64_inst *inst ATTRIBUTE_UNUSED,
300 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
301 {
302 info->reg.regno = (extract_field (self->fields[0], code, 0)
303 + get_operand_specific_data (self));
304 return true;
305 }
306
307 bool
308 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
309 const aarch64_insn code ATTRIBUTE_UNUSED,
310 const aarch64_inst *inst ATTRIBUTE_UNUSED,
311 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
312 {
313 assert (info->idx == 1
314 || info->idx == 2
315 || info->idx == 3
316 || info->idx == 5);
317
318 unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
319 info->reg.regno = (prev_regno == 0x1f) ? 0x1f
320 : prev_regno + 1;
321 return true;
322 }
323
324 /* e.g. IC <ic_op>{, <Xt>}. */
325 bool
326 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
327 const aarch64_insn code,
328 const aarch64_inst *inst ATTRIBUTE_UNUSED,
329 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
330 {
331 info->reg.regno = extract_field (self->fields[0], code, 0);
332 assert (info->idx == 1
333 && (aarch64_get_operand_class (inst->operands[0].type)
334 == AARCH64_OPND_CLASS_SYSTEM));
335 /* This will make the constraint checking happy and more importantly will
336 help the disassembler determine whether this operand is optional or
337 not. */
338 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
339
340 return true;
341 }
342
/* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   Decode a register-plus-lane operand: the register number from the
   operand's first field, and the lane index (and possibly the qualifier)
   from instruction-class-specific fields.  */
bool
aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  info->reglane.regno = extract_field (self->fields[0], code,
				       inst->opcode->mask);

  /* Index and/or type.  */
  if (inst->opcode->iclass == asisdone
      || inst->opcode->iclass == asimdins)
    {
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  unsigned shift;
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
	  info->qualifier = get_expected_qualifier (inst, info->idx);
	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
	    return 0;
	  /* The index occupies the bits of imm4 above the element size.  */
	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
	  info->reglane.index = value >> shift;
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  int pos = -1;
	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
	  /* Position of the lowest set bit selects the element size.  */
	  while (++pos <= 3 && (value & 0x1) == 0)
	    value >>= 1;
	  if (pos > 3)
	    /* imm5 == 0000x: RESERVED.  */
	    return false;
	  info->qualifier = get_sreg_qualifier_from_value (pos);
	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
	    return 0;
	  /* The remaining bits above the size marker form the index.  */
	  info->reglane.index = (unsigned) (value >> 1);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return 0;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  info->reglane.regno &= 0x1f;
	  break;
	case AARCH64_OPND_QLF_S_2B:
	  /* h:l:m */
	  info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
						FLD_M);
	  /* Only a 4-bit register number is available here.  */
	  info->reglane.regno &= 0xf;
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
    }
  else
    {
      /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */

      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return 0;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_B:
	  /* H:imm3 */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H,
						FLD_imm3_19);
	  info->reglane.regno &= 0x7;
	  break;

	case AARCH64_OPND_QLF_S_H:
	case AARCH64_OPND_QLF_S_2B:
	  if (info->type == AARCH64_OPND_Em16)
	    {
	      /* h:l:m */
	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
						    FLD_M);
	      info->reglane.regno &= 0xf;
	    }
	  else
	    {
	      /* h:l */
	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	    }
	  break;
	case AARCH64_OPND_QLF_S_S:
	case AARCH64_OPND_QLF_S_4B:
	  /* h:l */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  info->reglane.index = extract_field (FLD_H, code, 0);
	  break;
	default:
	  return false;
	}

      if (inst->opcode->op == OP_FCMLA_ELEM
	  && info->qualifier != AARCH64_OPND_QLF_S_H)
	{
	  /* Complex operand takes two elements.  */
	  if (info->reglane.index & 1)
	    return false;
	  info->reglane.index /= 2;
	}
    }

  return true;
}
480
481 bool
482 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
483 const aarch64_insn code,
484 const aarch64_inst *inst ATTRIBUTE_UNUSED,
485 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
486 {
487 /* R */
488 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
489 /* len */
490 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
491 info->reglist.stride = 1;
492 return true;
493 }
494
/* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
bool
aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  aarch64_opnd_info *info, const aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned expected_num = get_opcode_dependent_value (inst->opcode);

  /* Table indexed by the "opcode" field value: whether the encoding is
     reserved, how many registers the list holds, and how many elements
     each structure has.  */
  struct
    {
      unsigned is_reserved;
      unsigned num_regs;
      unsigned num_elements;
    } data [] =
  { {0, 4, 4},
    {1, 4, 4},
    {0, 4, 1},
    {0, 4, 2},
    {0, 3, 3},
    {1, 3, 3},
    {0, 3, 1},
    {0, 1, 1},
    {0, 2, 2},
    {1, 2, 2},
    {0, 2, 1},
  };

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  /* opcode */
  value = extract_field (FLD_opcode, code, 0);
  /* PR 21595: Check for a bogus value.  */
  if (value >= ARRAY_SIZE (data))
    return false;
  /* Reject reserved encodings and element-count mismatches.  */
  if (expected_num != data[value].num_elements || data[value].is_reserved)
    return false;
  info->reglist.num_regs = data[value].num_regs;
  info->reglist.stride = 1;

  return true;
}
539
540 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
541 lanes instructions. */
542 bool
543 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
544 aarch64_opnd_info *info, const aarch64_insn code,
545 const aarch64_inst *inst,
546 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
547 {
548 aarch64_insn value;
549
550 /* Rt */
551 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
552 /* S */
553 value = extract_field (FLD_S, code, 0);
554
555 /* Number of registers is equal to the number of elements in
556 each structure to be loaded/stored. */
557 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
558 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
559
560 /* Except when it is LD1R. */
561 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
562 info->reglist.num_regs = 2;
563
564 info->reglist.stride = 1;
565 return true;
566 }
567
568 /* Decode AdvSIMD vector register list for AdvSIMD lut instructions.
569 The number of of registers in the list is determined by the opcode
570 flag. */
571 bool
572 aarch64_ext_lut_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
573 const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED,
575 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
576 {
577 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
578 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
579 info->reglist.stride = 1;
580 return true;
581 }
582
583 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
584 load/store single element instructions. */
585 bool
586 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
587 aarch64_opnd_info *info, const aarch64_insn code,
588 const aarch64_inst *inst ATTRIBUTE_UNUSED,
589 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
590 {
591 aarch64_field field = {0, 0};
592 aarch64_insn QSsize; /* fields Q:S:size. */
593 aarch64_insn opcodeh2; /* opcode<2:1> */
594
595 /* Rt */
596 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
597
598 /* Decode the index, opcode<2:1> and size. */
599 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
600 opcodeh2 = extract_field_2 (&field, code, 0);
601 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
602 switch (opcodeh2)
603 {
604 case 0x0:
605 info->qualifier = AARCH64_OPND_QLF_S_B;
606 /* Index encoded in "Q:S:size". */
607 info->reglist.index = QSsize;
608 break;
609 case 0x1:
610 if (QSsize & 0x1)
611 /* UND. */
612 return false;
613 info->qualifier = AARCH64_OPND_QLF_S_H;
614 /* Index encoded in "Q:S:size<1>". */
615 info->reglist.index = QSsize >> 1;
616 break;
617 case 0x2:
618 if ((QSsize >> 1) & 0x1)
619 /* UND. */
620 return false;
621 if ((QSsize & 0x1) == 0)
622 {
623 info->qualifier = AARCH64_OPND_QLF_S_S;
624 /* Index encoded in "Q:S". */
625 info->reglist.index = QSsize >> 2;
626 }
627 else
628 {
629 if (extract_field (FLD_S, code, 0))
630 /* UND */
631 return false;
632 info->qualifier = AARCH64_OPND_QLF_S_D;
633 /* Index encoded in "Q". */
634 info->reglist.index = QSsize >> 3;
635 }
636 break;
637 default:
638 return false;
639 }
640
641 info->reglist.has_index = 1;
642 info->reglist.num_regs = 0;
643 info->reglist.stride = 1;
644 /* Number of registers is equal to the number of elements in
645 each structure to be loaded/stored. */
646 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
647 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
648
649 return true;
650 }
651
652 /* Decode fields immh:immb and/or Q for e.g.
653 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
654 or SSHR <V><d>, <V><n>, #<shift>. */
655
656 bool
657 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
658 aarch64_opnd_info *info, const aarch64_insn code,
659 const aarch64_inst *inst,
660 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
661 {
662 int pos;
663 aarch64_insn Q, imm, immh;
664 enum aarch64_insn_class iclass = inst->opcode->iclass;
665
666 immh = extract_field (FLD_immh, code, 0);
667 if (immh == 0)
668 return false;
669 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
670 pos = 4;
671 /* Get highest set bit in immh. */
672 while (--pos >= 0 && (immh & 0x8) == 0)
673 immh <<= 1;
674
675 assert ((iclass == asimdshf || iclass == asisdshf)
676 && (info->type == AARCH64_OPND_IMM_VLSR
677 || info->type == AARCH64_OPND_IMM_VLSL));
678
679 if (iclass == asimdshf)
680 {
681 Q = extract_field (FLD_Q, code, 0);
682 /* immh Q <T>
683 0000 x SEE AdvSIMD modified immediate
684 0001 0 8B
685 0001 1 16B
686 001x 0 4H
687 001x 1 8H
688 01xx 0 2S
689 01xx 1 4S
690 1xxx 0 RESERVED
691 1xxx 1 2D */
692 info->qualifier =
693 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
694 if (info->qualifier == AARCH64_OPND_QLF_ERR)
695 return false;
696 }
697 else
698 {
699 info->qualifier = get_sreg_qualifier_from_value (pos);
700 if (info->qualifier == AARCH64_OPND_QLF_ERR)
701 return 0;
702 }
703
704 if (info->type == AARCH64_OPND_IMM_VLSR)
705 /* immh <shift>
706 0000 SEE AdvSIMD modified immediate
707 0001 (16-UInt(immh:immb))
708 001x (32-UInt(immh:immb))
709 01xx (64-UInt(immh:immb))
710 1xxx (128-UInt(immh:immb)) */
711 info->imm.value = (16 << pos) - imm;
712 else
713 /* immh:immb
714 immh <shift>
715 0000 SEE AdvSIMD modified immediate
716 0001 (UInt(immh:immb)-8)
717 001x (UInt(immh:immb)-16)
718 01xx (UInt(immh:immb)-32)
719 1xxx (UInt(immh:immb)-64) */
720 info->imm.value = imm - (8 << pos);
721
722 return true;
723 }
724
725 /* Decode shift immediate for e.g. sshr (imm). */
726 bool
727 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
728 aarch64_opnd_info *info, const aarch64_insn code,
729 const aarch64_inst *inst ATTRIBUTE_UNUSED,
730 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
731 {
732 int64_t imm;
733 aarch64_insn val;
734 val = extract_field (FLD_size, code, 0);
735 switch (val)
736 {
737 case 0: imm = 8; break;
738 case 1: imm = 16; break;
739 case 2: imm = 32; break;
740 default: return false;
741 }
742 info->imm.value = imm;
743 return true;
744 }
745
/* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
   Value in the field(s) will be extracted as unsigned immediate value,
   then optionally sign-extended and scaled according to the operand's
   properties.  */
bool
aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
		 const aarch64_insn code,
		 const aarch64_inst *inst,
		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t imm;

  imm = extract_all_fields (self, code);

  if (operand_need_sign_extension (self))
    imm = sign_extend (imm, get_operand_fields_width (self) - 1);

  /* Scale immediates that are stored pre-shifted in the encoding.  */
  if (operand_need_shift_by_two (self))
    imm <<= 2;
  else if (operand_need_shift_by_three (self))
    imm <<= 3;
  else if (operand_need_shift_by_four (self))
    imm <<= 4;

  /* ADRP immediates are in units of 4K pages.  */
  if (info->type == AARCH64_OPND_ADDR_ADRP)
    imm <<= 12;

  /* Some PSTATE fields carry their immediate inside CRM; mask to the
     relevant bits.  */
  if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
      && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
    imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);

  info->imm.value = imm;
  return true;
}
778
779 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
780 bool
781 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
782 const aarch64_insn code,
783 const aarch64_inst *inst ATTRIBUTE_UNUSED,
784 aarch64_operand_error *errors)
785 {
786 aarch64_ext_imm (self, info, code, inst, errors);
787 info->shifter.kind = AARCH64_MOD_LSL;
788 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
789 return true;
790 }
791
792 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
793 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
794 bool
795 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
796 aarch64_opnd_info *info,
797 const aarch64_insn code,
798 const aarch64_inst *inst ATTRIBUTE_UNUSED,
799 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
800 {
801 uint64_t imm;
802 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
803 aarch64_field field = {0, 0};
804
805 assert (info->idx == 1);
806
807 if (info->type == AARCH64_OPND_SIMD_FPIMM)
808 info->imm.is_fp = 1;
809
810 /* a:b:c:d:e:f:g:h */
811 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
812 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
813 {
814 /* Either MOVI <Dd>, #<imm>
815 or MOVI <Vd>.2D, #<imm>.
816 <imm> is a 64-bit immediate
817 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
818 encoded in "a:b:c:d:e:f:g:h". */
819 int i;
820 unsigned abcdefgh = imm;
821 for (imm = 0ull, i = 0; i < 8; i++)
822 if (((abcdefgh >> i) & 0x1) != 0)
823 imm |= 0xffull << (8 * i);
824 }
825 info->imm.value = imm;
826
827 /* cmode */
828 info->qualifier = get_expected_qualifier (inst, info->idx);
829 if (info->qualifier == AARCH64_OPND_QLF_ERR)
830 return 0;
831 switch (info->qualifier)
832 {
833 case AARCH64_OPND_QLF_NIL:
834 /* no shift */
835 info->shifter.kind = AARCH64_MOD_NONE;
836 return 1;
837 case AARCH64_OPND_QLF_LSL:
838 /* shift zeros */
839 info->shifter.kind = AARCH64_MOD_LSL;
840 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
841 {
842 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
843 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
844 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
845 default: return false;
846 }
847 /* 00: 0; 01: 8; 10:16; 11:24. */
848 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
849 break;
850 case AARCH64_OPND_QLF_MSL:
851 /* shift ones */
852 info->shifter.kind = AARCH64_MOD_MSL;
853 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
854 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
855 break;
856 default:
857 return false;
858 }
859
860 return true;
861 }
862
863 /* Decode an 8-bit floating-point immediate. */
864 bool
865 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
866 const aarch64_insn code,
867 const aarch64_inst *inst ATTRIBUTE_UNUSED,
868 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
869 {
870 info->imm.value = extract_all_fields (self, code);
871 info->imm.is_fp = 1;
872 return true;
873 }
874
875 /* Decode a 1-bit rotate immediate (#90 or #270). */
876 bool
877 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
878 const aarch64_insn code,
879 const aarch64_inst *inst ATTRIBUTE_UNUSED,
880 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
881 {
882 uint64_t rot = extract_field (self->fields[0], code, 0);
883 assert (rot < 2U);
884 info->imm.value = rot * 180 + 90;
885 return true;
886 }
887
888 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
889 bool
890 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
891 const aarch64_insn code,
892 const aarch64_inst *inst ATTRIBUTE_UNUSED,
893 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
894 {
895 uint64_t rot = extract_field (self->fields[0], code, 0);
896 assert (rot < 4U);
897 info->imm.value = rot * 90;
898 return true;
899 }
900
901 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
902 bool
903 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
904 aarch64_opnd_info *info, const aarch64_insn code,
905 const aarch64_inst *inst ATTRIBUTE_UNUSED,
906 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
907 {
908 info->imm.value = 64- extract_field (FLD_scale, code, 0);
909 return true;
910 }
911
912 /* Decode arithmetic immediate for e.g.
913 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
914 bool
915 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
916 aarch64_opnd_info *info, const aarch64_insn code,
917 const aarch64_inst *inst ATTRIBUTE_UNUSED,
918 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
919 {
920 aarch64_insn value;
921
922 info->shifter.kind = AARCH64_MOD_LSL;
923 /* shift */
924 value = extract_field (FLD_shift, code, 0);
925 if (value >= 2)
926 return false;
927 info->shifter.amount = value ? 12 : 0;
928 /* imm12 (unsigned) */
929 info->imm.value = extract_field (FLD_imm12, code, 0);
930
931 return true;
932 }
933
/* Return true if VALUE is a valid logical immediate encoding, storing the
   decoded value in *RESULT if so.  ESIZE is the number of bytes in the
   decoded immediate.  */
static bool
decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
{
  uint64_t imm, mask;
  uint32_t N, R, S;
  unsigned simd_size;

  /* value is N:immr:imms.  */
  S = value & 0x3f;
  R = (value >> 6) & 0x3f;
  N = (value >> 12) & 0x1;

  /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
     (in other words, right rotated by R), then replicated.  */
  if (N != 0)
    {
      simd_size = 64;
      mask = 0xffffffffffffffffull;
    }
  else
    {
      /* N == 0: the leading ones in S select the element size; the
	 remaining low bits of S are the run length field.  */
      switch (S)
	{
	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
	default: return false;
	}
      mask = (1ull << simd_size) - 1;
      /* Top bits are IGNORED.  */
      R &= simd_size - 1;
    }

  /* The element cannot be wider than the operand size.  */
  if (simd_size > esize * 8)
    return false;

  /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
  if (S == simd_size - 1)
    return false;
  /* S+1 consecutive bits to 1.  */
  /* NOTE: S can't be 63 due to detection above.  */
  imm = (1ull << (S + 1)) - 1;
  /* Rotate to the left by simd_size - R.  */
  if (R != 0)
    imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
  /* Replicate the value according to SIMD size.  Each case doubles the
     pattern width and deliberately falls through to the next.  */
  switch (simd_size)
    {
    case  2: imm = (imm <<  2) | imm;
      /* Fall through.  */
    case  4: imm = (imm <<  4) | imm;
      /* Fall through.  */
    case  8: imm = (imm <<  8) | imm;
      /* Fall through.  */
    case 16: imm = (imm << 16) | imm;
      /* Fall through.  */
    case 32: imm = (imm << 32) | imm;
      /* Fall through.  */
    case 64: break;
    default: return 0;
    }

  /* Truncate to esize bytes.  The shift is split into two esize*4 steps
     so that esize == 8 does not shift a 64-bit value by 64 (which would
     be undefined behaviour).  */
  *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));

  return true;
}
1005
1006 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
1007 bool
1008 aarch64_ext_limm (const aarch64_operand *self,
1009 aarch64_opnd_info *info, const aarch64_insn code,
1010 const aarch64_inst *inst,
1011 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1012 {
1013 uint32_t esize;
1014 aarch64_insn value;
1015
1016 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
1017 self->fields[2]);
1018 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1019 return decode_limm (esize, value, &info->imm.value);
1020 }
1021
1022 /* Decode a logical immediate for the BIC alias of AND (etc.). */
1023 bool
1024 aarch64_ext_inv_limm (const aarch64_operand *self,
1025 aarch64_opnd_info *info, const aarch64_insn code,
1026 const aarch64_inst *inst,
1027 aarch64_operand_error *errors)
1028 {
1029 if (!aarch64_ext_limm (self, info, code, inst, errors))
1030 return false;
1031 info->imm.value = ~info->imm.value;
1032 return true;
1033 }
1034
/* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
		aarch64_opnd_info *info,
		const aarch64_insn code, const aarch64_inst *inst,
		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reg.regno = extract_field (FLD_Rt, code, 0);

  /* size */
  value = extract_field (FLD_ldst_size, code, 0);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* Pair and literal forms: size selects S, D or Q directly.  */
      enum aarch64_opnd_qualifier qualifier;
      switch (value)
	{
	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
	default: return false;
	}
      info->qualifier = qualifier;
    }
  else
    {
      /* opc1:size selects one of the B/H/S/D/Q scalar qualifiers.  */
      value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
      if (value > 0x4)
	return false;
      info->qualifier = get_sreg_qualifier_from_value (value);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
    }

  return true;
}
1078
1079 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
1080 bool
1081 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1082 aarch64_opnd_info *info,
1083 aarch64_insn code,
1084 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1085 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1086 {
1087 /* Rn */
1088 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1089 return true;
1090 }
1091
/* Decode the address operand for rcpc3 instructions with optional load/store
   datasize offset, e.g. STILPP <Xs>, <Xt>, [<Xn|SP>{,#-16}]! and
   LIDAP <Xs>, <Xt>, [<Xn|SP>]{,#-16}.  */
bool
aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
				   aarch64_opnd_info *info,
				   aarch64_insn code,
				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
				   aarch64_operand_error *err ATTRIBUTE_UNUSED)
{
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* opc2 == 0 selects the writeback form; otherwise the plain
     base-register address is used and no offset is printed.  */
  if (!extract_field (FLD_opc2, code, 0))
    {
      info->addr.writeback = 1;

      /* Scan forward for the address operand's entry; its type tells us
	 whether this is the pre- or post-indexed variant.
	 NOTE(review): INFO is used as a cursor into the operands[]
	 array here — assumes the address operand follows INFO.  */
      enum aarch64_opnd type;
      for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
	{
	  aarch64_opnd_info opnd = info[i];
	  type = opnd.type;
	  if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
	    break;
	}

      assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
      /* The implicit offset is the total transfer size in bytes.  */
      int offset = calc_ldst_datasize (inst->operands);

      switch (type)
	{
	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
	  /* Pre-indexed: the base is decremented before the access.  */
	  info->addr.offset.imm = -offset;
	  info->addr.preind = 1;
	  break;
	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
	  /* Post-indexed: the base is incremented after the access.  */
	  info->addr.offset.imm = offset;
	  info->addr.postind = 1;
	  break;
	default:
	  return false;
	}
    }
  return true;
}
1137
1138 bool
1139 aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1140 aarch64_opnd_info *info,
1141 aarch64_insn code,
1142 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1143 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1144 {
1145 info->qualifier = get_expected_qualifier (inst, info->idx);
1146 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1147 return 0;
1148
1149 /* Rn */
1150 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1151
1152 /* simm9 */
1153 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1154 info->addr.offset.imm = sign_extend (imm, 8);
1155 return true;
1156 }
1157
1158 /* Decode the address operand for e.g.
1159 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1160 bool
1161 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1162 aarch64_opnd_info *info,
1163 aarch64_insn code, const aarch64_inst *inst,
1164 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1165 {
1166 info->qualifier = get_expected_qualifier (inst, info->idx);
1167 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1168 return 0;
1169
1170 /* Rn */
1171 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1172
1173 /* simm9 */
1174 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1175 info->addr.offset.imm = sign_extend (imm, 8);
1176 if (extract_field (self->fields[2], code, 0) == 1) {
1177 info->addr.writeback = 1;
1178 info->addr.preind = 1;
1179 }
1180 return true;
1181 }
1182
1183 /* Decode the address operand for e.g.
1184 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1185 bool
1186 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1187 aarch64_opnd_info *info,
1188 aarch64_insn code, const aarch64_inst *inst,
1189 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 {
1191 aarch64_insn S, value;
1192
1193 /* Rn */
1194 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1195 /* Rm */
1196 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1197 /* option */
1198 value = extract_field (FLD_option, code, 0);
1199 info->shifter.kind =
1200 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1201 /* Fix-up the shifter kind; although the table-driven approach is
1202 efficient, it is slightly inflexible, thus needing this fix-up. */
1203 if (info->shifter.kind == AARCH64_MOD_UXTX)
1204 info->shifter.kind = AARCH64_MOD_LSL;
1205 /* S */
1206 S = extract_field (FLD_S, code, 0);
1207 if (S == 0)
1208 {
1209 info->shifter.amount = 0;
1210 info->shifter.amount_present = 0;
1211 }
1212 else
1213 {
1214 int size;
1215 /* Need information in other operand(s) to help achieve the decoding
1216 from 'S' field. */
1217 info->qualifier = get_expected_qualifier (inst, info->idx);
1218 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1219 return 0;
1220 /* Get the size of the data element that is accessed, which may be
1221 different from that of the source register size, e.g. in strb/ldrb. */
1222 size = aarch64_get_qualifier_esize (info->qualifier);
1223 info->shifter.amount = get_logsz (size);
1224 info->shifter.amount_present = 1;
1225 }
1226
1227 return true;
1228 }
1229
1230 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1231 bool
1232 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1233 aarch64_insn code, const aarch64_inst *inst,
1234 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1235 {
1236 aarch64_insn imm;
1237 info->qualifier = get_expected_qualifier (inst, info->idx);
1238 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1239 return 0;
1240
1241 /* Rn */
1242 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1243 /* simm (imm9 or imm7) */
1244 imm = extract_field (self->fields[0], code, 0);
1245 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1246 if (self->fields[0] == FLD_imm7
1247 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1248 /* scaled immediate in ld/st pair instructions. */
1249 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1250 /* qualifier */
1251 if (inst->opcode->iclass == ldst_unscaled
1252 || inst->opcode->iclass == ldstnapair_offs
1253 || inst->opcode->iclass == ldstpair_off
1254 || inst->opcode->iclass == ldst_unpriv)
1255 info->addr.writeback = 0;
1256 else
1257 {
1258 /* pre/post- index */
1259 info->addr.writeback = 1;
1260 if (extract_field (self->fields[1], code, 0) == 1)
1261 info->addr.preind = 1;
1262 else
1263 info->addr.postind = 1;
1264 }
1265
1266 return true;
1267 }
1268
1269 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1270 bool
1271 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1272 aarch64_insn code,
1273 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1274 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1275 {
1276 int shift;
1277 info->qualifier = get_expected_qualifier (inst, info->idx);
1278 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1279 return 0;
1280 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1281 /* Rn */
1282 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1283 /* uimm12 */
1284 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1285 return true;
1286 }
1287
1288 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1289 bool
1290 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1291 aarch64_insn code,
1292 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1293 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1294 {
1295 aarch64_insn imm;
1296
1297 info->qualifier = get_expected_qualifier (inst, info->idx);
1298 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1299 return 0;
1300 /* Rn */
1301 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1302 /* simm10 */
1303 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1304 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1305 if (extract_field (self->fields[3], code, 0) == 1) {
1306 info->addr.writeback = 1;
1307 info->addr.preind = 1;
1308 }
1309 return true;
1310 }
1311
/* Decode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info,
			    aarch64_insn code, const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm | #<amount> */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  /* Rm == 31 encodes an immediate post-increment equal to the total
     number of bytes transferred; any other value is a register offset.  */
  if (info->addr.offset.regno == 31)
    {
      if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
	/* Special handling of loading single structure to all lane.  */
	info->addr.offset.imm = (is_ld1r ? 1
				 : inst->operands[0].reglist.num_regs)
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
      else
	/* regs * esize * nelem = total bytes moved by the instruction.  */
	info->addr.offset.imm = inst->operands[0].reglist.num_regs
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
    }
  else
    info->addr.offset.is_reg = 1;
  /* This addressing form always writes the base register back.  */
  info->addr.writeback = 1;

  return true;
}
1346
1347 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1348 bool
1349 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1350 aarch64_opnd_info *info,
1351 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1352 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1353 {
1354 aarch64_insn value;
1355 /* cond */
1356 value = extract_field (FLD_cond, code, 0);
1357 info->cond = get_cond_from_value (value);
1358 return true;
1359 }
1360
1361 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1362 bool
1363 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1364 aarch64_opnd_info *info,
1365 aarch64_insn code,
1366 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1367 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1368 {
1369 /* op0:op1:CRn:CRm:op2 */
1370 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1371 FLD_CRm, FLD_op2);
1372 info->sysreg.flags = 0;
1373
1374 /* If a system instruction, check which restrictions should be on the register
1375 value during decoding, these will be enforced then. */
1376 if (inst->opcode->iclass == ic_system)
1377 {
1378 /* Check to see if it's read-only, else check if it's write only.
1379 if it's both or unspecified don't care. */
1380 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1381 info->sysreg.flags = F_REG_READ;
1382 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1383 == F_SYS_WRITE)
1384 info->sysreg.flags = F_REG_WRITE;
1385 }
1386
1387 return true;
1388 }
1389
/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.
   Returns false when op1:op2 (possibly qualified by CRm[3:1]) does not
   name a known PSTATE field.  */
bool
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
  /* op1:op2 */
  info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
  /* Linear search of the PSTATE-field table for a matching op1:op2.  */
  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
      {
	/* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
	uint32_t flags = aarch64_pstatefields[i].flags;
	if ((flags & F_REG_IN_CRM)
	    && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
	  continue;
	info->sysreg.flags = flags;
	return true;
      }
  /* Reserved value in <pstatefield>.  */
  return false;
}
1415
/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.
   Selects the operand table from info->type, then looks up the encoded
   op0:op1:CRn:CRm:op2 value; returns false if no table entry matches.  */
bool
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       aarch64_opnd_info *info,
		       aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn value;
  const aarch64_sys_ins_reg *sysins_ops;
  /* op0:op1:CRn:CRm:op2 */
  value = extract_fields (code, 0, 5,
			  FLD_op0, FLD_op1, FLD_CRn,
			  FLD_CRm, FLD_op2);

  switch (info->type)
    {
    case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
    case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
    case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
    case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
    /* TLBIP shares the TLBI operand table.  */
    case AARCH64_OPND_SYSREG_TLBIP: sysins_ops = aarch64_sys_regs_tlbi; break;
    case AARCH64_OPND_SYSREG_SR:
      sysins_ops = aarch64_sys_regs_sr;
      /* Let's remove op2 for rctx.  Refer to comments in the definition of
	 aarch64_sys_regs_sr[].  */
      value = value & ~(0x7);
      break;
    default: return false;
    }

  /* Linear search of the selected table for the encoded value.  */
  for (i = 0; sysins_ops[i].name != NULL; ++i)
    if (sysins_ops[i].value == value)
      {
	info->sysins_op = sysins_ops + i;
	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
		     info->sysins_op->name,
		     (unsigned)info->sysins_op->value,
		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
	return true;
      }

  return false;
}
1461
1462 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1463
1464 bool
1465 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1466 aarch64_opnd_info *info,
1467 aarch64_insn code,
1468 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1469 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1470 {
1471 /* CRm */
1472 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1473 return true;
1474 }
1475
1476 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>. */
1477
1478 bool
1479 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1480 aarch64_opnd_info *info,
1481 aarch64_insn code,
1482 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1483 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1484 {
1485 /* For the DSB nXS barrier variant immediate is encoded in 2-bit field. */
1486 aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1487 info->barrier = aarch64_barrier_dsb_nxs_options + field;
1488 return true;
1489 }
1490
1491 /* Decode the prefetch operation option operand for e.g.
1492 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1493
1494 bool
1495 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1496 aarch64_opnd_info *info,
1497 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1498 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1499 {
1500 /* prfop in Rt */
1501 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1502 return true;
1503 }
1504
1505 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1506 to the matching name/value pair in aarch64_hint_options. */
1507
1508 bool
1509 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1510 aarch64_opnd_info *info,
1511 aarch64_insn code,
1512 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1513 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1514 {
1515 /* CRm:op2. */
1516 unsigned hint_number;
1517 int i;
1518
1519 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1520
1521 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1522 {
1523 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1524 {
1525 info->hint_option = &(aarch64_hint_options[i]);
1526 return true;
1527 }
1528 }
1529
1530 return false;
1531 }
1532
1533 /* Decode the extended register operand for e.g.
1534 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1535 bool
1536 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1537 aarch64_opnd_info *info,
1538 aarch64_insn code,
1539 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1540 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1541 {
1542 aarch64_insn value;
1543
1544 /* Rm */
1545 info->reg.regno = extract_field (FLD_Rm, code, 0);
1546 /* option */
1547 value = extract_field (FLD_option, code, 0);
1548 info->shifter.kind =
1549 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1550 /* imm3 */
1551 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1552
1553 /* This makes the constraint checking happy. */
1554 info->shifter.operator_present = 1;
1555
1556 /* Assume inst->operands[0].qualifier has been resolved. */
1557 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1558 info->qualifier = AARCH64_OPND_QLF_W;
1559 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1560 && (info->shifter.kind == AARCH64_MOD_UXTX
1561 || info->shifter.kind == AARCH64_MOD_SXTX))
1562 info->qualifier = AARCH64_OPND_QLF_X;
1563
1564 return true;
1565 }
1566
1567 /* Decode the shifted register operand for e.g.
1568 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1569 bool
1570 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1571 aarch64_opnd_info *info,
1572 aarch64_insn code,
1573 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1574 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1575 {
1576 aarch64_insn value;
1577
1578 /* Rm */
1579 info->reg.regno = extract_field (FLD_Rm, code, 0);
1580 /* shift */
1581 value = extract_field (FLD_shift, code, 0);
1582 info->shifter.kind =
1583 aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1584 if (info->shifter.kind == AARCH64_MOD_ROR
1585 && inst->opcode->iclass != log_shift)
1586 /* ROR is not available for the shifted register operand in arithmetic
1587 instructions. */
1588 return false;
1589 /* imm6 */
1590 info->shifter.amount = extract_field (FLD_imm6_10, code, 0);
1591
1592 /* This makes the constraint checking happy. */
1593 info->shifter.operator_present = 1;
1594
1595 return true;
1596 }
1597
1598 /* Decode the LSL-shifted register operand for e.g.
1599 ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}. */
1600 bool
1601 aarch64_ext_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1602 aarch64_opnd_info *info,
1603 aarch64_insn code,
1604 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1605 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1606 {
1607 /* Rm */
1608 info->reg.regno = extract_field (FLD_Rm, code, 0);
1609 /* imm3 */
1610 info->shifter.kind = AARCH64_MOD_LSL;
1611 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1612 return true;
1613 }
1614
/* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
   where <offset> is given by the OFFSET parameter and where <factor> is
   1 plus SELF's operand-dependent value.  fields[0] specifies the field
   that holds <base>.  */
static bool
aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
				 aarch64_opnd_info *info, aarch64_insn code,
				 int64_t offset)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
  info->addr.offset.is_reg = false;
  info->addr.writeback = false;
  info->addr.preind = true;
  /* Only attach the MUL VL modifier when there is an offset to print;
     the amount is always 1 but is never displayed (amount_present is
     false below).  */
  if (offset != 0)
    info->shifter.kind = AARCH64_MOD_MUL_VL;
  info->shifter.amount = 1;
  info->shifter.operator_present = (info->addr.offset.imm != 0);
  info->shifter.amount_present = false;
  return true;
}
1636
1637 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1638 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1639 SELF's operand-dependent value. fields[0] specifies the field that
1640 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1641 bool
1642 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1643 aarch64_opnd_info *info, aarch64_insn code,
1644 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1645 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1646 {
1647 int offset;
1648
1649 offset = extract_field (FLD_SVE_imm4, code, 0);
1650 offset = ((offset + 8) & 15) - 8;
1651 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1652 }
1653
1654 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1655 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1656 SELF's operand-dependent value. fields[0] specifies the field that
1657 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1658 bool
1659 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1660 aarch64_opnd_info *info, aarch64_insn code,
1661 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1662 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1663 {
1664 int offset;
1665
1666 offset = extract_field (FLD_SVE_imm6, code, 0);
1667 offset = (((offset + 32) & 63) - 32);
1668 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1669 }
1670
1671 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1672 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1673 SELF's operand-dependent value. fields[0] specifies the field that
1674 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1675 and imm3 fields, with imm3 being the less-significant part. */
1676 bool
1677 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1678 aarch64_opnd_info *info,
1679 aarch64_insn code,
1680 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1681 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1682 {
1683 int offset;
1684
1685 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1686 offset = (((offset + 256) & 511) - 256);
1687 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1688 }
1689
1690 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1691 is given by the OFFSET parameter and where <shift> is SELF's operand-
1692 dependent value. fields[0] specifies the base register field <base>. */
1693 static bool
1694 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1695 aarch64_opnd_info *info, aarch64_insn code,
1696 int64_t offset)
1697 {
1698 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1699 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1700 info->addr.offset.is_reg = false;
1701 info->addr.writeback = false;
1702 info->addr.preind = true;
1703 info->shifter.operator_present = false;
1704 info->shifter.amount_present = false;
1705 return true;
1706 }
1707
1708 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1709 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1710 value. fields[0] specifies the base register field. */
1711 bool
1712 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1713 aarch64_opnd_info *info, aarch64_insn code,
1714 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1715 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1716 {
1717 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1718 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1719 }
1720
1721 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1722 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1723 value. fields[0] specifies the base register field. */
1724 bool
1725 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1726 aarch64_opnd_info *info, aarch64_insn code,
1727 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1728 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1729 {
1730 int offset = extract_field (FLD_SVE_imm6, code, 0);
1731 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1732 }
1733
1734 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1735 is SELF's operand-dependent value. fields[0] specifies the base
1736 register field and fields[1] specifies the offset register field. */
1737 bool
1738 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1739 aarch64_opnd_info *info, aarch64_insn code,
1740 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1741 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1742 {
1743 int index_regno;
1744
1745 index_regno = extract_field (self->fields[1], code, 0);
1746 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1747 return false;
1748
1749 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1750 info->addr.offset.regno = index_regno;
1751 info->addr.offset.is_reg = true;
1752 info->addr.writeback = false;
1753 info->addr.preind = true;
1754 info->shifter.kind = AARCH64_MOD_LSL;
1755 info->shifter.amount = get_operand_specific_data (self);
1756 info->shifter.operator_present = (info->shifter.amount != 0);
1757 info->shifter.amount_present = (info->shifter.amount != 0);
1758 return true;
1759 }
1760
1761 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1762 <shift> is SELF's operand-dependent value. fields[0] specifies the
1763 base register field, fields[1] specifies the offset register field and
1764 fields[2] is a single-bit field that selects SXTW over UXTW. */
1765 bool
1766 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1767 aarch64_opnd_info *info, aarch64_insn code,
1768 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1769 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1770 {
1771 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1772 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1773 info->addr.offset.is_reg = true;
1774 info->addr.writeback = false;
1775 info->addr.preind = true;
1776 if (extract_field (self->fields[2], code, 0))
1777 info->shifter.kind = AARCH64_MOD_SXTW;
1778 else
1779 info->shifter.kind = AARCH64_MOD_UXTW;
1780 info->shifter.amount = get_operand_specific_data (self);
1781 info->shifter.operator_present = true;
1782 info->shifter.amount_present = (info->shifter.amount != 0);
1783 return true;
1784 }
1785
1786 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1787 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1788 fields[0] specifies the base register field. */
1789 bool
1790 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1791 aarch64_opnd_info *info, aarch64_insn code,
1792 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1793 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1794 {
1795 int offset = extract_field (FLD_imm5, code, 0);
1796 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1797 }
1798
/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
   number.  fields[0] specifies the base register field and fields[1]
   specifies the offset register field.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
			 aarch64_insn code, enum aarch64_modifier_kind kind)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  info->addr.offset.regno = extract_field (self->fields[1], code, 0);
  info->addr.offset.is_reg = true;
  info->addr.writeback = false;
  info->addr.preind = true;
  info->shifter.kind = kind;
  info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
  /* A plain LSL #0 is the default form, so print the operator only for
     a non-LSL modifier or a non-zero amount.  */
  info->shifter.operator_present = (kind != AARCH64_MOD_LSL
				    || info->shifter.amount != 0);
  info->shifter.amount_present = (info->shifter.amount != 0);
  return true;
}
1819
1820 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1821 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1822 field and fields[1] specifies the offset register field. */
1823 bool
1824 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1825 aarch64_opnd_info *info, aarch64_insn code,
1826 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1827 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1828 {
1829 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1830 }
1831
1832 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1833 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1834 field and fields[1] specifies the offset register field. */
1835 bool
1836 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1837 aarch64_opnd_info *info, aarch64_insn code,
1838 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1839 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1840 {
1841 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1842 }
1843
1844 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1845 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1846 field and fields[1] specifies the offset register field. */
1847 bool
1848 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1849 aarch64_opnd_info *info, aarch64_insn code,
1850 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1851 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1852 {
1853 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1854 }
1855
/* Finish decoding an SVE arithmetic immediate, given that INFO already
   has the raw field value and that the low 8 bits decode to VALUE.  */
static bool
decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
{
  info->shifter.kind = AARCH64_MOD_LSL;
  info->shifter.amount = 0;
  /* Bit 8 of the raw field selects the shifted form.  */
  if (info->imm.value & 0x100)
    {
      if (value == 0)
	/* Decode 0x100 as #0, LSL #8.  */
	info->shifter.amount = 8;
      else
	/* Otherwise fold the shift into the printed value.  */
	value *= 256;
    }
  /* Print the shifter only in the "#0, LSL #8" case.  */
  info->shifter.operator_present = (info->shifter.amount != 0);
  info->shifter.amount_present = (info->shifter.amount != 0);
  info->imm.value = value;
  return true;
}
1876
1877 /* Decode an SVE ADD/SUB immediate. */
1878 bool
1879 aarch64_ext_sve_aimm (const aarch64_operand *self,
1880 aarch64_opnd_info *info, const aarch64_insn code,
1881 const aarch64_inst *inst,
1882 aarch64_operand_error *errors)
1883 {
1884 return (aarch64_ext_imm (self, info, code, inst, errors)
1885 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1886 }
1887
1888 bool
1889 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1890 aarch64_opnd_info *info, aarch64_insn code,
1891 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1892 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1893 {
1894 unsigned int num_regs = get_operand_specific_data (self);
1895 unsigned int val = extract_field (self->fields[0], code, 0);
1896 info->reglist.first_regno = val * num_regs;
1897 info->reglist.num_regs = num_regs;
1898 info->reglist.stride = 1;
1899 return true;
1900 }
1901
1902 /* Decode an SVE CPY/DUP immediate. */
1903 bool
1904 aarch64_ext_sve_asimm (const aarch64_operand *self,
1905 aarch64_opnd_info *info, const aarch64_insn code,
1906 const aarch64_inst *inst,
1907 aarch64_operand_error *errors)
1908 {
1909 return (aarch64_ext_imm (self, info, code, inst, errors)
1910 && decode_sve_aimm (info, (int8_t) info->imm.value));
1911 }
1912
1913 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1914 The fields array specifies which field to use. */
1915 bool
1916 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1917 aarch64_opnd_info *info, aarch64_insn code,
1918 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1919 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1920 {
1921 if (extract_field (self->fields[0], code, 0))
1922 info->imm.value = 0x3f800000;
1923 else
1924 info->imm.value = 0x3f000000;
1925 info->imm.is_fp = true;
1926 return true;
1927 }
1928
1929 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1930 The fields array specifies which field to use. */
1931 bool
1932 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1933 aarch64_opnd_info *info, aarch64_insn code,
1934 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1935 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1936 {
1937 if (extract_field (self->fields[0], code, 0))
1938 info->imm.value = 0x40000000;
1939 else
1940 info->imm.value = 0x3f000000;
1941 info->imm.is_fp = true;
1942 return true;
1943 }
1944
1945 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1946 The fields array specifies which field to use. */
1947 bool
1948 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1949 aarch64_opnd_info *info, aarch64_insn code,
1950 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1951 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1952 {
1953 if (extract_field (self->fields[0], code, 0))
1954 info->imm.value = 0x3f800000;
1955 else
1956 info->imm.value = 0x0;
1957 info->imm.is_fp = true;
1958 return true;
1959 }
1960
1961 /* Decode SME instruction such as MOVZA ZA tile slice to vector. */
1962 bool
1963 aarch64_ext_sme_za_tile_to_vec (const aarch64_operand *self,
1964 aarch64_opnd_info *info, aarch64_insn code,
1965 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1966 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1967 {
1968 aarch64_insn Qsize; /* fields Q:S:size. */
1969 int fld_v = extract_field (self->fields[0], code, 0);
1970 int fld_rv = extract_field (self->fields[1], code, 0);
1971 int fld_zan_imm = extract_field (FLD_imm4_5, code, 0);
1972
1973 Qsize = extract_fields (inst->value, 0, 2, FLD_SME_size_22, FLD_SME_Q);
1974 switch (Qsize)
1975 {
1976 case 0x0:
1977 info->qualifier = AARCH64_OPND_QLF_S_B;
1978 info->indexed_za.regno = 0;
1979 info->indexed_za.index.imm = fld_zan_imm;
1980 break;
1981 case 0x2:
1982 info->qualifier = AARCH64_OPND_QLF_S_H;
1983 info->indexed_za.regno = fld_zan_imm >> 3;
1984 info->indexed_za.index.imm = fld_zan_imm & 0x07;
1985 break;
1986 case 0x4:
1987 info->qualifier = AARCH64_OPND_QLF_S_S;
1988 info->indexed_za.regno = fld_zan_imm >> 2;
1989 info->indexed_za.index.imm = fld_zan_imm & 0x03;
1990 break;
1991 case 0x6:
1992 info->qualifier = AARCH64_OPND_QLF_S_D;
1993 info->indexed_za.regno = fld_zan_imm >> 1;
1994 info->indexed_za.index.imm = fld_zan_imm & 0x01;
1995 break;
1996 case 0x7:
1997 info->qualifier = AARCH64_OPND_QLF_S_Q;
1998 info->indexed_za.regno = fld_zan_imm;
1999 break;
2000 default:
2001 return false;
2002 }
2003
2004 info->indexed_za.index.regno = fld_rv + 12;
2005 info->indexed_za.v = fld_v;
2006
2007 return true;
2008 }
2009
2010 /* Decode ZA tile vector, vector indicator, vector selector, qualifier and
2011 immediate on numerous SME instruction fields such as MOVA. */
2012 bool
2013 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
2014 aarch64_opnd_info *info, aarch64_insn code,
2015 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2016 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2017 {
2018 int fld_size = extract_field (self->fields[0], code, 0);
2019 int fld_q = extract_field (self->fields[1], code, 0);
2020 int fld_v = extract_field (self->fields[2], code, 0);
2021 int fld_rv = extract_field (self->fields[3], code, 0);
2022 int fld_zan_imm = extract_field (self->fields[4], code, 0);
2023
2024 /* Deduce qualifier encoded in size and Q fields. */
2025 if (fld_size == 0)
2026 {
2027 info->indexed_za.regno = 0;
2028 info->indexed_za.index.imm = fld_zan_imm;
2029 }
2030 else if (fld_size == 1)
2031 {
2032 info->indexed_za.regno = fld_zan_imm >> 3;
2033 info->indexed_za.index.imm = fld_zan_imm & 0x07;
2034 }
2035 else if (fld_size == 2)
2036 {
2037 info->indexed_za.regno = fld_zan_imm >> 2;
2038 info->indexed_za.index.imm = fld_zan_imm & 0x03;
2039 }
2040 else if (fld_size == 3 && fld_q == 0)
2041 {
2042 info->indexed_za.regno = fld_zan_imm >> 1;
2043 info->indexed_za.index.imm = fld_zan_imm & 0x01;
2044 }
2045 else if (fld_size == 3 && fld_q == 1)
2046 {
2047 info->indexed_za.regno = fld_zan_imm;
2048 info->indexed_za.index.imm = 0;
2049 }
2050 else
2051 return false;
2052
2053 info->indexed_za.index.regno = fld_rv + 12;
2054 info->indexed_za.v = fld_v;
2055
2056 return true;
2057 }
2058
2059 bool
2060 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
2061 aarch64_opnd_info *info, aarch64_insn code,
2062 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2063 aarch64_operand_error *errors
2064 ATTRIBUTE_UNUSED)
2065 {
2066 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
2067 int range_size = get_opcode_dependent_value (inst->opcode);
2068 int fld_v = extract_field (self->fields[0], code, 0);
2069 int fld_rv = extract_field (self->fields[1], code, 0);
2070 int fld_zan_imm = extract_field (self->fields[2], code, 0);
2071 int max_value = 16 / range_size / ebytes;
2072
2073 if (max_value == 0)
2074 max_value = 1;
2075
2076 int regno = fld_zan_imm / max_value;
2077 if (regno >= ebytes)
2078 return false;
2079
2080 info->indexed_za.regno = regno;
2081 info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
2082 info->indexed_za.index.countm1 = range_size - 1;
2083 info->indexed_za.index.regno = fld_rv + 12;
2084 info->indexed_za.v = fld_v;
2085
2086 return true;
2087 }
2088
2089 /* Decode in SME instruction ZERO list of up to eight 64-bit element tile names
2090 separated by commas, encoded in the "imm8" field.
2091
2092 For programmer convenience an assembler must also accept the names of
2093 32-bit, 16-bit and 8-bit element tiles which are converted into the
2094 corresponding set of 64-bit element tiles.
2095 */
2096 bool
2097 aarch64_ext_sme_za_list (const aarch64_operand *self,
2098 aarch64_opnd_info *info, aarch64_insn code,
2099 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2100 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2101 {
2102 int mask = extract_field (self->fields[0], code, 0);
2103 info->imm.value = mask;
2104 return true;
2105 }
2106
2107 /* Decode ZA array vector select register (Rv field), optional vector and
2108 memory offset (imm4_11 field).
2109 */
2110 bool
2111 aarch64_ext_sme_za_array (const aarch64_operand *self,
2112 aarch64_opnd_info *info, aarch64_insn code,
2113 const aarch64_inst *inst,
2114 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2115 {
2116 int regno = extract_field (self->fields[0], code, 0);
2117 if (info->type == AARCH64_OPND_SME_ZA_array_off4)
2118 regno += 12;
2119 else
2120 regno += 8;
2121 int imm = extract_field (self->fields[1], code, 0);
2122 int num_offsets = get_operand_specific_data (self);
2123 if (num_offsets == 0)
2124 num_offsets = 1;
2125 info->indexed_za.index.regno = regno;
2126 info->indexed_za.index.imm = imm * num_offsets;
2127 info->indexed_za.index.countm1 = num_offsets - 1;
2128 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2129 return true;
2130 }
2131
2132 /* Decode two ZA tile slice (V, Rv, off3| ZAn ,off2 | ZAn, ol| ZAn) feilds. */
2133 bool
2134 aarch64_ext_sme_za_vrs1 (const aarch64_operand *self,
2135 aarch64_opnd_info *info, aarch64_insn code,
2136 const aarch64_inst *inst,
2137 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2138 {
2139 int v = extract_field (self->fields[0], code, 0);
2140 int regno = 12 + extract_field (self->fields[1], code, 0);
2141 int imm, za_reg, num_offset = 2;
2142
2143 switch (info->qualifier)
2144 {
2145 case AARCH64_OPND_QLF_S_B:
2146 imm = extract_field (self->fields[2], code, 0);
2147 info->indexed_za.index.imm = imm * num_offset;
2148 break;
2149 case AARCH64_OPND_QLF_S_H:
2150 case AARCH64_OPND_QLF_S_S:
2151 za_reg = extract_field (self->fields[2], code, 0);
2152 imm = extract_field (self->fields[3], code, 0);
2153 info->indexed_za.index.imm = imm * num_offset;
2154 info->indexed_za.regno = za_reg;
2155 break;
2156 case AARCH64_OPND_QLF_S_D:
2157 za_reg = extract_field (self->fields[2], code, 0);
2158 info->indexed_za.regno = za_reg;
2159 break;
2160 default:
2161 return false;
2162 }
2163
2164 info->indexed_za.index.regno = regno;
2165 info->indexed_za.index.countm1 = num_offset - 1;
2166 info->indexed_za.v = v;
2167 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2168 return true;
2169 }
2170
2171 /* Decode four ZA tile slice (V, Rv, off3| ZAn ,off2 | ZAn, ol| ZAn) feilds. */
2172 bool
2173 aarch64_ext_sme_za_vrs2 (const aarch64_operand *self,
2174 aarch64_opnd_info *info, aarch64_insn code,
2175 const aarch64_inst *inst,
2176 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2177 {
2178 int v = extract_field (self->fields[0], code, 0);
2179 int regno = 12 + extract_field (self->fields[1], code, 0);
2180 int imm, za_reg, num_offset =4;
2181
2182 switch (info->qualifier)
2183 {
2184 case AARCH64_OPND_QLF_S_B:
2185 imm = extract_field (self->fields[2], code, 0);
2186 info->indexed_za.index.imm = imm * num_offset;
2187 break;
2188 case AARCH64_OPND_QLF_S_H:
2189 za_reg = extract_field (self->fields[2], code, 0);
2190 imm = extract_field (self->fields[3], code, 0);
2191 info->indexed_za.index.imm = imm * num_offset;
2192 info->indexed_za.regno = za_reg;
2193 break;
2194 case AARCH64_OPND_QLF_S_S:
2195 case AARCH64_OPND_QLF_S_D:
2196 za_reg = extract_field (self->fields[2], code, 0);
2197 info->indexed_za.regno = za_reg;
2198 break;
2199 default:
2200 return false;
2201 }
2202
2203 info->indexed_za.index.regno = regno;
2204 info->indexed_za.index.countm1 = num_offset - 1;
2205 info->indexed_za.v = v;
2206 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2207 return true;
2208 }
2209
2210 bool
2211 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
2212 aarch64_opnd_info *info, aarch64_insn code,
2213 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2215 {
2216 int regno = extract_field (self->fields[0], code, 0);
2217 int imm = extract_field (self->fields[1], code, 0);
2218 info->addr.base_regno = regno;
2219 info->addr.offset.imm = imm;
2220 /* MUL VL operator is always present for this operand. */
2221 info->shifter.kind = AARCH64_MOD_MUL_VL;
2222 info->shifter.operator_present = (imm != 0);
2223 return true;
2224 }
2225
2226 /* Decode {SM|ZA} filed for SMSTART and SMSTOP instructions. */
2227 bool
2228 aarch64_ext_sme_sm_za (const aarch64_operand *self,
2229 aarch64_opnd_info *info, aarch64_insn code,
2230 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2231 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2232 {
2233 info->pstatefield = 0x1b;
2234 aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
2235 fld_crm >>= 1; /* CRm[3:1]. */
2236
2237 if (fld_crm == 0x1)
2238 info->reg.regno = 's';
2239 else if (fld_crm == 0x2)
2240 info->reg.regno = 'z';
2241 else
2242 return false;
2243
2244 return true;
2245 }
2246
/* Decode an SME predicate register with a vector-select index.
   The tszh:tszl fields use a unary-prefix (triangular) encoding: the
   position of the lowest set bit selects the element size and the bits
   above it, together with i1, form the index immediate.  Returns false
   for the all-zero (unallocated) encoding.  */
bool
aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
				     aarch64_opnd_info *info, aarch64_insn code,
				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
  aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
  aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
  aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
  aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
  int imm;

  info->indexed_za.regno = fld_pn;
  /* The index register is W12-W15.  */
  info->indexed_za.index.regno = fld_rm + 12;

  /* The tests must run lowest-bit-first: the first set bit of tszl
     (then tszh) fixes how many index bits remain above it.  */
  if (fld_tszl & 0x1)
    imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
  else if (fld_tszl & 0x2)
    imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
  else if (fld_tszl & 0x4)
    imm = (fld_i1 << 1) | fld_tszh;
  else if (fld_tszh)
    imm = fld_i1;
  else
    /* No size bit set: unallocated encoding.  */
    return false;

  info->indexed_za.index.imm = imm;
  return true;
}
2277
2278 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
2279 array specifies which field to use for Zn. MM is encoded in the
2280 concatenation of imm5 and SVE_tszh, with imm5 being the less
2281 significant part. */
2282 bool
2283 aarch64_ext_sve_index (const aarch64_operand *self,
2284 aarch64_opnd_info *info, aarch64_insn code,
2285 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2286 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2287 {
2288 int val;
2289
2290 info->reglane.regno = extract_field (self->fields[0], code, 0);
2291 val = extract_all_fields_after (self, 1, code);
2292 if ((val & 31) == 0)
2293 return 0;
2294 while ((val & 1) == 0)
2295 val /= 2;
2296 info->reglane.index = val / 2;
2297 return true;
2298 }
2299
2300 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
2301 bool
2302 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
2303 aarch64_opnd_info *info, const aarch64_insn code,
2304 const aarch64_inst *inst,
2305 aarch64_operand_error *errors)
2306 {
2307 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
2308 return (aarch64_ext_limm (self, info, code, inst, errors)
2309 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
2310 }
2311
2312 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
2313 and where MM occupies the most-significant part. The operand-dependent
2314 value specifies the number of bits in Zn. */
2315 bool
2316 aarch64_ext_sve_quad_index (const aarch64_operand *self,
2317 aarch64_opnd_info *info, aarch64_insn code,
2318 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2319 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2320 {
2321 unsigned int reg_bits = get_operand_specific_data (self);
2322 unsigned int val = extract_all_fields (self, code);
2323 info->reglane.regno = val & ((1 << reg_bits) - 1);
2324 info->reglane.index = val >> reg_bits;
2325 return true;
2326 }
2327
2328 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
2329 to use for Zn. The opcode-dependent value specifies the number
2330 of registers in the list. */
2331 bool
2332 aarch64_ext_sve_reglist (const aarch64_operand *self,
2333 aarch64_opnd_info *info, aarch64_insn code,
2334 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2335 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2336 {
2337 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2338 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2339 info->reglist.stride = 1;
2340 return true;
2341 }
2342
2343 /* Decode {Zn.<T> , Zm.<T>}. The fields array specifies which field
2344 to use for Zn. The opcode-dependent value specifies the number
2345 of registers in the list. */
2346 bool
2347 aarch64_ext_sve_reglist_zt (const aarch64_operand *self,
2348 aarch64_opnd_info *info, aarch64_insn code,
2349 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2350 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2351 {
2352 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2353 info->reglist.num_regs = get_operand_specific_data (self);
2354 info->reglist.stride = 1;
2355 return true;
2356 }
2357
2358 /* Decode a strided register list. The first field holds the top bit
2359 (0 or 16) and the second field holds the lower bits. The stride is
2360 16 divided by the list length. */
2361 bool
2362 aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
2363 aarch64_opnd_info *info, aarch64_insn code,
2364 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2365 aarch64_operand_error *errors
2366 ATTRIBUTE_UNUSED)
2367 {
2368 unsigned int upper = extract_field (self->fields[0], code, 0);
2369 unsigned int lower = extract_field (self->fields[1], code, 0);
2370 info->reglist.first_regno = upper * 16 + lower;
2371 info->reglist.num_regs = get_operand_specific_data (self);
2372 info->reglist.stride = 16 / info->reglist.num_regs;
2373 return true;
2374 }
2375
2376 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
2377 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
2378 field. */
2379 bool
2380 aarch64_ext_sve_scale (const aarch64_operand *self,
2381 aarch64_opnd_info *info, aarch64_insn code,
2382 const aarch64_inst *inst, aarch64_operand_error *errors)
2383 {
2384 int val;
2385
2386 if (!aarch64_ext_imm (self, info, code, inst, errors))
2387 return false;
2388 val = extract_field (FLD_SVE_imm4, code, 0);
2389 info->shifter.kind = AARCH64_MOD_MUL;
2390 info->shifter.amount = val + 1;
2391 info->shifter.operator_present = (val != 0);
2392 info->shifter.amount_present = (val != 0);
2393 return true;
2394 }
2395
/* Return the top set bit in VALUE, which is expected to be relatively
   small.  Returns zero when VALUE is zero.  */
static uint64_t
get_top_bit (uint64_t value)
{
  /* Clearing the lowest set bit (value & (value - 1)) until at most one
     bit remains leaves exactly the highest set bit.  */
  while (value & (value - 1))
    value &= value - 1;
  return value;
}
2405
2406 /* Decode an SVE shift-left immediate. */
2407 bool
2408 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2409 aarch64_opnd_info *info, const aarch64_insn code,
2410 const aarch64_inst *inst, aarch64_operand_error *errors)
2411 {
2412 if (!aarch64_ext_imm (self, info, code, inst, errors)
2413 || info->imm.value == 0)
2414 return false;
2415
2416 info->imm.value -= get_top_bit (info->imm.value);
2417 return true;
2418 }
2419
2420 /* Decode an SVE shift-right immediate. */
2421 bool
2422 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2423 aarch64_opnd_info *info, const aarch64_insn code,
2424 const aarch64_inst *inst, aarch64_operand_error *errors)
2425 {
2426 if (!aarch64_ext_imm (self, info, code, inst, errors)
2427 || info->imm.value == 0)
2428 return false;
2429
2430 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2431 return true;
2432 }
2433
2434 /* Decode X0-X30. Register 31 is unallocated. */
2435 bool
2436 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2437 const aarch64_insn code,
2438 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2439 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2440 {
2441 info->reg.regno = extract_field (self->fields[0], code, 0);
2442 return info->reg.regno <= 30;
2443 }
2444
2445 /* Decode an indexed register, with the first field being the register
2446 number and the remaining fields being the index. */
2447 bool
2448 aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
2449 const aarch64_insn code,
2450 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2451 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2452 {
2453 int bias = get_operand_specific_data (self);
2454 info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
2455 info->reglane.index = extract_all_fields_after (self, 1, code);
2456 return true;
2457 }
2458
2459 /* Decode a plain shift-right immediate, when there is only a single
2460 element size. */
2461 bool
2462 aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
2463 const aarch64_insn code,
2464 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2465 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2466 {
2467 unsigned int base = 1 << get_operand_field_width (self, 0);
2468 info->imm.value = base - extract_field (self->fields[0], code, 0);
2469 return true;
2470 }
2471
2472 /* Bitfields that are commonly used to encode certain operands' information
2474 may be partially used as part of the base opcode in some instructions.
2475 For example, the bit 1 of the field 'size' in
2476 FCVTXN <Vb><d>, <Va><n>
2477 is actually part of the base opcode, while only size<0> is available
2478 for encoding the register type. Another example is the AdvSIMD
2479 instruction ORR (register), in which the field 'size' is also used for
2480 the base opcode, leaving only the field 'Q' available to encode the
2481 vector register arrangement specifier '8B' or '16B'.
2482
2483 This function tries to deduce the qualifier from the value of partially
2484 constrained field(s). Given the VALUE of such a field or fields, the
2485 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2486 operand encoding), the function returns the matching qualifier or
2487 AARCH64_OPND_QLF_NIL if nothing matches.
2488
2489 N.B. CANDIDATES is a group of possible qualifiers that are valid for
2490 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2491 may end with AARCH64_OPND_QLF_NIL. */
2492
2493 static enum aarch64_opnd_qualifier
2494 get_qualifier_from_partial_encoding (aarch64_insn value,
2495 const enum aarch64_opnd_qualifier* \
2496 candidates,
2497 aarch64_insn mask)
2498 {
2499 int i;
2500 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2501 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2502 {
2503 aarch64_insn standard_value;
2504 if (candidates[i] == AARCH64_OPND_QLF_NIL)
2505 break;
2506 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2507 if ((standard_value & mask) == (value & mask))
2508 return candidates[i];
2509 }
2510 return AARCH64_OPND_QLF_NIL;
2511 }
2512
2513 /* Given a list of qualifier sequences, return all possible valid qualifiers
2514 for operand IDX in QUALIFIERS.
2515 Assume QUALIFIERS is an array whose length is large enough. */
2516
2517 static void
2518 get_operand_possible_qualifiers (int idx,
2519 const aarch64_opnd_qualifier_seq_t *list,
2520 enum aarch64_opnd_qualifier *qualifiers)
2521 {
2522 int i;
2523 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2524 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2525 break;
2526 }
2527
2528 /* Decode the size Q field for e.g. SHADD.
2529 We tag one operand with the qualifer according to the code;
2530 whether the qualifier is valid for this opcode or not, it is the
2531 duty of the semantic checking. */
2532
2533 static int
2534 decode_sizeq (aarch64_inst *inst)
2535 {
2536 int idx;
2537 enum aarch64_opnd_qualifier qualifier;
2538 aarch64_insn code;
2539 aarch64_insn value, mask;
2540 enum aarch64_field_kind fld_sz;
2541 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2542
2543 if (inst->opcode->iclass == asisdlse
2544 || inst->opcode->iclass == asisdlsep
2545 || inst->opcode->iclass == asisdlso
2546 || inst->opcode->iclass == asisdlsop)
2547 fld_sz = FLD_vldst_size;
2548 else
2549 fld_sz = FLD_size;
2550
2551 code = inst->value;
2552 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2553 /* Obtain the info that which bits of fields Q and size are actually
2554 available for operand encoding. Opcodes like FMAXNM and FMLA have
2555 size[1] unavailable. */
2556 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2557
2558 /* The index of the operand we are going to tag a qualifier and the qualifer
2559 itself are reasoned from the value of the size and Q fields and the
2560 possible valid qualifier lists. */
2561 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2562 DEBUG_TRACE ("key idx: %d", idx);
2563
2564 /* For most related instruciton, size:Q are fully available for operand
2565 encoding. */
2566 if (mask == 0x7)
2567 {
2568 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2569 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2570 return 0;
2571 return 1;
2572 }
2573
2574 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2575 candidates);
2576 #ifdef DEBUG_AARCH64
2577 if (debug_dump)
2578 {
2579 int i;
2580 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2581 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2582 DEBUG_TRACE ("qualifier %d: %s", i,
2583 aarch64_get_qualifier_name(candidates[i]));
2584 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2585 }
2586 #endif /* DEBUG_AARCH64 */
2587
2588 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2589
2590 if (qualifier == AARCH64_OPND_QLF_NIL)
2591 return 0;
2592
2593 inst->operands[idx].qualifier = qualifier;
2594 return 1;
2595 }
2596
2597 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2598 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2599
2600 static int
2601 decode_asimd_fcvt (aarch64_inst *inst)
2602 {
2603 aarch64_field field = {0, 0};
2604 aarch64_insn value;
2605 enum aarch64_opnd_qualifier qualifier;
2606
2607 gen_sub_field (FLD_size, 0, 1, &field);
2608 value = extract_field_2 (&field, inst->value, 0);
2609 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2610 : AARCH64_OPND_QLF_V_2D;
2611 switch (inst->opcode->op)
2612 {
2613 case OP_FCVTN:
2614 case OP_FCVTN2:
2615 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2616 inst->operands[1].qualifier = qualifier;
2617 break;
2618 case OP_FCVTL:
2619 case OP_FCVTL2:
2620 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2621 inst->operands[0].qualifier = qualifier;
2622 break;
2623 default:
2624 return 0;
2625 }
2626
2627 return 1;
2628 }
2629
2630 /* Decode size[0], i.e. bit 22, for
2631 e.g. FCVTXN <Vb><d>, <Va><n>. */
2632
2633 static int
2634 decode_asisd_fcvtxn (aarch64_inst *inst)
2635 {
2636 aarch64_field field = {0, 0};
2637 gen_sub_field (FLD_size, 0, 1, &field);
2638 if (!extract_field_2 (&field, inst->value, 0))
2639 return 0;
2640 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2641 return 1;
2642 }
2643
2644 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2645 static int
2646 decode_fcvt (aarch64_inst *inst)
2647 {
2648 enum aarch64_opnd_qualifier qualifier;
2649 aarch64_insn value;
2650 const aarch64_field field = {15, 2};
2651
2652 /* opc dstsize */
2653 value = extract_field_2 (&field, inst->value, 0);
2654 switch (value)
2655 {
2656 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2657 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2658 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2659 default: return 0;
2660 }
2661 inst->operands[0].qualifier = qualifier;
2662
2663 return 1;
2664 }
2665
/* Do miscellaneous decodings that are not common enough to be driven by
   flags.  Returns nonzero when INST is a valid encoding of the alias,
   zero otherwise.  */

static int
do_misc_decoding (aarch64_inst *inst)
{
  unsigned int value;
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      return decode_fcvt (inst);

    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      return decode_asimd_fcvt (inst);

    case OP_FCVTXN_S:
      return decode_asisd_fcvtxn (inst);

    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* The alias is only valid when the governing predicate and both
	 source predicates are the same register.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    case OP_MOV_Z_P_Z:
      /* Valid only when the destination and second source agree.  */
      return (extract_field (FLD_SVE_Zd, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_V:
      /* Index must be zero: tszh:imm5 must be a single set bit within
	 the low five bits.  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value <= 16 && value == (value & -value);

    case OP_MOV_Z_Z:
      /* Valid only when both source registers are the same.  */
      return (extract_field (FLD_SVE_Zn, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_Zi:
      /* Index must be nonzero: tszh:imm5 has more than one set bit.  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value != (value & -value);

    case OP_MOVM_P_P_P:
      /* Valid only when the destination and second source agree.  */
      return (extract_field (FLD_SVE_Pd, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Valid only when both source predicates are the same.  */
      return (extract_field (FLD_SVE_Pn, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Valid only when the source and governing predicates match.  */
      return (extract_field (FLD_SVE_Pm, inst->value, 0)
	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    default:
      return 0;
    }
}
2729
2730 /* Opcodes that have fields shared by multiple operands are usually flagged
2731 with flags. In this function, we detect such flags, decode the related
2732 field(s) and store the information in one of the related operands. The
2733 'one' operand is not any operand but one of the operands that can
2734 accommadate all the information that has been decoded. */
2735
2736 static int
2737 do_special_decoding (aarch64_inst *inst)
2738 {
2739 int idx;
2740 aarch64_insn value;
2741 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2742 if (inst->opcode->flags & F_COND)
2743 {
2744 value = extract_field (FLD_cond2, inst->value, 0);
2745 inst->cond = get_cond_from_value (value);
2746 }
2747 /* 'sf' field. */
2748 if (inst->opcode->flags & F_SF)
2749 {
2750 idx = select_operand_for_sf_field_coding (inst->opcode);
2751 value = extract_field (FLD_sf, inst->value, 0);
2752 if (inst->opcode->iclass == fprcvtfloat2int
2753 || inst->opcode->iclass == fprcvtint2float)
2754 {
2755 if (value == 0)
2756 inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S;
2757 else
2758 inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D;
2759 }
2760 else
2761 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2762 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2763 return 0;
2764 if ((inst->opcode->flags & F_N)
2765 && extract_field (FLD_N, inst->value, 0) != value)
2766 return 0;
2767 }
2768 /* 'sf' field. */
2769 if (inst->opcode->flags & F_LSE_SZ)
2770 {
2771 idx = select_operand_for_sf_field_coding (inst->opcode);
2772 value = extract_field (FLD_lse_sz, inst->value, 0);
2773 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2774 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2775 return 0;
2776 }
2777 /* rcpc3 'size' field. */
2778 if (inst->opcode->flags & F_RCPC3_SIZE)
2779 {
2780 value = extract_field (FLD_rcpc3_size, inst->value, 0);
2781 for (int i = 0;
2782 aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2783 i++)
2784 {
2785 if (aarch64_operands[inst->operands[i].type].op_class
2786 == AARCH64_OPND_CLASS_INT_REG)
2787 {
2788 inst->operands[i].qualifier = get_greg_qualifier_from_value (value & 1);
2789 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2790 return 0;
2791 }
2792 else if (aarch64_operands[inst->operands[i].type].op_class
2793 == AARCH64_OPND_CLASS_FP_REG)
2794 {
2795 value += (extract_field (FLD_opc1, inst->value, 0) << 2);
2796 inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2797 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2798 return 0;
2799 }
2800 }
2801 }
2802
2803 /* size:Q fields. */
2804 if (inst->opcode->flags & F_SIZEQ)
2805 return decode_sizeq (inst);
2806
2807 if (inst->opcode->flags & F_FPTYPE)
2808 {
2809 idx = select_operand_for_fptype_field_coding (inst->opcode);
2810 value = extract_field (FLD_type, inst->value, 0);
2811 switch (value)
2812 {
2813 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2814 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2815 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2816 default: return 0;
2817 }
2818 }
2819
2820 if (inst->opcode->flags & F_SSIZE)
2821 {
2822 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2823 of the base opcode. */
2824 aarch64_insn mask;
2825 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2826 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2827 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2828 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2829 /* For most related instruciton, the 'size' field is fully available for
2830 operand encoding. */
2831 if (mask == 0x3)
2832 {
2833 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2834 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2835 return 0;
2836 }
2837 else
2838 {
2839 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2840 candidates);
2841 inst->operands[idx].qualifier
2842 = get_qualifier_from_partial_encoding (value, candidates, mask);
2843 }
2844 }
2845
2846 if (inst->opcode->flags & F_LSFE_SZ)
2847 {
2848 value = extract_field (FLD_ldst_size, inst->value, 0);
2849
2850 if (value > 0x3)
2851 return 0;
2852
2853 for (int i = 0;
2854 aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2855 i++)
2856 {
2857 inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2858 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2859 return 0;
2860 }
2861 }
2862
2863 if (inst->opcode->flags & F_T)
2864 {
2865 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2866 int num = 0;
2867 unsigned val, Q;
2868 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2869 == AARCH64_OPND_CLASS_SIMD_REG);
2870 /* imm5<3:0> q <t>
2871 0000 x reserved
2872 xxx1 0 8b
2873 xxx1 1 16b
2874 xx10 0 4h
2875 xx10 1 8h
2876 x100 0 2s
2877 x100 1 4s
2878 1000 0 reserved
2879 1000 1 2d */
2880 val = extract_field (FLD_imm5, inst->value, 0);
2881 while ((val & 0x1) == 0 && ++num <= 3)
2882 val >>= 1;
2883 if (num > 3)
2884 return 0;
2885 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2886 inst->operands[0].qualifier =
2887 get_vreg_qualifier_from_value ((num << 1) | Q);
2888 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2889 return 0;
2890
2891 }
2892
2893 if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2894 {
2895 unsigned size;
2896 size = (unsigned) extract_field (FLD_size, inst->value,
2897 inst->opcode->mask);
2898 inst->operands[0].qualifier
2899 = get_vreg_qualifier_from_value (1 + (size << 1));
2900 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2901 return 0;
2902 inst->operands[2].qualifier = get_sreg_qualifier_from_value (size);
2903 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_ERR)
2904 return 0;
2905 }
2906
2907 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2908 {
2909 /* Use Rt to encode in the case of e.g.
2910 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2911 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2912 if (idx == -1)
2913 {
2914 /* Otherwise use the result operand, which has to be a integer
2915 register. */
2916 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2917 == AARCH64_OPND_CLASS_INT_REG);
2918 idx = 0;
2919 }
2920 assert (idx == 0 || idx == 1);
2921 value = extract_field (FLD_Q, inst->value, 0);
2922 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2923 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2924 return 0;
2925 }
2926
2927 if (inst->opcode->flags & F_LDS_SIZE)
2928 {
2929 aarch64_field field = {0, 0};
2930 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2931 == AARCH64_OPND_CLASS_INT_REG);
2932 gen_sub_field (FLD_opc, 0, 1, &field);
2933 value = extract_field_2 (&field, inst->value, 0);
2934 inst->operands[0].qualifier
2935 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2936 }
2937
2938 /* Miscellaneous decoding; done as the last step. */
2939 if (inst->opcode->flags & F_MISC)
2940 return do_misc_decoding (inst);
2941
2942 return 1;
2943 }
2944
2945 /* Converters converting a real opcode instruction to its alias form. */
2946
2947 /* ROR <Wd>, <Ws>, #<shift>
2948 is equivalent to:
2949 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2950 static int
2951 convert_extr_to_ror (aarch64_inst *inst)
2952 {
2953 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2954 {
2955 copy_operand_info (inst, 2, 3);
2956 inst->operands[3].type = AARCH64_OPND_NIL;
2957 return 1;
2958 }
2959 return 0;
2960 }
2961
2962 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2963 is equivalent to:
2964 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2965 static int
2966 convert_shll_to_xtl (aarch64_inst *inst)
2967 {
2968 if (inst->operands[2].imm.value == 0)
2969 {
2970 inst->operands[2].type = AARCH64_OPND_NIL;
2971 return 1;
2972 }
2973 return 0;
2974 }
2975
2976 /* Convert
2977 UBFM <Xd>, <Xn>, #<shift>, #63.
2978 to
2979 LSR <Xd>, <Xn>, #<shift>. */
2980 static int
2981 convert_bfm_to_sr (aarch64_inst *inst)
2982 {
2983 int64_t imms, val;
2984
2985 imms = inst->operands[3].imm.value;
2986 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2987 if (imms == val)
2988 {
2989 inst->operands[3].type = AARCH64_OPND_NIL;
2990 return 1;
2991 }
2992
2993 return 0;
2994 }
2995
2996 /* Convert MOV to ORR. */
2997 static int
2998 convert_orr_to_mov (aarch64_inst *inst)
2999 {
3000 /* MOV <Vd>.<T>, <Vn>.<T>
3001 is equivalent to:
3002 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
3003 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
3004 {
3005 inst->operands[2].type = AARCH64_OPND_NIL;
3006 return 1;
3007 }
3008 return 0;
3009 }
3010
3011 /* When <imms> >= <immr>, the instruction written:
3012 SBFX <Xd>, <Xn>, #<lsb>, #<width>
3013 is equivalent to:
3014 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
3015
3016 static int
3017 convert_bfm_to_bfx (aarch64_inst *inst)
3018 {
3019 int64_t immr, imms;
3020
3021 immr = inst->operands[2].imm.value;
3022 imms = inst->operands[3].imm.value;
3023 if (imms >= immr)
3024 {
3025 int64_t lsb = immr;
3026 inst->operands[2].imm.value = lsb;
3027 inst->operands[3].imm.value = imms + 1 - lsb;
3028 /* The two opcodes have different qualifiers for
3029 the immediate operands; reset to help the checking. */
3030 reset_operand_qualifier (inst, 2);
3031 reset_operand_qualifier (inst, 3);
3032 return 1;
3033 }
3034
3035 return 0;
3036 }
3037
3038 /* When <imms> < <immr>, the instruction written:
3039 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
3040 is equivalent to:
3041 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
3042
3043 static int
3044 convert_bfm_to_bfi (aarch64_inst *inst)
3045 {
3046 int64_t immr, imms, val;
3047
3048 immr = inst->operands[2].imm.value;
3049 imms = inst->operands[3].imm.value;
3050 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
3051 if (imms < immr)
3052 {
3053 inst->operands[2].imm.value = (val - immr) & (val - 1);
3054 inst->operands[3].imm.value = imms + 1;
3055 /* The two opcodes have different qualifiers for
3056 the immediate operands; reset to help the checking. */
3057 reset_operand_qualifier (inst, 2);
3058 reset_operand_qualifier (inst, 3);
3059 return 1;
3060 }
3061
3062 return 0;
3063 }
3064
3065 /* The instruction written:
3066 BFC <Xd>, #<lsb>, #<width>
3067 is equivalent to:
3068 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
3069
static int
convert_bfm_to_bfc (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  /* Should have been assured by the base opcode value.  */
  assert (inst->operands[1].reg.regno == 0x1f);

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* VAL is the register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  /* The BFC alias only exists for the "insert" pattern, <imms> < <immr>.  */
  if (imms < immr)
    {
      /* Drop XZR from the second operand.  */
      copy_operand_info (inst, 1, 2);
      copy_operand_info (inst, 2, 3);
      inst->operands[3].type = AARCH64_OPND_NIL;

      /* Recalculate the immediates.  */
      inst->operands[1].imm.value = (val - immr) & (val - 1);
      inst->operands[2].imm.value = imms + 1;

      /* The two opcodes have different qualifiers for the operands; reset to
	 help the checking.  */
      reset_operand_qualifier (inst, 1);
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);

      return 1;
    }

  return 0;
}
3103
3104 /* The instruction written:
3105 LSL <Xd>, <Xn>, #<shift>
3106 is equivalent to:
3107 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
3108
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* VAL is the index of the register's top bit: 31 for W, 63 for X.  */
  int64_t val
    = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;

  /* The LSL alias applies either to a no-op shift (immr == 0, imms
     selecting the top bit) or when immr == imms + 1; in both cases the
     shift amount is val - imms.  */
  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }

  return 0;
}
3126
3127 /* CINC <Wd>, <Wn>, <cond>
3128 is equivalent to:
3129 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
3130 where <cond> is not AL or NV. */
3131
3132 static int
3133 convert_from_csel (aarch64_inst *inst)
3134 {
3135 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
3136 && (inst->operands[3].cond->value & 0xe) != 0xe)
3137 {
3138 copy_operand_info (inst, 2, 3);
3139 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
3140 inst->operands[3].type = AARCH64_OPND_NIL;
3141 return 1;
3142 }
3143 return 0;
3144 }
3145
3146 /* CSET <Wd>, <cond>
3147 is equivalent to:
3148 CSINC <Wd>, WZR, WZR, invert(<cond>)
3149 where <cond> is not AL or NV. */
3150
3151 static int
3152 convert_csinc_to_cset (aarch64_inst *inst)
3153 {
3154 if (inst->operands[1].reg.regno == 0x1f
3155 && inst->operands[2].reg.regno == 0x1f
3156 && (inst->operands[3].cond->value & 0xe) != 0xe)
3157 {
3158 copy_operand_info (inst, 1, 3);
3159 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
3160 inst->operands[3].type = AARCH64_OPND_NIL;
3161 inst->operands[2].type = AARCH64_OPND_NIL;
3162 return 1;
3163 }
3164 return 0;
3165 }
3166
3167 /* MOV <Wd>, #<imm>
3168 is equivalent to:
3169 MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
3170
3171 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3172 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3173 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3174 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3175 machine-instruction mnemonic must be used. */
3176
static int
convert_movewide_to_mov (aarch64_inst *inst)
{
  uint64_t value = inst->operands[1].imm.value;
  /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
  if (value == 0 && inst->operands[1].shifter.amount != 0)
    return 0;
  inst->operands[1].type = AARCH64_OPND_IMM_MOV;
  inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
  /* Fold the shift into the immediate; must happen before the shifter
     amount is cleared at the end of this function.  */
  value <<= inst->operands[1].shifter.amount;
  /* As an alias converter, it has to be clear that the INST->OPCODE
     is the opcode of the real instruction.  */
  if (inst->opcode->op == OP_MOVN)
    {
      int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
      value = ~value;
      /* A MOVN has an immediate that could be encoded by MOVZ.  */
      if (aarch64_wide_constant_p (value, is32, NULL))
	return 0;
    }
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.amount = 0;
  return 1;
}
3201
3202 /* MOV <Wd>, #<imm>
3203 is equivalent to:
3204 ORR <Wd>, WZR, #<imm>.
3205
3206 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3207 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3208 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3209 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3210 machine-instruction mnemonic must be used. */
3211
static int
convert_movebitmask_to_mov (aarch64_inst *inst)
{
  int is32;
  uint64_t value;

  /* Should have been assured by the base opcode value.  */
  assert (inst->operands[1].reg.regno == 0x1f);
  /* Move the logical immediate into operand 1, dropping the WZR/XZR
     source register.  */
  copy_operand_info (inst, 1, 2);
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  inst->operands[1].type = AARCH64_OPND_IMM_MOV;
  value = inst->operands[1].imm.value;
  /* ORR has an immediate that could be generated by a MOVZ or MOVN
     instruction.  */
  if (inst->operands[0].reg.regno != 0x1f
      && (aarch64_wide_constant_p (value, is32, NULL)
	  || aarch64_wide_constant_p (~value, is32, NULL)))
    return 0;

  inst->operands[2].type = AARCH64_OPND_NIL;
  return 1;
}
3234
3235 /* Some alias opcodes are disassembled by being converted from their real-form.
3236 N.B. INST->OPCODE is the real opcode rather than the alias. */
3237
3238 static int
3239 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
3240 {
3241 switch (alias->op)
3242 {
3243 case OP_ASR_IMM:
3244 case OP_LSR_IMM:
3245 return convert_bfm_to_sr (inst);
3246 case OP_LSL_IMM:
3247 return convert_ubfm_to_lsl (inst);
3248 case OP_CINC:
3249 case OP_CINV:
3250 case OP_CNEG:
3251 return convert_from_csel (inst);
3252 case OP_CSET:
3253 case OP_CSETM:
3254 return convert_csinc_to_cset (inst);
3255 case OP_UBFX:
3256 case OP_BFXIL:
3257 case OP_SBFX:
3258 return convert_bfm_to_bfx (inst);
3259 case OP_SBFIZ:
3260 case OP_BFI:
3261 case OP_UBFIZ:
3262 return convert_bfm_to_bfi (inst);
3263 case OP_BFC:
3264 return convert_bfm_to_bfc (inst);
3265 case OP_MOV_V:
3266 return convert_orr_to_mov (inst);
3267 case OP_MOV_IMM_WIDE:
3268 case OP_MOV_IMM_WIDEN:
3269 return convert_movewide_to_mov (inst);
3270 case OP_MOV_IMM_LOG:
3271 return convert_movebitmask_to_mov (inst);
3272 case OP_ROR_IMM:
3273 return convert_extr_to_ror (inst);
3274 case OP_SXTL:
3275 case OP_SXTL2:
3276 case OP_UXTL:
3277 case OP_UXTL2:
3278 return convert_shll_to_xtl (inst);
3279 default:
3280 return 0;
3281 }
3282 }
3283
3284 static bool
3285 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
3286 aarch64_inst *, int, aarch64_operand_error *errors);
3287
3288 /* Given the instruction information in *INST, check if the instruction has
3289 any alias form that can be used to represent *INST. If the answer is yes,
3290 update *INST to be in the form of the determined alias. */
3291
3292 /* In the opcode description table, the following flags are used in opcode
3293 entries to help establish the relations between the real and alias opcodes:
3294
3295 F_ALIAS: opcode is an alias
3296 F_HAS_ALIAS: opcode has alias(es)
3297 F_P1
3298 F_P2
3299 F_P3: Disassembly preference priority 1-3 (the larger the
3300 higher). If nothing is specified, it is the priority
3301 0 by default, i.e. the lowest priority.
3302
3303 Although the relation between the machine and the alias instructions are not
3304 explicitly described, it can be easily determined from the base opcode
3305 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
3306 description entries:
3307
3308 The mask of an alias opcode must be equal to or a super-set (i.e. more
3309 constrained) of that of the aliased opcode; so is the base opcode value.
3310
3311 if (opcode_has_alias (real) && alias_opcode_p (opcode)
3312 && (opcode->mask & real->mask) == real->mask
3313 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
3314 then OPCODE is an alias of, and only of, the REAL instruction
3315
3316 The alias relationship is forced flat-structured to keep related algorithm
3317 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
3318
3319 During the disassembling, the decoding decision tree (in
   opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
3321 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
3322 not specified), the disassembler will check whether there is any alias
3323 instruction exists for this real instruction. If there is, the disassembler
3324 will try to disassemble the 32-bit binary again using the alias's rule, or
3325 try to convert the IR to the form of the alias. In the case of the multiple
3326 aliases, the aliases are tried one by one from the highest priority
   (currently the flag F_P3) to the lowest priority (no priority flag), and
   the first one that succeeds is adopted.
3329
3330 You may ask why there is a need for the conversion of IR from one form to
3331 another in handling certain aliases. This is because on one hand it avoids
   adding more operand code to handle unusual encoding/decoding; on the other
3333 hand, during the disassembling, the conversion is an effective approach to
3334 check the condition of an alias (as an alias may be adopted only if certain
3335 conditions are met).
3336
3337 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
3338 aarch64_opcode_table and generated aarch64_find_alias_opcode and
3339 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
3340
3341 static void
3342 determine_disassembling_preference (struct aarch64_inst *inst,
3343 aarch64_operand_error *errors)
3344 {
3345 const aarch64_opcode *opcode;
3346 const aarch64_opcode *alias;
3347
3348 opcode = inst->opcode;
3349
3350 /* This opcode does not have an alias, so use itself. */
3351 if (!opcode_has_alias (opcode))
3352 return;
3353
3354 alias = aarch64_find_alias_opcode (opcode);
3355 assert (alias);
3356
3357 #ifdef DEBUG_AARCH64
3358 if (debug_dump)
3359 {
3360 const aarch64_opcode *tmp = alias;
3361 printf ("#### LIST orderd: ");
3362 while (tmp)
3363 {
3364 printf ("%s, ", tmp->name);
3365 tmp = aarch64_find_next_alias_opcode (tmp);
3366 }
3367 printf ("\n");
3368 }
3369 #endif /* DEBUG_AARCH64 */
3370
3371 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
3372 {
3373 DEBUG_TRACE ("try %s", alias->name);
3374 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
3375
3376 /* An alias can be a pseudo opcode which will never be used in the
3377 disassembly, e.g. BIC logical immediate is such a pseudo opcode
3378 aliasing AND. */
3379 if (pseudo_opcode_p (alias))
3380 {
3381 DEBUG_TRACE ("skip pseudo %s", alias->name);
3382 continue;
3383 }
3384
3385 if ((inst->value & alias->mask) != alias->opcode)
3386 {
3387 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3388 continue;
3389 }
3390
3391 if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3392 {
3393 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3394 continue;
3395 }
3396
3397 /* No need to do any complicated transformation on operands, if the alias
3398 opcode does not have any operand. */
3399 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3400 {
3401 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3402 aarch64_replace_opcode (inst, alias);
3403 return;
3404 }
3405 if (alias->flags & F_CONV)
3406 {
3407 aarch64_inst copy;
3408 memcpy (©, inst, sizeof (aarch64_inst));
3409 /* ALIAS is the preference as long as the instruction can be
3410 successfully converted to the form of ALIAS. */
3411 if (convert_to_alias (©, alias) == 1)
3412 {
3413 aarch64_replace_opcode (©, alias);
3414 if (aarch64_match_operands_constraint (©, NULL) != 1)
3415 {
3416 DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3417 }
3418 else
3419 {
3420 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3421 memcpy (inst, ©, sizeof (aarch64_inst));
3422 }
3423 return;
3424 }
3425 }
3426 else
3427 {
3428 /* Directly decode the alias opcode. */
3429 aarch64_inst temp;
3430 memset (&temp, '\0', sizeof (aarch64_inst));
3431 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3432 {
3433 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3434 memcpy (inst, &temp, sizeof (aarch64_inst));
3435 return;
3436 }
3437 }
3438 }
3439 }
3440
3441 /* Some instructions (including all SVE ones) use the instruction class
3442 to describe how a qualifiers_list index is represented in the instruction
3443 encoding. If INST is such an instruction, decode the appropriate fields
3444 and fill in the operand qualifiers accordingly. Return true if no
3445 problems are found. */
3446
static bool
aarch64_decode_variant_using_iclass (aarch64_inst *inst)
{
  int i, variant;

  /* VARIANT becomes the row index into the opcode's qualifiers_list;
     each case below decodes it from the iclass-specific field(s).  */
  variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
      variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
      /* Encodings 4-6 are reserved; 7 maps to the last (5th) row.  */
      if (variant >= 4 && variant < 7)
	return false;
      if (variant == 7)
	variant = 4;
      break;

    case sme_psel:
      /* The variant is the position of the lowest set bit in tszh:tszl;
	 an all-zero field is reserved.  */
      i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
      if (i == 0)
	return false;
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sme_shift:
      i = extract_field (FLD_SVE_tszh, inst->value, 0);
      goto sve_shift;

    case sme_size_12_bh:
      variant = extract_field (FLD_S, inst->value, 0);
      if (variant > 1)
	return false;
      break;

    case sme_size_12_bhs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sme_size_12_hs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant != 1 && variant != 2)
	return false;
      variant -= 1;
      break;

    case sme_size_12_b:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant != 0)
	return false;
      break;

    case sme_size_22:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      break;

    case sme_size_22_hsd:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      if (variant < 1)
	return false;
      variant -= 1;
      break;

    case sme_sz_23:
      variant = extract_field (FLD_SME_sz_23, inst->value, 0);
      break;

    case sve_cpy:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
      break;

    case sve_index:
      /* The variant is the position of the lowest set bit in imm5;
	 imm5 with no bit set in its low five bits is reserved.  */
      i = extract_field (FLD_imm5, inst->value, 0);

      if ((i & 31) == 0)
	return false;
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_limm:
      /* Pick the smallest applicable element size.  */
      if ((inst->value & 0x20600) == 0x600)
	variant = 0;
      else if ((inst->value & 0x20400) == 0x400)
	variant = 1;
      else if ((inst->value & 0x20000) == 0)
	variant = 2;
      else
	variant = 3;
      break;

    case sme2_mov:
      /* .D is preferred over the other sizes in disassembly.  */
      variant = 3;
      break;

    case sme2_movaz:
    case sme_misc:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
      break;

    case sve_pred_zm:
      variant = extract_field (FLD_SVE_M_4, inst->value, 0);
      break;

    case sve_shift_pred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
    sve_shift:
      /* Shared tail for the shift iclasses: the variant is the position
	 of the highest set bit of the tsz field; zero is reserved.  */
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_unpred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      goto sve_shift;

    case sve_size_bhs:
      variant = extract_field (FLD_size, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sve_size_bhsd:
      variant = extract_field (FLD_size, inst->value, 0);
      break;

    case sve_size_hsd:
      /* Size 0 (byte) is not a valid element size here.  */
      i = extract_field (FLD_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      variant = extract_field (FLD_SVE_sz, inst->value, 0);
      break;

    case sve_size_sd2:
      variant = extract_field (FLD_SVE_sz2, inst->value, 0);
      break;

    case sve_size_sd3:
      variant = extract_field (FLD_SVE_sz3, inst->value, 0);
      break;

    case sve_size_sd4:
      variant = extract_field (FLD_SVE_sz4, inst->value, 0);
      break;

    case sve_size_hsd2:
      i = extract_field (FLD_SVE_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sve_size_hsd3:
      i = extract_field (FLD_len, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sve_size_13:
      /* Ignore low bit of this field since that is set in the opcode for
	 instructions of this iclass.  */
      i = (extract_field (FLD_size, inst->value, 0) & 2);
      variant = (i >> 1);
      break;

    case sve_shift_tsz_bhsd:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_size_tsz_bhs:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  /* Only a single set bit (power of two) is valid here.  */
	  if (i & 1)
	    return false;
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_tsz_hsd:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    default:
      /* No mapping between instruction class and qualifiers.  */
      return true;
    }

  /* Copy the selected qualifiers row into the instruction's operands.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
  return true;
}
3682 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
   fails, which means that CODE is not an instruction of OPCODE; otherwise
3684 return 1.
3685
3686 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3687 determined and used to disassemble CODE; this is done just before the
3688 return. */
3689
static bool
aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
		       aarch64_inst *inst, int noaliases_p,
		       aarch64_operand_error *errors)
{
  int i;

  DEBUG_TRACE ("enter with %s", opcode->name);

  assert (opcode && inst);

  /* Clear inst.  */
  memset (inst, '\0', sizeof (aarch64_inst));

  /* Check the base opcode.  */
  if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
    {
      DEBUG_TRACE ("base opcode match FAIL");
      goto decode_fail;
    }

  inst->opcode = opcode;
  inst->value = code;

  /* Assign operand codes and indexes.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
      inst->operands[i].type = opcode->operands[i];
      inst->operands[i].idx = i;
    }

  /* Call the opcode decoder indicated by flags.  This must run before the
     per-operand extractors below, as it may set operand qualifiers.  */
  if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
    {
      DEBUG_TRACE ("opcode flag-based decoder FAIL");
      goto decode_fail;
    }

  /* Possibly use the instruction class to determine the correct
     qualifier.  */
  if (!aarch64_decode_variant_using_iclass (inst))
    {
      DEBUG_TRACE ("iclass-based decoder FAIL");
      goto decode_fail;
    }

  /* Call operand decoders.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type;

      type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      opnd = &aarch64_operands[type];
      if (operand_has_extractor (opnd)
	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
					 errors)))
	{
	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
	  goto decode_fail;
	}
    }

  /* If the opcode has a verifier, then check it now.  */
  if (opcode->verifier
      && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
    {
      DEBUG_TRACE ("operand verifier FAIL");
      goto decode_fail;
    }

  /* Match the qualifiers.  */
  if (aarch64_match_operands_constraint (inst, NULL) == 1)
    {
      /* Arriving here, the CODE has been determined as a valid instruction
	 of OPCODE and *INST has been filled with information of this OPCODE
	 instruction.  Before the return, check if the instruction has any
	 alias and should be disassembled in the form of its alias instead.
	 If the answer is yes, *INST will be updated.  */
      if (!noaliases_p)
	determine_disassembling_preference (inst, errors);
      DEBUG_TRACE ("SUCCESS");
      return true;
    }
  else
    {
      DEBUG_TRACE ("constraint matching FAIL");
    }

 decode_fail:
  return false;
}
3786
/* This does some user-friendly fix-up to *INST.  It currently focuses on
   adjusting qualifiers to help the printed instruction be recognized and
   understood more easily.  */
3791
3792 static void
3793 user_friendly_fixup (aarch64_inst *inst)
3794 {
3795 switch (inst->opcode->iclass)
3796 {
3797 case testbranch:
3798 /* TBNZ Xn|Wn, #uimm6, label
3799 Test and Branch Not Zero: conditionally jumps to label if bit number
3800 uimm6 in register Xn is not zero. The bit number implies the width of
3801 the register, which may be written and should be disassembled as Wn if
3802 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3803 */
3804 if (inst->operands[1].imm.value < 32)
3805 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3806 break;
3807 default: break;
3808 }
3809 }
3810
3811 /* Decode INSN and fill in *INST the instruction information. An alias
3812 opcode may be filled in *INSN if NOALIASES_P is FALSE. Return zero on
3813 success. */
3814
3815 enum err_type
3816 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3817 bool noaliases_p,
3818 aarch64_operand_error *errors)
3819 {
3820 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3821
3822 #ifdef DEBUG_AARCH64
3823 if (debug_dump)
3824 {
3825 const aarch64_opcode *tmp = opcode;
3826 printf ("\n");
3827 DEBUG_TRACE ("opcode lookup:");
3828 while (tmp != NULL)
3829 {
3830 aarch64_verbose (" %s", tmp->name);
3831 tmp = aarch64_find_next_opcode (tmp);
3832 }
3833 }
3834 #endif /* DEBUG_AARCH64 */
3835
3836 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3837 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3838 opcode field and value, apart from the difference that one of them has an
3839 extra field as part of the opcode, but such a field is used for operand
3840 encoding in other opcode(s) ('immh' in the case of the example). */
3841 while (opcode != NULL)
3842 {
3843 /* But only one opcode can be decoded successfully for, as the
3844 decoding routine will check the constraint carefully. */
3845 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3846 return ERR_OK;
3847 opcode = aarch64_find_next_opcode (opcode);
3848 }
3849
3850 return ERR_UND;
3851 }
3852
3853 /* Return a short string to indicate a switch to STYLE. These strings
3854 will be embedded into the disassembled operand text (as produced by
3855 aarch64_print_operand), and then spotted in the print_operands function
3856 so that the disassembler output can be split by style. */
3857
3858 static const char *
3859 get_style_text (enum disassembler_style style)
3860 {
3861 static bool init = false;
3862 static char formats[16][4];
3863 unsigned num;
3864
3865 /* First time through we build a string for every possible format. This
3866 code relies on there being no more than 16 different styles (there's
3867 an assert below for this). */
3868 if (!init)
3869 {
3870 int i;
3871
3872 for (i = 0; i <= 0xf; ++i)
3873 {
3874 int res ATTRIBUTE_UNUSED
3875 = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3876 STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3877 assert (res == 3);
3878 }
3879
3880 init = true;
3881 }
3882
3883 /* Return the string that marks switching to STYLE. */
3884 num = (unsigned) style;
3885 assert (style <= 0xf);
3886 return formats[num];
3887 }
3888
3889 /* Callback used by aarch64_print_operand to apply STYLE to the
3890 disassembler output created from FMT and ARGS. The STYLER object holds
3891 any required state. Must return a pointer to a string (created from FMT
3892 and ARGS) that will continue to be valid until the complete disassembled
3893 instruction has been printed.
3894
3895 We return a string that includes two embedded style markers, the first,
3896 places at the start of the string, indicates a switch to STYLE, and the
3897 second, placed at the end of the string, indicates a switch back to the
3898 default text style.
3899
3900 Later, when we print the operand text we take care to collapse any
3901 adjacent style markers, and to ignore any style markers that appear at
3902 the very end of a complete operand string. */
3903
static const char *aarch64_apply_style (struct aarch64_styler *styler,
					enum disassembler_style style,
					const char *fmt,
					va_list args)
{
  int res;
  char *ptr, *tmp;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* These are the two strings for switching styles.  */
  const char *style_on = get_style_text (style);
  const char *style_off = get_style_text (dis_style_text);

  /* Calculate space needed once FMT and ARGS are expanded.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  assert (res >= 0);

  /* Allocate space on the obstack for the expanded FMT and ARGS, as well
     as the two strings for switching styles, then write all of these
     strings onto the obstack.  */
  ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
				+ strlen (style_off) + 1);
  tmp = stpcpy (ptr, style_on);
  /* res + 1 leaves room for the terminating NUL written by vsnprintf;
     the trailing style marker copied below overwrites that NUL, and
     supplies the final NUL itself (accounted for by the +1 above).  */
  res = vsnprintf (tmp, (res + 1), fmt, args);
  assert (res >= 0);
  tmp += res;
  strcpy (tmp, style_off);

  return ptr;
}
3937
/* Print the operands of OPCODE, described by OPNDS, onto INFO->STREAM.
   PC is the address of the instruction (needed for PC-relative operands).
   *HAS_NOTES is set to true when a disassembly note was emitted, so the
   caller can avoid printing a second, overlapping note.  */

static void
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds, struct disassemble_info *info,
		bool *has_notes)
{
  char *notes = NULL;
  int i, pcrel_p, num_printed;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Strings produced by aarch64_apply_style are allocated on CONTENT and
     stay valid until the obstack is freed at the end of this function.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR (and an optional comment in
	 CMT).  PCREL_P is set if the operand is a PC-relative address.  */
      aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
			     &info->target, &notes, cmt, sizeof (cmt),
			     arch_variant, &styler);

      /* Print the delimiter (taking account of omitted operand(s)).  */
      if (str[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
				      num_printed++ == 0 ? "\t" : ", ");

      /* Print the operand.  */
      if (pcrel_p)
	(*info->print_address_func) (info->target, info);
      else
	{
	  /* This operand came from aarch64_print_operand, and will include
	     embedded strings indicating which style each character should
	     have.  In the following code we split the text based on
	     CURR_STYLE, and call the styled print callback to print each
	     block of text in the appropriate style.  */
	  char *start, *curr;
	  enum disassembler_style curr_style = dis_style_text;

	  start = curr = str;
	  do
	    {
	      /* A style marker is STYLE_MARKER_CHAR, one hex digit, then
		 STYLE_MARKER_CHAR again; stop also at end of string.  */
	      if (*curr == '\0'
		  || (*curr == STYLE_MARKER_CHAR
		      && ISXDIGIT (*(curr + 1))
		      && *(curr + 2) == STYLE_MARKER_CHAR))
		{
		  /* Output content between our START position and CURR.  */
		  int len = curr - start;
		  if (len > 0)
		    {
		      if ((*info->fprintf_styled_func) (info->stream,
							curr_style,
							"%.*s",
							len, start) < 0)
			break;
		    }

		  if (*curr == '\0')
		    break;

		  /* Skip over the initial STYLE_MARKER_CHAR.  */
		  ++curr;

		  /* Update the CURR_STYLE.  As there are less than 16
		     styles, it is possible, that if the input is corrupted
		     in some way, that we might set CURR_STYLE to an
		     invalid value.  Don't worry though, we check for this
		     situation.  */
		  if (*curr >= '0' && *curr <= '9')
		    curr_style = (enum disassembler_style) (*curr - '0');
		  else if (*curr >= 'a' && *curr <= 'f')
		    curr_style = (enum disassembler_style) (*curr - 'a' + 10);
		  else
		    curr_style = dis_style_text;

		  /* Check for an invalid style having been selected.  This
		     should never happen, but it doesn't hurt to be a
		     little paranoid.  */
		  if (curr_style > dis_style_comment_start)
		    curr_style = dis_style_text;

		  /* Skip the hex character, and the closing
		     STYLE_MARKER_CHAR.  */
		  curr += 2;

		  /* Reset the START to after the style marker.  */
		  start = curr;
		}
	      else
		++curr;
	    }
	  while (true);
	}

      /* Print the comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				      "\t// %s", cmt);
    }

  /* Notes are suppressed entirely when the no-notes option is active.  */
  if (notes && !no_notes)
    {
      *has_notes = true;
      (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				    " // note: %s", notes);
    }

  /* Release every style string allocated for this instruction.  */
  obstack_free (&content, NULL);
}
4063
4064 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
4065
4066 static void
4067 remove_dot_suffix (char *name, const aarch64_inst *inst)
4068 {
4069 char *ptr;
4070 size_t len;
4071
4072 ptr = strchr (inst->opcode->name, '.');
4073 assert (ptr && inst->cond);
4074 len = ptr - inst->opcode->name;
4075 assert (len < 8);
4076 strncpy (name, inst->opcode->name, len);
4077 name[len] = '\0';
4078 }
4079
4080 /* Print the instruction mnemonic name. */
4081
4082 static void
4083 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
4084 {
4085 if (inst->opcode->flags & F_COND)
4086 {
4087 /* For instructions that are truly conditionally executed, e.g. b.cond,
4088 prepare the full mnemonic name with the corresponding condition
4089 suffix. */
4090 char name[8];
4091
4092 remove_dot_suffix (name, inst);
4093 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
4094 "%s.%s", name, inst->cond->names[0]);
4095 }
4096 else
4097 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
4098 "%s", inst->opcode->name);
4099 }
4100
4101 /* Decide whether we need to print a comment after the operands of
4102 instruction INST. */
4103
4104 static void
4105 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
4106 {
4107 if (inst->opcode->flags & F_COND)
4108 {
4109 char name[8];
4110 unsigned int i, num_conds;
4111
4112 remove_dot_suffix (name, inst);
4113 num_conds = ARRAY_SIZE (inst->cond->names);
4114 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
4115 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4116 "%s %s.%s",
4117 i == 1 ? " //" : ",",
4118 name, inst->cond->names[i]);
4119 }
4120 }
4121
4122 /* Build notes from verifiers into a string for printing. */
4123
4124 static void
4125 print_verifier_notes (aarch64_operand_error *detail,
4126 struct disassemble_info *info)
4127 {
4128 if (no_notes)
4129 return;
4130
4131 /* The output of the verifier cannot be a fatal error, otherwise the assembly
4132 would not have succeeded. We can safely ignore these. */
4133 assert (detail->non_fatal);
4134
4135 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4136 " // note: ");
4137 switch (detail->kind)
4138 {
4139 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
4140 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4141 _("this `%s' should have an immediately"
4142 " preceding `%s'"),
4143 detail->data[0].s, detail->data[1].s);
4144 break;
4145
4146 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
4147 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4148 _("expected `%s' after previous `%s'"),
4149 detail->data[0].s, detail->data[1].s);
4150 break;
4151
4152 default:
4153 assert (detail->error);
4154 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4155 "%s", detail->error);
4156 if (detail->index >= 0)
4157 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4158 " at operand %d", detail->index + 1);
4159 break;
4160 }
4161 }
4162
4163 /* Print the instruction according to *INST. */
4164
4165 static void
4166 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
4167 const aarch64_insn code,
4168 struct disassemble_info *info,
4169 aarch64_operand_error *mismatch_details)
4170 {
4171 bool has_notes = false;
4172
4173 print_mnemonic_name (inst, info);
4174 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
4175 print_comment (inst, info);
4176
4177 /* We've already printed a note, not enough space to print more so exit.
4178 Usually notes shouldn't overlap so it shouldn't happen that we have a note
4179 from a register and instruction at the same time. */
4180 if (has_notes)
4181 return;
4182
4183 /* Always run constraint verifiers, this is needed because constraints need to
4184 maintain a global state regardless of whether the instruction has the flag
4185 set or not. */
4186 enum err_type result = verify_constraints (inst, code, pc, false,
4187 mismatch_details, &insn_sequence);
4188 switch (result)
4189 {
4190 case ERR_VFI:
4191 print_verifier_notes (mismatch_details, info);
4192 break;
4193 case ERR_UND:
4194 case ERR_UNP:
4195 case ERR_NYI:
4196 default:
4197 break;
4198 }
4199 }
4200
4201 /* Entry-point of the instruction disassembler and printer. */
4202
4203 static void
4204 print_insn_aarch64_word (bfd_vma pc,
4205 uint32_t word,
4206 struct disassemble_info *info,
4207 aarch64_operand_error *errors)
4208 {
4209 static const char *err_msg[ERR_NR_ENTRIES+1] =
4210 {
4211 [ERR_OK] = "_",
4212 [ERR_UND] = "undefined",
4213 [ERR_UNP] = "unpredictable",
4214 [ERR_NYI] = "NYI"
4215 };
4216
4217 enum err_type ret;
4218 aarch64_inst inst;
4219
4220 info->insn_info_valid = 1;
4221 info->branch_delay_insns = 0;
4222 info->data_size = 0;
4223 info->target = 0;
4224 info->target2 = 0;
4225
4226 if (info->flags & INSN_HAS_RELOC)
4227 /* If the instruction has a reloc associated with it, then
4228 the offset field in the instruction will actually be the
4229 addend for the reloc. (If we are using REL type relocs).
4230 In such cases, we can ignore the pc when computing
4231 addresses, since the addend is not currently pc-relative. */
4232 pc = 0;
4233
4234 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
4235
4236 if (((word >> 21) & 0x3ff) == 1)
4237 {
4238 /* RESERVED for ALES. */
4239 assert (ret != ERR_OK);
4240 ret = ERR_NYI;
4241 }
4242
4243 switch (ret)
4244 {
4245 case ERR_UND:
4246 case ERR_UNP:
4247 case ERR_NYI:
4248 /* Handle undefined instructions. */
4249 info->insn_type = dis_noninsn;
4250 (*info->fprintf_styled_func) (info->stream,
4251 dis_style_assembler_directive,
4252 ".inst\t");
4253 (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
4254 "0x%08x", word);
4255 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4256 " ; %s", err_msg[ret]);
4257 break;
4258 case ERR_OK:
4259 user_friendly_fixup (&inst);
4260 if (inst.opcode->iclass == condbranch
4261 || inst.opcode->iclass == testbranch
4262 || inst.opcode->iclass == compbranch)
4263 info->insn_type = dis_condbranch;
4264 else if (inst.opcode->iclass == branch_imm)
4265 info->insn_type = dis_jsr;
4266 print_aarch64_insn (pc, &inst, word, info, errors);
4267 break;
4268 default:
4269 abort ();
4270 }
4271 }
4272
4273 /* Disallow mapping symbols ($x, $d etc) from
4274 being displayed in symbol relative addresses. */
4275
4276 bool
4277 aarch64_symbol_is_valid (asymbol * sym,
4278 struct disassemble_info * info ATTRIBUTE_UNUSED)
4279 {
4280 const char * name;
4281
4282 if (sym == NULL)
4283 return false;
4284
4285 name = bfd_asymbol_name (sym);
4286
4287 return name
4288 && (name[0] != '$'
4289 || (name[1] != 'x' && name[1] != 'd')
4290 || (name[2] != '\0' && name[2] != '.'));
4291 }
4292
4293 /* Print data bytes on INFO->STREAM. */
4294
4295 static void
4296 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
4297 uint32_t word,
4298 struct disassemble_info *info,
4299 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4300 {
4301 switch (info->bytes_per_chunk)
4302 {
4303 case 1:
4304 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4305 ".byte\t");
4306 info->fprintf_styled_func (info->stream, dis_style_immediate,
4307 "0x%02x", word);
4308 break;
4309 case 2:
4310 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4311 ".short\t");
4312 info->fprintf_styled_func (info->stream, dis_style_immediate,
4313 "0x%04x", word);
4314 break;
4315 case 4:
4316 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4317 ".word\t");
4318 info->fprintf_styled_func (info->stream, dis_style_immediate,
4319 "0x%08x", word);
4320 break;
4321 default:
4322 abort ();
4323 }
4324 }
4325
4326 /* Try to infer the code or data type from a symbol.
4327 Returns nonzero if *MAP_TYPE was set. */
4328
4329 static int
4330 get_sym_code_type (struct disassemble_info *info, int n,
4331 enum map_type *map_type)
4332 {
4333 asymbol * as;
4334 elf_symbol_type *es;
4335 unsigned int type;
4336 const char *name;
4337
4338 /* If the symbol is in a different section, ignore it. */
4339 if (info->section != NULL && info->section != info->symtab[n]->section)
4340 return false;
4341
4342 if (n >= info->symtab_size)
4343 return false;
4344
4345 as = info->symtab[n];
4346 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
4347 return false;
4348 es = (elf_symbol_type *) as;
4349
4350 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
4351
4352 /* If the symbol has function type then use that. */
4353 if (type == STT_FUNC)
4354 {
4355 *map_type = MAP_INSN;
4356 return true;
4357 }
4358
4359 /* Check for mapping symbols. */
4360 name = bfd_asymbol_name(info->symtab[n]);
4361 if (name[0] == '$'
4362 && (name[1] == 'x' || name[1] == 'd')
4363 && (name[2] == '\0' || name[2] == '.'))
4364 {
4365 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
4366 return true;
4367 }
4368
4369 return false;
4370 }
4371
4372 /* Set the feature bits in arch_variant in order to get the correct disassembly
4373 for the chosen architecture variant.
4374
4375 Currently we only restrict disassembly for Armv8-R and otherwise enable all
4376 non-R-profile features. */
4377 static void
4378 select_aarch64_variant (unsigned mach)
4379 {
4380 switch (mach)
4381 {
4382 case bfd_mach_aarch64_8R:
4383 AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
4384 break;
4385 default:
4386 arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
4387 AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
4388 }
4389 }
4390
/* Entry-point of the AArch64 disassembler.  Disassemble the word at PC
   using INFO's callbacks, deciding between instruction and data output
   from the ELF mapping symbols ($x/$d) in scope.  Returns the number of
   bytes consumed, or -1 if the memory read failed.  */

int
print_insn_aarch64 (bfd_vma pc,
		    struct disassemble_info *info)
{
  bfd_byte buffer[INSNLEN];
  int status;
  void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
		   aarch64_operand_error *);
  bool found = false;
  unsigned int size = 4;
  unsigned long data;
  aarch64_operand_error errors;
  /* Feature selection happens once per session (see below).  */
  static bool set_features;

  if (info->disassembler_options)
    {
      set_default_aarch64_dis_options (info);

      parse_aarch64_dis_options (info->disassembler_options);

      /* To avoid repeated parsing of these options, we remove them here.  */
      info->disassembler_options = NULL;
    }

  if (!set_features)
    {
      select_aarch64_variant (info->mach);
      set_features = true;
    }

  /* AArch64 instructions are always little-endian.  */
  info->endian_code = BFD_ENDIAN_LITTLE;

  /* Default to DATA.  A text section is required by the ABI to contain an
     INSN mapping symbol at the start.  A data section has no such
     requirement, hence if no mapping symbol is found the section must
     contain only data.  This however isn't very useful if the user has
     fully stripped the binaries.  If this is the case use the section
     attributes to determine the default.  If we have no section default to
     INSN as well, as we may be disassembling some raw bytes on a baremetal
     HEX file or similar.  */
  enum map_type type = MAP_DATA;
  if ((info->section && info->section->flags & SEC_CODE) || !info->section)
    type = MAP_INSN;

  /* First check the full symtab for a mapping symbol, even if there
     are no usable non-mapping symbols for this address.  */
  if (info->symtab_size != 0
      && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
    {
      int last_sym = -1;
      bfd_vma addr, section_vma = 0;
      bool can_use_search_opt_p;
      int n;

      /* If we have moved backwards, the cached search position is
	 stale; start the mapping-symbol search from scratch.  */
      if (pc <= last_mapping_addr)
	last_mapping_sym = -1;

      /* Start scanning at the start of the function, or wherever
	 we finished last time.  */
      n = info->symtab_pos + 1;

      /* If the last stop offset is different from the current one it means
	 we are disassembling a different glob of bytes.  As such the
	 optimization would not be safe and we should start over.  */
      can_use_search_opt_p = last_mapping_sym >= 0
	&& info->stop_offset == last_stop_offset;

      if (n >= last_mapping_sym && can_use_search_opt_p)
	n = last_mapping_sym;

      /* Look down while we haven't passed the location being disassembled.
	 The reason for this is that there's no defined order between a
	 symbol and a mapping symbol that may be at the same address.  We
	 may have to look at least one position ahead.  */
      for (; n < info->symtab_size; n++)
	{
	  addr = bfd_asymbol_value (info->symtab[n]);
	  if (addr > pc)
	    break;
	  if (get_sym_code_type (info, n, &type))
	    {
	      last_sym = n;
	      found = true;
	    }
	}

      if (!found)
	{
	  n = info->symtab_pos;
	  if (n >= last_mapping_sym && can_use_search_opt_p)
	    n = last_mapping_sym;

	  /* No mapping symbol found at this address.  Look backwards
	     for a preceding one, but don't go past the section start
	     otherwise a data section with no mapping symbol can pick up
	     a text mapping symbol of a preceding section.  The
	     documentation says section can be NULL, in which case we will
	     seek up all the way to the top.  */
	  if (info->section)
	    section_vma = info->section->vma;

	  for (; n >= 0; n--)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr < section_vma)
		break;

	      if (get_sym_code_type (info, n, &type))
		{
		  last_sym = n;
		  found = true;
		  break;
		}
	    }
	}

      /* Cache the search result so the next call, usually the next
	 sequential address, can resume from here.  */
      last_mapping_sym = last_sym;
      last_type = type;
      last_stop_offset = info->stop_offset;

      /* Look a little bit ahead to see if we should print out
	 less than four bytes of data.  If there's a symbol,
	 mapping or otherwise, after two bytes then don't
	 print more.  */
      if (last_type == MAP_DATA)
	{
	  size = 4 - (pc & 3);
	  for (n = last_sym + 1; n < info->symtab_size; n++)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr > pc)
		{
		  if (addr - pc < size)
		    size = addr - pc;
		  break;
		}
	    }
	  /* If the next symbol is after three bytes, we need to
	     print only part of the data, so that we can use either
	     .byte or .short.  */
	  if (size == 3)
	    size = (pc & 1) ? 1 : 2;
	}
    }
  else
    last_type = type;

  /* PR 10263: Disassemble data if requested to do so by the user.  */
  if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
    {
      /* size was set above.  */
      info->bytes_per_chunk = size;
      info->display_endian = info->endian;
      printer = print_insn_data;
    }
  else
    {
      info->bytes_per_chunk = size = INSNLEN;
      info->display_endian = info->endian_code;
      printer = print_insn_aarch64_word;
    }

  status = (*info->read_memory_func) (pc, buffer, size, info);
  if (status != 0)
    {
      (*info->memory_error_func) (status, pc, info);
      return -1;
    }

  data = bfd_get_bits (buffer, size * 8,
		       info->display_endian == BFD_ENDIAN_BIG);

  (*printer) (pc, data, info, &errors);

  return size;
}
4570
/* Describe the AArch64-specific -M disassembler options on STREAM.
   Called by objdump --help; the strings are translatable via _().  */

void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
no-aliases Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
aliases Do print instruction aliases.\n"));

  fprintf (stream, _("\n\
no-notes Don't print instruction notes.\n"));

  fprintf (stream, _("\n\
notes Do print instruction notes.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
debug_dump Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}
4598