tc-aarch64.c revision 1.1.1.7 1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,   /* No ABI selected on the command line.  */
  AARCH64_ABI_LP64 = 1,   /* 64-bit "long" and pointers (LP64).  */
  AARCH64_ABI_ILP32 = 2   /* 32-bit "int", "long" and pointers (ILP32).  */
};
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
enum vector_el_type
{
  NT_invtype = -1,	/* No/invalid element type.  */
  NT_b,			/* Byte (8-bit) elements.  */
  NT_h,			/* Halfword (16-bit) elements.  */
  NT_s,			/* Word (32-bit) elements.  */
  NT_d,			/* Doubleword (64-bit) elements.  */
  NT_q,			/* Quadword (128-bit) elements.  */
  NT_zero,		/* SVE "/z" (zeroing) predication suffix.  */
  NT_merge		/* SVE "/m" (merging) predication suffix.  */
};
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
enum sme_hv_slice
{
  HV_horizontal = 0,	/* Horizontal slice ("V" bit clear).  */
  HV_vertical = 1	/* Vertical slice ("V" bit set).  */
};
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of NTA_* bits saying what was parsed.  */
  unsigned width;		/* Element count; 0 when no count was given.  */
  int64_t index;		/* Element index, valid if NTA_HASINDEX set.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the reloc applies to.  */
  int pc_rel;				/* Non-zero for PC-relative relocs.  */
  enum aarch64_opnd opnd;		/* Operand the reloc is attached to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  /* Presumably set when libopcodes must re-encode the instruction for the
     fixup — confirm at the fixup-processing site.  */
  unsigned need_libopcodes_p : 1;
};
137
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    /* Error category; AARCH64_OPDE_NIL means no error recorded.  */
    enum aarch64_operand_error_kind kind;
    /* Optional message; NULL means a default message is composed from
       the operand's DESC field (see set_default_error).  */
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s), in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
180 static inline void
181 clear_error (void)
182 {
183 inst.parsing_error.kind = AARCH64_OPDE_NIL;
184 inst.parsing_error.error = NULL;
185 }
186
187 static inline bool
188 error_p (void)
189 {
190 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
191 }
192
/* Return the message recorded with the current diagnostic, or NULL when
   none was supplied (a default will be composed later).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
198
/* Return the category of the recorded diagnostic; AARCH64_OPDE_NIL when
   no error has been recorded for the current instruction.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
204
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208 inst.parsing_error.kind = kind;
209 inst.parsing_error.error = error;
210 }
211
/* Record a recoverable error: this operand failed the current template,
   but another template for the same mnemonic may still match.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
217
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  /* A NULL message tells the error-reporting code to build one itself.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
225
/* Record ERROR as a syntax error, unconditionally overwriting any
   previously recorded diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
231
232 static inline void
233 set_first_syntax_error (const char *error)
234 {
235 if (! error_p ())
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
/* Record ERROR with fatal severity; used when the operand is definitely
   an error rather than a template mismatch (e.g. a '#'-prefixed operand
   that fails to parse — see aarch64_get_expression).  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
244
245 /* Return value for certain parsers when the parsing fails; those parsers
247 return the information of the parsed result, e.g. register number, on
248 success. */
249 #define PARSE_FAIL -1
250
251 /* This is an invalid condition code that means no conditional field is
252 present. */
253 #define COND_ALWAYS 0x10
254
typedef struct
{
  const char *template;		/* Flag-combination string as written in assembly.  */
  uint32_t value;		/* Encoded value for that combination.  */
} asm_nzcv;
260
struct reloc_entry
{
  char *name;				/* Relocation operator name.  */
  bfd_reloc_code_real_type reloc;	/* Corresponding BFD reloc code.  */
};
266
267 /* Macros to define the register types and masks for the purpose
268 of parsing. */
269
/* X-macro list of register types.  Each BASIC_REG_TYPE(T) names a single
   register class; each MULTI_REG_TYPE(T, V) names a union of classes,
   with V built from REG_TYPE(...) mask bits.  The list is expanded
   several times below with different definitions of BASIC_REG_TYPE,
   MULTI_REG_TYPE and REG_TYPE: once to build the aarch64_reg_type
   enumerators, once to build the reg_type_masks[] table.  */
#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES						\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */					\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */					\
  BASIC_REG_TYPE(SP_32)	/* wsp     */					\
  BASIC_REG_TYPE(SP_64)	/* sp      */					\
  BASIC_REG_TYPE(Z_32)	/* wzr     */					\
  BASIC_REG_TYPE(Z_64)	/* xzr     */					\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */					\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */					\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */					\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */					\
  BASIC_REG_TYPE(VN)	/* v[0-31] */					\
  BASIC_REG_TYPE(ZN)	/* z[0-31] */					\
  BASIC_REG_TYPE(PN)	/* p[0-15] */					\
  BASIC_REG_TYPE(ZA)	/* za[0-15] */					\
  BASIC_REG_TYPE(ZAH)	/* za[0-15]h */					\
  BASIC_REG_TYPE(ZAV)	/* za[0-15]v */					\
  /* Typecheck: any 64-bit int reg (inc SP exc XZR).  */		\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
  /* Typecheck: any int (inc {W}SP inc [WX]ZR).  */			\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
     be used for SVE instructions, since Zn and Pn are valid symbols	\
     in other contexts.  */						\
  MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)
331
332 #undef BASIC_REG_TYPE
333 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
334 #undef MULTI_REG_TYPE
335 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
336
337 /* Register type enumerators. */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*, produced by expanding AARCH64_REG_TYPES with
     BASIC_REG_TYPE(T) defined as REG_TYPE_##T.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;
343
344 #undef BASIC_REG_TYPE
345 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
346 #undef REG_TYPE
347 #define REG_TYPE(T) (1 << REG_TYPE_##T)
348 #undef MULTI_REG_TYPE
349 #define MULTI_REG_TYPE(T,V) V,
350
351 /* Structure for a hash table entry for a register. */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number within its bank.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* classification.  */
  /* Presumably non-zero for the predefined register names, as opposed to
     user-defined aliases — confirm where entries are inserted.  */
  unsigned char builtin;
} reg_entry;
359
360 /* Values indexed by aarch64_reg_type to assist the type checking. */
static const unsigned reg_type_masks[] =
{
  /* One bitmask per aarch64_reg_type enumerator; a basic type expands to
     its own bit, a multi type to the union of its members' bits.  */
  AARCH64_REG_TYPES
};
365
366 #undef BASIC_REG_TYPE
367 #undef REG_TYPE
368 #undef MULTI_REG_TYPE
369 #undef AARCH64_REG_TYPES
370
371 /* Diagnostics used when we don't get a register of the expected type.
372 Note: this has to be synchronized with aarch64_reg_type definitions
373 above. */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* as_fatal does not return, so MSG is always set on any path that
	 reaches the return statement.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
448
449 /* Some well known registers that we refer to directly elsewhere. */
450 #define REG_SP 31
451 #define REG_ZR 31
452
453 /* Instructions take 4 bytes in the object file. */
454 #define INSN_SIZE 4
455
456 static htab_t aarch64_ops_hsh;
457 static htab_t aarch64_cond_hsh;
458 static htab_t aarch64_shift_hsh;
459 static htab_t aarch64_sys_regs_hsh;
460 static htab_t aarch64_pstatefield_hsh;
461 static htab_t aarch64_sys_regs_ic_hsh;
462 static htab_t aarch64_sys_regs_dc_hsh;
463 static htab_t aarch64_sys_regs_at_hsh;
464 static htab_t aarch64_sys_regs_tlbi_hsh;
465 static htab_t aarch64_sys_regs_sr_hsh;
466 static htab_t aarch64_reg_hsh;
467 static htab_t aarch64_barrier_opt_hsh;
468 static htab_t aarch64_nzcv_hsh;
469 static htab_t aarch64_pldop_hsh;
470 static htab_t aarch64_hint_opt_hsh;
471
472 /* Stuff needed to resolve the label ambiguity
473 As:
474 ...
475 label: <insn>
476 may differ from:
477 ...
478 label:
479 <insn> */
480
481 static symbolS *last_label_seen;
482
483 /* Literal pool structure. Held on a per-section
484 and per-sub-section basis. */
485
486 #define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;	/* The expression to be placed in the pool.  */
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
493
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* Pool contents.  */
  unsigned int next_free_entry;	/* Index of the first unused slot.  */
  unsigned int id;		/* Identifier for this pool.  */
  symbolS *symbol;		/* Symbol marking the pool's location.  */
  segT section;			/* Section the pool belongs to.  */
  subsegT sub_section;		/* Subsection the pool belongs to.  */
  /* Entry size in bytes — presumably 4 or 8 depending on the literal
     width; confirm where pools are created.  */
  int size;
  struct literal_pool *next;	/* Next pool in the list_of_pools chain.  */
} literal_pool;
505
506 /* Pointer to a linked list of literal pools. */
507 static literal_pool *list_of_pools = NULL;
508
509 /* Pure syntax. */
511
512 /* This array holds the chars that always start a comment. If the
513 pre-processor is disabled, these aren't very useful. */
514 const char comment_chars[] = "";
515
516 /* This array holds the chars that only start a comment at the beginning of
517 a line. If the line seems to have the form '# 123 filename'
518 .line and .file directives will appear in the pre-processed output. */
519 /* Note that input_file.c hand checks for '#' at the beginning of the
520 first line of the input file. This is because the compiler outputs
521 #NO_APP at the beginning of its output. */
522 /* Also note that comments like this one will always work. */
523 const char line_comment_chars[] = "#";
524
525 const char line_separator_chars[] = ";";
526
527 /* Chars that can be used to separate mant
528 from exp in floating point numbers. */
529 const char EXP_CHARS[] = "eE";
530
531 /* Chars that mean this number is a floating point constant. */
532 /* As in 0f12.456 */
533 /* or 0d1.2345e12 */
534
535 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
536
537 /* Prefix character that indicates the start of an immediate value. */
538 #define is_immediate_prefix(C) ((C) == '#')
539
540 /* Separator character handling. */
541
542 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
543
static inline bool
skip_past_char (char **str, char c)
{
  /* Consume C if it is the next character in *STR: advance *STR past it
     and return TRUE.  Otherwise leave *STR untouched and return FALSE.  */
  if (**str != c)
    return false;
  ++*str;
  return true;
}
555
556 #define skip_past_comma(str) skip_past_char (str, ',')
557
558 /* Arithmetic expressions (possibly involving symbols). */
559
560 static bool in_aarch64_get_expression = false;
561
562 /* Third argument to aarch64_get_expression. */
563 #define GE_NO_PREFIX false
564 #define GE_OPT_PREFIX true
565
566 /* Fourth argument to aarch64_get_expression. */
567 #define ALLOW_ABSENT false
568 #define REJECT_ABSENT true
569
570 /* Fifth argument to aarch64_get_expression. */
571 #define NORMAL_RESOLUTION false
572
573 /* Return TRUE if the string pointed by *STR is successfully parsed
574 as a valid expression; *EP will be filled with the information of
575 such an expression. Otherwise return FALSE.
576
577 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
578 If REJECT_ABSENT is true then treat missing expressions as an error.
579 If DEFER_RESOLUTION is true, then do not resolve expressions against
580 constant symbols. Necessary if the expression is part of a fixup
581 that uses a reloc that must be emitted. */
582
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally step over a leading '#' immediate marker.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression code works on input_line_pointer, so point it
     at our string for the duration of the call and restore it after.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand to flag bad expressions as O_illegal.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* With an explicit '#' the operand must be an expression, so treat
	 the failure as fatal rather than "try another template".  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
646
647 /* Turn a string in input_line_pointer into a floating point constant
648 of type TYPE, and store the appropriate bytes in *LITP. The number
649 of LITTLENUMS emitted is stored in *SIZEP. An error message is
650 returned, or NULL on OK. */
651
652 const char *
653 md_atof (int type, char *litP, int *sizeP)
654 {
655 return ieee_md_atof (type, litP, sizeP, target_big_endian);
656 }
657
658 /* We handle all bad expressions here, so that we can report the faulty
659 instruction in the error message. */
660 void
661 md_operand (expressionS * exp)
662 {
663 if (in_aarch64_get_expression)
664 exp->X_op = O_illegal;
665 }
666
667 /* Immediate values. */
668
669 /* Errors may be set multiple times during parsing or bit encoding
670 (particularly in the Neon bits), but usually the earliest error which is set
671 will be the most meaningful. Avoid overwriting it with later (cascading)
672 errors by calling this function. */
673
static void
first_error (const char *error)
{
  /* Keep the earliest recorded diagnostic; later ones are usually
     cascades of the first.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
680
681 /* Similar to first_error, but this function accepts formatted error
682 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit; a failed check means SIZE is
	 too small for this FORMAT.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
705
706 /* Register parsing. */
707
708 /* Generic register parser which is called by other specialized
709 register parsers.
710 CCP points to what should be the beginning of a register name.
711 If it is indeed a valid register name, advance CCP over it and
712 return the reg_entry structure; otherwise return NULL.
713 It does not issue diagnostics. */
714
static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* A register name must start with a letter that is also a valid
     symbol-name starter.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan a maximal run of letters, digits and underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table; names that are not
     registers simply miss.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only advance the caller's pointer on success.  */
  *ccp = p;
  return reg;
}
744
745 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
746 return FALSE. */
747 static bool
748 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
749 {
750 return (reg_type_masks[type] & (1 << reg->type)) != 0;
751 }
752
753 /* Try to parse a base or offset register. Allow SVE base and offset
754 registers if REG_TYPE includes SVE registers. Return the register
755 entry on success, setting *QUALIFIER to the register qualifier.
756 Return null otherwise.
757
758 Note that this function does not issue any diagnostics. */
759
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  /* Derive the operand qualifier from the register's class.  */
  switch (reg->type)
    {
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE registers are accepted only when REG_TYPE allows them, and
	 must carry an explicit ".s" or ".d" element suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
810
811 /* Try to parse a base or offset register. Return the register entry
812 on success, setting *QUALIFIER to the register qualifier. Return null
813 otherwise.
814
815 Note that this function does not issue any diagnostics. */
816
817 static const reg_entry *
818 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
819 {
820 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
821 }
822
823 /* Parse the qualifier of a vector register or vector element of type
824 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
825 succeeds; otherwise return FALSE.
826
827 Accept only one occurrence of:
828 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
829 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take an explicit element count,
     and a bare size letter (no leading digit) means "no count".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Map the size letter to the element type and its width in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE registers or the 1q arrangement.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Only 64-bit and 128-bit total vector sizes are accepted, plus the
     half-width 2h and 4b arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
908
909 /* *STR contains an SVE zero/merge predication suffix. Parse it into
910 *PARSED_TYPE and point *STR at the end of the suffix. */
911
912 static bool
913 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
914 {
915 char *ptr = *str;
916
917 /* Skip '/'. */
918 gas_assert (*ptr == '/');
919 ptr++;
920 switch (TOLOWER (*ptr))
921 {
922 case 'z':
923 parsed_type->type = NT_zero;
924 break;
925 case 'm':
926 parsed_type->type = NT_merge;
927 break;
928 default:
929 if (*ptr != '\0' && *ptr != ',')
930 first_error_fmt (_("unexpected character `%c' in predication type"),
931 *ptr);
932 else
933 first_error (_("missing predication type"));
934 return false;
935 }
936 parsed_type->width = 0;
937 *str = ptr + 1;
938 return true;
939 }
940
941 /* Parse a register of the type TYPE.
942
943 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
944 name or the parsed register is not of TYPE.
945
946 Otherwise return the register number, and optionally fill in the actual
947 type of the register in *RTYPE when multiple alternatives were given, and
948 return the register shape and element index information in *TYPEINFO.
949
950 IN_REG_LIST should be set with TRUE if the caller is parsing a register
951 list. */
952
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  /* Parse an optional ".<T>" arrangement suffix or, for predicate
     registers, a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The index must evaluate to a constant.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* The error is recorded but parsing continues: the register number
	 is still returned and callers observe the failure via the saved
	 diagnostic (e.g. error_p ()).  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1077
1078 /* Parse register.
1079
1080 Return the register number on success; return PARSE_FAIL otherwise.
1081
1082 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1083 the register (e.g. NEON double or quad reg when either has been requested).
1084
1085 If this is a NEON vector register with additional type information, fill
1086 in the struct pointed to by VECTYPE (if non-NULL).
1087
1088 This parser does not handle register list. */
1089
1090 static int
1091 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1092 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1093 {
1094 struct vector_type_el atype;
1095 char *str = *ccp;
1096 int reg = parse_typed_reg (&str, type, rtype, &atype,
1097 /*in_reg_list= */ false);
1098
1099 if (reg == PARSE_FAIL)
1100 return PARSE_FAIL;
1101
1102 if (vectype)
1103 *vectype = atype;
1104
1105 *ccp = str;
1106
1107 return reg;
1108 }
1109
1110 static inline bool
1111 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1112 {
1113 return
1114 e1.type == e2.type
1115 && e1.defined == e2.defined
1116 && e1.width == e2.width && e1.index == e2.index;
1117 }
1118
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  /* VAL is the most recently parsed register number; VAL_RANGE is the
     start of a "Va-Vb" range while one is being processed.  */
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Bump VAL_RANGE so the emission loop below starts after the
	     range's first register, which was emitted on the previous
	     iteration.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Pack each register number into a 5-bit field of RET_VAL.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a ',' separator, or after a '-' (range), in which
     case IN_RANGE is set for the next iteration.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* Always advance the caller past what was consumed, even on error.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1280
1281 /* Directives: register aliases. */
1282
1283 static reg_entry *
1284 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1285 {
1286 reg_entry *new;
1287 const char *name;
1288
1289 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1290 {
1291 if (new->builtin)
1292 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1293 str);
1294
1295 /* Only warn about a redefinition if it's not defined as the
1296 same register. */
1297 else if (new->number != number || new->type != type)
1298 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1299
1300 return NULL;
1301 }
1302
1303 name = xstrdup (str);
1304 new = XNEW (reg_entry);
1305
1306 new->name = name;
1307 new->number = number;
1308 new->type = type;
1309 new->builtin = false;
1310
1311 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1312
1313 return new;
1314 }
1315
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return true: the line was a .req, just a bad one.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the upper-case variant if it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1395
1396 /* Should never be called, as .req goes between the alias and the
1397 register name, not at the beginning of the line. */
1398 static void
1399 s_req (int a ATTRIBUTE_UNUSED)
1400 {
1401 as_bad (_("invalid syntax for .req directive"));
1402 }
1403
1404 /* The .unreq directive deletes an alias which was previously defined
1405 by .req. For example:
1406
1407 my_alias .req r11
1408 .unreq my_alias */
1409
1410 static void
1411 s_unreq (int a ATTRIBUTE_UNUSED)
1412 {
1413 char *name;
1414 char saved_char;
1415
1416 name = input_line_pointer;
1417
1418 while (*input_line_pointer != 0
1419 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1420 ++input_line_pointer;
1421
1422 saved_char = *input_line_pointer;
1423 *input_line_pointer = 0;
1424
1425 if (!*name)
1426 as_bad (_("invalid syntax for .unreq directive"));
1427 else
1428 {
1429 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1430
1431 if (!reg)
1432 as_bad (_("unknown register alias '%s'"), name);
1433 else if (reg->builtin)
1434 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1435 name);
1436 else
1437 {
1438 char *p;
1439 char *nbuf;
1440
1441 str_hash_delete (aarch64_reg_hsh, name);
1442 free ((char *) reg->name);
1443 free (reg);
1444
1445 /* Also locate the all upper case and all lower case versions.
1446 Do not complain if we cannot find one or the other as it
1447 was probably deleted above. */
1448
1449 nbuf = strdup (name);
1450 for (p = nbuf; *p; p++)
1451 *p = TOUPPER (*p);
1452 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1453 if (reg)
1454 {
1455 str_hash_delete (aarch64_reg_hsh, nbuf);
1456 free ((char *) reg->name);
1457 free (reg);
1458 }
1459
1460 for (p = nbuf; *p; p++)
1461 *p = TOLOWER (*p);
1462 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1463 if (reg)
1464 {
1465 str_hash_delete (aarch64_reg_hsh, nbuf);
1466 free ((char *) reg->name);
1467 free (reg);
1468 }
1469
1470 free (nbuf);
1471 }
1472 }
1473
1474 *input_line_pointer = saved_char;
1475 demand_empty_rest_of_line ();
1476 }
1477
1478 /* Directives: Instruction set selection. */
1479
1480 #ifdef OBJ_ELF
/* This code handles mapping symbols as defined in the ARM AArch64 ELF
   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05.)
   Note that previously $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
1485
/* Create a new mapping symbol for the transition to STATE at offset
   VALUE within FRAG.  "$d" marks data, "$x" marks A64 instructions.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must appear in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Same-address duplicate: drop the older symbol.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1541
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a "$d" symbol at offset VALUE within FRAG for the BYTES of
   padding, then a STATE symbol at VALUE + BYTES where real code
   resumes.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first map symbol;
	     clear that cache too.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1569
1570 static void mapping_state_2 (enum mstate state, int max_chars);
1571
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything before this point in the section as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1613
1614 /* Same as mapping_state, but MAX_CHARS bytes have already been
1615 allocated. Put the mapping symbol that far back. */
1616
1617 static void
1618 mapping_state_2 (enum mstate state, int max_chars)
1619 {
1620 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1621
1622 if (!SEG_NORMAL (now_seg))
1623 return;
1624
1625 if (mapstate == state)
1626 /* The mapping symbol has already been emitted.
1627 There is nothing else to do. */
1628 return;
1629
1630 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1631 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1632 }
1633 #else
1634 #define mapping_state(x) /* nothing */
1635 #define mapping_state_2(x, y) /* nothing */
1636 #endif
1637
1638 /* Directives: sectioning and alignment. */
1639
1640 static void
1641 s_bss (int ignore ATTRIBUTE_UNUSED)
1642 {
1643 /* We don't support putting frags in the BSS segment, we fake it by
1644 marking in_bss, then looking at s_skip for clues. */
1645 subseg_set (bss_section, 0);
1646 demand_empty_rest_of_line ();
1647 mapping_state (MAP_DATA);
1648 }
1649
1650 static void
1651 s_even (int ignore ATTRIBUTE_UNUSED)
1652 {
1653 /* Never make frag if expect extra pass. */
1654 if (!need_pass_2)
1655 frag_align (1, 0, 0);
1656
1657 record_alignment (now_seg, 1);
1658
1659 demand_empty_rest_of_line ();
1660 }
1661
1662 /* Directives: Literal pools. */
1663
1664 static literal_pool *
1665 find_literal_pool (int size)
1666 {
1667 literal_pool *pool;
1668
1669 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1670 {
1671 if (pool->section == now_seg
1672 && pool->sub_section == now_subseg && pool->size == size)
1673 break;
1674 }
1675
1676 return pool;
1677 }
1678
1679 static literal_pool *
1680 find_or_make_literal_pool (int size)
1681 {
1682 /* Next literal pool ID number. */
1683 static unsigned int latest_pool_num = 1;
1684 literal_pool *pool;
1685
1686 pool = find_literal_pool (size);
1687
1688 if (pool == NULL)
1689 {
1690 /* Create a new pool. */
1691 pool = XNEW (literal_pool);
1692 if (!pool)
1693 return NULL;
1694
1695 /* Currently we always put the literal pool in the current text
1696 section. If we were generating "small" model code where we
1697 knew that all code and initialised data was within 1MB then
1698 we could output literals to mergeable, read-only data
1699 sections. */
1700
1701 pool->next_free_entry = 0;
1702 pool->section = now_seg;
1703 pool->sub_section = now_subseg;
1704 pool->size = size;
1705 pool->next = list_of_pools;
1706 pool->symbol = NULL;
1707
1708 /* Add it to the list. */
1709 list_of_pools = pool;
1710 }
1711
1712 /* New pools, and emptied pools, will have a NULL symbol. */
1713 if (pool->symbol == NULL)
1714 {
1715 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1716 &zero_address_frag, 0);
1717 pool->id = latest_pool_num++;
1718 }
1719
1720 /* Done. */
1721 return pool;
1722 }
1723
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to refer to the pool's label symbol plus the entry's byte
   offset within the pool.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Duplicates
     are only detected for O_constant and O_symbol expressions; other
     kinds (e.g. O_big) always get a fresh entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the expression at the pool entry; s_ltorg later defines
     pool->symbol at the pool's emission point.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1783
1784 /* Can't use symbol_new here, so have to create a symbol and then at
1785 a later date assign it a value. That's what these functions do. */
1786
1787 static void
1788 symbol_locate (symbolS * symbolP,
1789 const char *name,/* It is copied, the caller can modify. */
1790 segT segment, /* Segment identifier (SEG_<something>). */
1791 valueT valu, /* Symbol value. */
1792 fragS * frag) /* Associated fragment. */
1793 {
1794 size_t name_length;
1795 char *preserved_copy_of_name;
1796
1797 name_length = strlen (name) + 1; /* +1 for \0. */
1798 obstack_grow (¬es, name, name_length);
1799 preserved_copy_of_name = obstack_finish (¬es);
1800
1801 #ifdef tc_canonicalize_symbol_name
1802 preserved_copy_of_name =
1803 tc_canonicalize_symbol_name (preserved_copy_of_name);
1804 #endif
1805
1806 S_SET_NAME (symbolP, preserved_copy_of_name);
1807
1808 S_SET_SEGMENT (symbolP, segment);
1809 S_SET_VALUE (symbolP, valu);
1810 symbol_clear_list_pointers (symbolP);
1811
1812 symbol_set_frag (symbolP, frag);
1813
1814 /* Link to end of symbol chain. */
1815 {
1816 extern int symbol_table_frozen;
1817
1818 if (symbol_table_frozen)
1819 abort ();
1820 }
1821
1822 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1823
1824 obj_symbol_new_hook (symbolP);
1825
1826 #ifdef tc_symbol_new_hook
1827 tc_symbol_new_hook (symbolP);
1828 #endif
1829
1830 #ifdef DEBUG_SYMS
1831 verify_symbol_chain (symbol_rootP, symbol_lastP);
1832 #endif /* DEBUG_SYMS */
1833 }
1834
1835
/* Handle the ".ltorg"/".pool" directive: emit every pending literal
   pool (4-byte and 8-byte entry sizes) for the current section at this
   point, defining each pool's label symbol, then mark the pools
   empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2 => 4-byte pools (.word), align 3 => 8-byte pools (.xword);
     align 4 apparently has no corresponding pool size in this file --
     the loop bound looks over-wide but is harmless (find_literal_pool
     simply finds nothing for size 16).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte makes the label impossible to write in source.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1894
1895 #ifdef OBJ_ELF
1896 /* Forward declarations for functions below, in the MD interface
1897 section. */
1898 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1899 static struct reloc_table_entry * find_reloc_table_entry (char **);
1900
1901 /* Directives: Data. */
1902 /* N.B. the support for relocation suffix in this directive needs to be
1903 implemented properly. */
1904
/* Handle ".word"/".long" (NBYTES == 4) and ".xword"/".dword"
   (NBYTES == 8): emit a comma-separated list of NBYTES-wide data
   values, diagnosing (but not yet implementing) ":reloc:" suffixes on
   symbolic operands.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_name:" prefix on the value.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		/* Relocation suffixes on data directives are parsed
		   but not yet supported.  */
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1956
1957 /* Mark symbol that it follows a variant PCS convention. */
1958
1959 static void
1960 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1961 {
1962 char *name;
1963 char c;
1964 symbolS *sym;
1965 asymbol *bfdsym;
1966 elf_symbol_type *elfsym;
1967
1968 c = get_symbol_name (&name);
1969 if (!*name)
1970 as_bad (_("Missing symbol name in directive"));
1971 sym = symbol_find_or_make (name);
1972 restore_line_pointer (c);
1973 demand_empty_rest_of_line ();
1974 bfdsym = symbol_get_bfdsym (sym);
1975 elfsym = elf_symbol_from (bfdsym);
1976 gas_assert (elfsym);
1977 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1978 }
1979 #endif /* OBJ_ELF */
1980
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of 32-bit words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian, so byte-swap
	 the word when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for line-number information.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2037
2038 static void
2039 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2040 {
2041 demand_empty_rest_of_line ();
2042 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2043 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2044 }
2045
2046 #ifdef OBJ_ELF
2047 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2048
2049 static void
2050 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2051 {
2052 expressionS exp;
2053
2054 expression (&exp);
2055 frag_grow (4);
2056 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2057 BFD_RELOC_AARCH64_TLSDESC_ADD);
2058
2059 demand_empty_rest_of_line ();
2060 }
2061
2062 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2063
2064 static void
2065 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2066 {
2067 expressionS exp;
2068
2069 /* Since we're just labelling the code, there's no need to define a
2070 mapping symbol. */
2071 expression (&exp);
2072 /* Make sure there is enough room in this frag for the following
2073 blr. This trick only works if the blr follows immediately after
2074 the .tlsdesc directive. */
2075 frag_grow (4);
2076 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2077 BFD_RELOC_AARCH64_TLSDESC_CALL);
2078
2079 demand_empty_rest_of_line ();
2080 }
2081
2082 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2083
2084 static void
2085 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2086 {
2087 expressionS exp;
2088
2089 expression (&exp);
2090 frag_grow (4);
2091 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2092 BFD_RELOC_AARCH64_TLSDESC_LDR);
2093
2094 demand_empty_rest_of_line ();
2095 }
2096 #endif /* OBJ_ELF */
2097
2098 static void s_aarch64_arch (int);
2099 static void s_aarch64_cpu (int);
2100 static void s_aarch64_arch_extension (int);
2101
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool flushing; ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target/feature selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the integer argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision and brain-float constants (argument is the
     float_cons format letter).  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2135
2136
2138 /* Check whether STR points to a register name followed by a comma or the
2139 end of line; REG_TYPE indicates which register types are checked
2140 against. Return TRUE if STR is such a register name; otherwise return
2141 FALSE. The function does not intend to produce any diagnostics, but since
2142 the register parser aarch64_reg_parse, which is called by this function,
2143 does produce diagnostics, we call clear_error to clear any diagnostics
2144 that may be generated by aarch64_reg_parse.
2145 Also, the function returns FALSE directly if there is any user error
2146 present at the function entry. This prevents the existing diagnostics
2147 state from being spoiled.
2148 The function currently serves parse_constant_immediate and
2149 parse_big_immediate only. */
2150 static bool
2151 reg_name_p (char *str, aarch64_reg_type reg_type)
2152 {
2153 int reg;
2154
2155 /* Prevent the diagnostics state from being spoiled. */
2156 if (error_p ())
2157 return false;
2158
2159 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2160
2161 /* Clear the parsing error that may be set by the reg parser. */
2162 clear_error ();
2163
2164 if (reg == PARSE_FAIL)
2165 return false;
2166
2167 skip_whitespace (str);
2168 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2169 return true;
2170
2171 return false;
2172 }
2173
2174 /* Parser functions used exclusively in instruction operands. */
2175
2176 /* Parse an immediate expression which may not be constant.
2177
2178 To prevent the expression parser from pushing a register name
2179 into the symbol table as an undefined symbol, firstly a check is
2180 done to find out whether STR is a register of type REG_TYPE followed
2181 by a comma or the end of line. Return FALSE if STR is such a string. */
2182
2183 static bool
2184 parse_immediate_expression (char **str, expressionS *exp,
2185 aarch64_reg_type reg_type)
2186 {
2187 if (reg_name_p (*str, reg_type))
2188 {
2189 set_recoverable_error (_("immediate operand required"));
2190 return false;
2191 }
2192
2193 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2194 NORMAL_RESOLUTION);
2195
2196 if (exp->X_op == O_absent)
2197 {
2198 set_fatal_syntax_error (_("missing immediate expression"));
2199 return false;
2200 }
2201
2202 return true;
2203 }
2204
2205 /* Constant immediate-value read function for use in insn parsing.
2206 STR points to the beginning of the immediate (with the optional
2207 leading #); *VAL receives the value. REG_TYPE says which register
2208 names should be treated as registers rather than as symbolic immediates.
2209
2210 Return TRUE on success; otherwise return FALSE. */
2211
2212 static bool
2213 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2214 {
2215 expressionS exp;
2216
2217 if (! parse_immediate_expression (str, &exp, reg_type))
2218 return false;
2219
2220 if (exp.X_op != O_constant)
2221 {
2222 set_syntax_error (_("constant expression required"));
2223 return false;
2224 }
2225
2226 *val = exp.X_add_number;
2227 return true;
2228 }
2229
/* Extract the AArch64 8-bit FP immediate encoding from the IEEE
   single-precision bit pattern IMM: the sign bit goes to b[7] and the
   top exponent/fraction bits b[25:19] go to b[6:0].  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31]    -> b[7].  */

  return sign | exp_frac;
}
2236
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~ e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  uint32_t expected;

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must be the complement of bit 30 ('Eeeeee').  */
  expected = (((imm >> 30) & 0x1) == 0) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2269
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double-precision value has the bit pattern

       6 66655555555 5544 44444444 33333333 33222222 22221111 111111
       3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
       n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* The 29 least significant bits must all be zero (they cannot survive
     the narrowing of the significand).  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the top exponent bit ('~~~') must be the
     complement of that bit ('E').  */
  uint32_t pattern = (high32 & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != pattern)
    return false;

  /* Reject Eeee_eeee == 1111_1111: the resulting float exponent would
     be all ones.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return true;
}
2317
2318 /* Return true if we should treat OPERAND as a double-precision
2319 floating-point operand rather than a single-precision one. */
2320 static bool
2321 double_precision_operand_p (const aarch64_opnd_info *operand)
2322 {
2323 /* Check for unsuffixed SVE registers, which are allowed
2324 for LDR and STR but not in instructions that require an
2325 immediate. We get better error messages if we arbitrarily
2326 pick one size, parse the immediate normally, and then
2327 report the match failure in the normal way. */
2328 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2329 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2330 }
2331
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision encoding is only acceptable if it can be
	     narrowed to a float without losing precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Single-precision encodings must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a bare register name so it is not mistaken for a symbolic
	 immediate; the error is recoverable so other parses can be tried.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let atof_ieee produce the single-precision
	 LITTLENUMs, most significant first.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  /* Hand back the single-precision encoding and the updated scan
     position.  */
  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2407
2408 /* Less-generic immediate-value read function with the possibility of loading
2409 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2410 instructions.
2411
2412 To prevent the expression parser from pushing a register name into the
2413 symbol table as an undefined symbol, a check is firstly done to find
2414 out whether STR is a register of type REG_TYPE followed by a comma or
2415 the end of line. Return FALSE if STR is such a register. */
2416
2417 static bool
2418 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2419 {
2420 char *ptr = *str;
2421
2422 if (reg_name_p (ptr, reg_type))
2423 {
2424 set_syntax_error (_("immediate operand required"));
2425 return false;
2426 }
2427
2428 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2429 NORMAL_RESOLUTION);
2430
2431 if (inst.reloc.exp.X_op == O_constant)
2432 *imm = inst.reloc.exp.X_add_number;
2433
2434 *str = ptr;
2435
2436 return true;
2437 }
2438
2439 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2440 if NEED_LIBOPCODES is non-zero, the fixup will need
2441 assistance from the libopcodes. */
2442
2443 static inline void
2444 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2445 const aarch64_opnd_info *operand,
2446 int need_libopcodes_p)
2447 {
2448 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2449 reloc->opnd = operand->type;
2450 if (need_libopcodes_p)
2451 reloc->need_libopcodes_p = 1;
2452 };
2453
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS (i.e. its pending relocation was marked by
   aarch64_set_gas_internal_fixup); otherwise return FALSE.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2462
2463 /* Assign the immediate value to the relevant field in *OPERAND if
2464 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2465 needs an internal fixup in a later stage.
2466 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2467 IMM.VALUE that may get assigned with the constant. */
2468 static inline void
2469 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2470 aarch64_opnd_info *operand,
2471 int addr_off_p,
2472 int need_libopcodes_p,
2473 int skip_p)
2474 {
2475 if (reloc->exp.X_op == O_constant)
2476 {
2477 if (addr_off_p)
2478 operand->addr.offset.imm = reloc->exp.X_add_number;
2479 else
2480 operand->imm.value = reloc->exp.X_add_number;
2481 reloc->type = BFD_RELOC_UNUSED;
2482 }
2483 else
2484 {
2485 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2486 /* Tell libopcodes to ignore this operand or not. This is helpful
2487 when one of the operands needs to be fixed up later but we need
2488 libopcodes to check the other operands. */
2489 operand->skip = skip_p;
2490 }
2491 }
2492
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate. It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;	/* Modifier name, without the surrounding colons.  */
  int pc_rel;		/* Non-zero if the relocation is PC-relative.  */
  /* BFD relocation to use for each instruction context that can carry
     this modifier; 0 where the modifier is not valid in that context.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2512
2513 static struct reloc_table_entry reloc_table[] =
2514 {
2515 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2516 {"lo12", 0,
2517 0, /* adr_type */
2518 0,
2519 0,
2520 BFD_RELOC_AARCH64_ADD_LO12,
2521 BFD_RELOC_AARCH64_LDST_LO12,
2522 0},
2523
2524 /* Higher 21 bits of pc-relative page offset: ADRP */
2525 {"pg_hi21", 1,
2526 0, /* adr_type */
2527 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2528 0,
2529 0,
2530 0,
2531 0},
2532
2533 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2534 {"pg_hi21_nc", 1,
2535 0, /* adr_type */
2536 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2537 0,
2538 0,
2539 0,
2540 0},
2541
2542 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2543 {"abs_g0", 0,
2544 0, /* adr_type */
2545 0,
2546 BFD_RELOC_AARCH64_MOVW_G0,
2547 0,
2548 0,
2549 0},
2550
2551 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2552 {"abs_g0_s", 0,
2553 0, /* adr_type */
2554 0,
2555 BFD_RELOC_AARCH64_MOVW_G0_S,
2556 0,
2557 0,
2558 0},
2559
2560 /* Less significant bits 0-15 of address/value: MOVK, no check */
2561 {"abs_g0_nc", 0,
2562 0, /* adr_type */
2563 0,
2564 BFD_RELOC_AARCH64_MOVW_G0_NC,
2565 0,
2566 0,
2567 0},
2568
2569 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2570 {"abs_g1", 0,
2571 0, /* adr_type */
2572 0,
2573 BFD_RELOC_AARCH64_MOVW_G1,
2574 0,
2575 0,
2576 0},
2577
2578 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2579 {"abs_g1_s", 0,
2580 0, /* adr_type */
2581 0,
2582 BFD_RELOC_AARCH64_MOVW_G1_S,
2583 0,
2584 0,
2585 0},
2586
2587 /* Less significant bits 16-31 of address/value: MOVK, no check */
2588 {"abs_g1_nc", 0,
2589 0, /* adr_type */
2590 0,
2591 BFD_RELOC_AARCH64_MOVW_G1_NC,
2592 0,
2593 0,
2594 0},
2595
2596 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2597 {"abs_g2", 0,
2598 0, /* adr_type */
2599 0,
2600 BFD_RELOC_AARCH64_MOVW_G2,
2601 0,
2602 0,
2603 0},
2604
2605 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2606 {"abs_g2_s", 0,
2607 0, /* adr_type */
2608 0,
2609 BFD_RELOC_AARCH64_MOVW_G2_S,
2610 0,
2611 0,
2612 0},
2613
2614 /* Less significant bits 32-47 of address/value: MOVK, no check */
2615 {"abs_g2_nc", 0,
2616 0, /* adr_type */
2617 0,
2618 BFD_RELOC_AARCH64_MOVW_G2_NC,
2619 0,
2620 0,
2621 0},
2622
2623 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2624 {"abs_g3", 0,
2625 0, /* adr_type */
2626 0,
2627 BFD_RELOC_AARCH64_MOVW_G3,
2628 0,
2629 0,
2630 0},
2631
2632 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2633 {"prel_g0", 1,
2634 0, /* adr_type */
2635 0,
2636 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2637 0,
2638 0,
2639 0},
2640
2641 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2642 {"prel_g0_nc", 1,
2643 0, /* adr_type */
2644 0,
2645 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2646 0,
2647 0,
2648 0},
2649
2650 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2651 {"prel_g1", 1,
2652 0, /* adr_type */
2653 0,
2654 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2655 0,
2656 0,
2657 0},
2658
2659 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2660 {"prel_g1_nc", 1,
2661 0, /* adr_type */
2662 0,
2663 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2664 0,
2665 0,
2666 0},
2667
2668 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2669 {"prel_g2", 1,
2670 0, /* adr_type */
2671 0,
2672 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2673 0,
2674 0,
2675 0},
2676
2677 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2678 {"prel_g2_nc", 1,
2679 0, /* adr_type */
2680 0,
2681 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2682 0,
2683 0,
2684 0},
2685
2686 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2687 {"prel_g3", 1,
2688 0, /* adr_type */
2689 0,
2690 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2691 0,
2692 0,
2693 0},
2694
2695 /* Get to the page containing GOT entry for a symbol. */
2696 {"got", 1,
2697 0, /* adr_type */
2698 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2699 0,
2700 0,
2701 0,
2702 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2703
2704 /* 12 bit offset into the page containing GOT entry for that symbol. */
2705 {"got_lo12", 0,
2706 0, /* adr_type */
2707 0,
2708 0,
2709 0,
2710 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2711 0},
2712
2713 /* 0-15 bits of address/value: MOVk, no check. */
2714 {"gotoff_g0_nc", 0,
2715 0, /* adr_type */
2716 0,
2717 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2718 0,
2719 0,
2720 0},
2721
2722 /* Most significant bits 16-31 of address/value: MOVZ. */
2723 {"gotoff_g1", 0,
2724 0, /* adr_type */
2725 0,
2726 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2727 0,
2728 0,
2729 0},
2730
2731 /* 15 bit offset into the page containing GOT entry for that symbol. */
2732 {"gotoff_lo15", 0,
2733 0, /* adr_type */
2734 0,
2735 0,
2736 0,
2737 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2738 0},
2739
2740 /* Get to the page containing GOT TLS entry for a symbol */
2741 {"gottprel_g0_nc", 0,
2742 0, /* adr_type */
2743 0,
2744 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2745 0,
2746 0,
2747 0},
2748
2749 /* Get to the page containing GOT TLS entry for a symbol */
2750 {"gottprel_g1", 0,
2751 0, /* adr_type */
2752 0,
2753 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2754 0,
2755 0,
2756 0},
2757
2758 /* Get to the page containing GOT TLS entry for a symbol */
2759 {"tlsgd", 0,
2760 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2761 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2762 0,
2763 0,
2764 0,
2765 0},
2766
2767 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2768 {"tlsgd_lo12", 0,
2769 0, /* adr_type */
2770 0,
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2773 0,
2774 0},
2775
2776 /* Lower 16 bits address/value: MOVk. */
2777 {"tlsgd_g0_nc", 0,
2778 0, /* adr_type */
2779 0,
2780 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2781 0,
2782 0,
2783 0},
2784
2785 /* Most significant bits 16-31 of address/value: MOVZ. */
2786 {"tlsgd_g1", 0,
2787 0, /* adr_type */
2788 0,
2789 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2790 0,
2791 0,
2792 0},
2793
2794 /* Get to the page containing GOT TLS entry for a symbol */
2795 {"tlsdesc", 0,
2796 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2797 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2798 0,
2799 0,
2800 0,
2801 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2802
2803 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2804 {"tlsdesc_lo12", 0,
2805 0, /* adr_type */
2806 0,
2807 0,
2808 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2809 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2810 0},
2811
2812 /* Get to the page containing GOT TLS entry for a symbol.
2813 The same as GD, we allocate two consecutive GOT slots
2814 for module index and module offset, the only difference
2815 with GD is the module offset should be initialized to
2816 zero without any outstanding runtime relocation. */
2817 {"tlsldm", 0,
2818 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2819 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2820 0,
2821 0,
2822 0,
2823 0},
2824
2825 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2826 {"tlsldm_lo12_nc", 0,
2827 0, /* adr_type */
2828 0,
2829 0,
2830 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2831 0,
2832 0},
2833
2834 /* 12 bit offset into the module TLS base address. */
2835 {"dtprel_lo12", 0,
2836 0, /* adr_type */
2837 0,
2838 0,
2839 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2840 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2841 0},
2842
2843 /* Same as dtprel_lo12, no overflow check. */
2844 {"dtprel_lo12_nc", 0,
2845 0, /* adr_type */
2846 0,
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2849 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2850 0},
2851
2852 /* bits[23:12] of offset to the module TLS base address. */
2853 {"dtprel_hi12", 0,
2854 0, /* adr_type */
2855 0,
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2858 0,
2859 0},
2860
2861 /* bits[15:0] of offset to the module TLS base address. */
2862 {"dtprel_g0", 0,
2863 0, /* adr_type */
2864 0,
2865 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2866 0,
2867 0,
2868 0},
2869
2870 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2871 {"dtprel_g0_nc", 0,
2872 0, /* adr_type */
2873 0,
2874 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2875 0,
2876 0,
2877 0},
2878
2879 /* bits[31:16] of offset to the module TLS base address. */
2880 {"dtprel_g1", 0,
2881 0, /* adr_type */
2882 0,
2883 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2884 0,
2885 0,
2886 0},
2887
2888 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2889 {"dtprel_g1_nc", 0,
2890 0, /* adr_type */
2891 0,
2892 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2893 0,
2894 0,
2895 0},
2896
2897 /* bits[47:32] of offset to the module TLS base address. */
2898 {"dtprel_g2", 0,
2899 0, /* adr_type */
2900 0,
2901 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2902 0,
2903 0,
2904 0},
2905
2906 /* Lower 16 bit offset into GOT entry for a symbol */
2907 {"tlsdesc_off_g0_nc", 0,
2908 0, /* adr_type */
2909 0,
2910 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2911 0,
2912 0,
2913 0},
2914
2915 /* Higher 16 bit offset into GOT entry for a symbol */
2916 {"tlsdesc_off_g1", 0,
2917 0, /* adr_type */
2918 0,
2919 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2920 0,
2921 0,
2922 0},
2923
2924 /* Get to the page containing GOT TLS entry for a symbol */
2925 {"gottprel", 0,
2926 0, /* adr_type */
2927 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2928 0,
2929 0,
2930 0,
2931 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2932
2933 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2934 {"gottprel_lo12", 0,
2935 0, /* adr_type */
2936 0,
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2940 0},
2941
2942 /* Get tp offset for a symbol. */
2943 {"tprel", 0,
2944 0, /* adr_type */
2945 0,
2946 0,
2947 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2948 0,
2949 0},
2950
2951 /* Get tp offset for a symbol. */
2952 {"tprel_lo12", 0,
2953 0, /* adr_type */
2954 0,
2955 0,
2956 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2957 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2958 0},
2959
2960 /* Get tp offset for a symbol. */
2961 {"tprel_hi12", 0,
2962 0, /* adr_type */
2963 0,
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2966 0,
2967 0},
2968
2969 /* Get tp offset for a symbol. */
2970 {"tprel_lo12_nc", 0,
2971 0, /* adr_type */
2972 0,
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2975 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2976 0},
2977
2978 /* Most significant bits 32-47 of address/value: MOVZ. */
2979 {"tprel_g2", 0,
2980 0, /* adr_type */
2981 0,
2982 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2983 0,
2984 0,
2985 0},
2986
2987 /* Most significant bits 16-31 of address/value: MOVZ. */
2988 {"tprel_g1", 0,
2989 0, /* adr_type */
2990 0,
2991 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2992 0,
2993 0,
2994 0},
2995
2996 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2997 {"tprel_g1_nc", 0,
2998 0, /* adr_type */
2999 0,
3000 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3001 0,
3002 0,
3003 0},
3004
3005 /* Most significant bits 0-15 of address/value: MOVZ. */
3006 {"tprel_g0", 0,
3007 0, /* adr_type */
3008 0,
3009 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3010 0,
3011 0,
3012 0},
3013
3014 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3015 {"tprel_g0_nc", 0,
3016 0, /* adr_type */
3017 0,
3018 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3019 0,
3020 0,
3021 0},
3022
3023 /* 15bit offset from got entry to base address of GOT table. */
3024 {"gotpage_lo15", 0,
3025 0,
3026 0,
3027 0,
3028 0,
3029 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3030 0},
3031
3032 /* 14bit offset from got entry to base address of GOT table. */
3033 {"gotpage_lo14", 0,
3034 0,
3035 0,
3036 0,
3037 0,
3038 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3039 0},
3040 };
3041
3042 /* Given the address of a pointer pointing to the textual name of a
3043 relocation as may appear in assembler source, attempt to find its
3044 details in reloc_table. The pointer will be updated to the character
3045 after the trailing colon. On failure, NULL will be returned;
3046 otherwise return the reloc_table_entry. */
3047
3048 static struct reloc_table_entry *
3049 find_reloc_table_entry (char **str)
3050 {
3051 unsigned int i;
3052 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3053 {
3054 int length = strlen (reloc_table[i].name);
3055
3056 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3057 && (*str)[length] == ':')
3058 {
3059 *str += (length + 1);
3060 return &reloc_table[i];
3061 }
3062 }
3063
3064 return NULL;
3065 }
3066
/* Helper for aarch64_force_relocation below.

   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLS- and page-relative relocations, kept sorted by name.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3169
3170 int
3171 aarch64_force_relocation (struct fix *fixp)
3172 {
3173 int res = aarch64_force_reloc (fixp->fx_r_type);
3174
3175 if (res == -1)
3176 return generic_force_reloc (fixp);
3177 return res;
3178 }
3179
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3194
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which operators are acceptable (see enum
   parse_shift_mode above).  On success the operator kind and amount are
   recorded in OPERAND->shifter and *STR is advanced past the operator.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL and MUL are only meaningful in specific modes; reject them
     early so the mode switch below need not repeat the check.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check that the operator kind is acceptable in MODE.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' (register-offset mode) or a
     "MUL VL" operator means no amount is present.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only an extend operator may omit the amount, and only when it
	 was not introduced by an immediate prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3368
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates can take this form.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  /* Accept an immediate expression; it is stored in inst.reloc.exp for
     later fixup rather than being evaluated here.  */
  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  Note that a
     successfully parsed shift here is the error case.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3410
3411 /* Parse a <shifter_operand> for a data processing instruction:
3412
3413 <Rm>
3414 <Rm>, <shift>
3415 #<immediate>
3416 #<immediate>, LSL #imm
3417
3418 where <shift> is handled by parse_shift above, and the last two
3419 cases are handled by the function above.
3420
3421 Validation of immediate operands is deferred to md_apply_fix.
3422
3423 Return TRUE on success; otherwise return FALSE. */
3424
3425 static bool
3426 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3427 enum parse_shift_mode mode)
3428 {
3429 const reg_entry *reg;
3430 aarch64_opnd_qualifier_t qualifier;
3431 enum aarch64_operand_class opd_class
3432 = aarch64_get_operand_class (operand->type);
3433
3434 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3435 if (reg)
3436 {
3437 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3438 {
3439 set_syntax_error (_("unexpected register in the immediate operand"));
3440 return false;
3441 }
3442
3443 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3444 {
3445 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3446 return false;
3447 }
3448
3449 operand->reg.regno = reg->number;
3450 operand->qualifier = qualifier;
3451
3452 /* Accept optional shift operation on register. */
3453 if (! skip_past_comma (str))
3454 return true;
3455
3456 if (! parse_shift (str, operand, mode))
3457 return false;
3458
3459 return true;
3460 }
3461 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3462 {
3463 set_syntax_error
3464 (_("integer register expected in the extended/shifted operand "
3465 "register"));
3466 return false;
3467 }
3468
3469 /* We have a shifted immediate variable. */
3470 return parse_shifter_operand_imm (str, operand, mode);
3471 }
3472
/* Parse a shifter operand that may be introduced by a ":rello:"
   relocation modifier (the ADD-variant relocation is recorded).

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over the '#' and/or ':' prefix.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.
	 Note that find_reloc_table_entry advances *str past the
	 modifier name on success.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  /* No relocation modifier: plain register or immediate operand.  */
  return parse_shifter_operand (str, operand, mode);
}
3535
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (ty) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* A '[' has been consumed: parse the mandatory base register.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifts require an X (or same-size) offset register;
		 the SVE_ADDR_ZX form is the one sanctioned exception.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* SXTW/UXTW extends demand a W offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->ldst_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* After ']': either '!' (pre-indexed writeback) or ',' followed by a
     post-index register or immediate.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Synthesize the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3909
3910 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3911 on success. */
3912 static bool
3913 parse_address (char **str, aarch64_opnd_info *operand)
3914 {
3915 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3916 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3917 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3918 }
3919
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Same parser as parse_address, but with SVE base/offset register
     classes and the MUL VL immediate shifter enabled.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3932
3933 /* Parse a register X0-X30. The register must be 64-bit and register 31
3934 is unallocated. */
3935 static bool
3936 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3937 {
3938 const reg_entry *reg = parse_reg (str);
3939 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3940 {
3941 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3942 return false;
3943 }
3944 operand->reg.regno = reg->number;
3945 operand->qualifier = AARCH64_OPND_QLF_X;
3946 return true;
3947 }
3948
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
   On return *INTERNAL_FIXUP_P is 1 if the operand had no relocation
   modifier (the fix-up is to be resolved by the assembler itself) and
   0 if an explicit :reloc_op: modifier was recorded in inst.reloc.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Use the MOVW variant of the relocation.  */
      inst.reloc.type = entry->movw_type;
    }
  else
    /* No modifier: the fix-up will be handled internally.  */
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
3993
3994 /* Parse an operand for an ADRP instruction:
3995 ADRP <Xd>, <label>
3996 Return TRUE on success; otherwise return FALSE. */
3997
3998 static bool
3999 parse_adrp (char **str)
4000 {
4001 char *p;
4002
4003 p = *str;
4004 if (*p == ':')
4005 {
4006 struct reloc_table_entry *entry;
4007
4008 /* Try to parse a relocation. Anything else is an error. */
4009 ++p;
4010 if (!(entry = find_reloc_table_entry (&p)))
4011 {
4012 set_syntax_error (_("unknown relocation modifier"));
4013 return false;
4014 }
4015
4016 if (entry->adrp_type == 0)
4017 {
4018 set_syntax_error
4019 (_("this relocation modifier is not allowed on this instruction"));
4020 return false;
4021 }
4022
4023 inst.reloc.type = entry->adrp_type;
4024 }
4025 else
4026 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4027
4028 inst.reloc.pc_rel = 1;
4029 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4030 aarch64_force_reloc (inst.reloc.type) == 1))
4031 return false;
4032 *str = p;
4033 return true;
4034 }
4035
4036 /* Miscellaneous. */
4037
4038 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4039 of SIZE tokens in which index I gives the token for field value I,
4040 or is null if field value I is invalid. REG_TYPE says which register
4041 names should be treated as registers rather than as symbolic immediates.
4042
4043 Return true on success, moving *STR past the operand and storing the
4044 field value in *VAL. */
4045
4046 static int
4047 parse_enum_string (char **str, int64_t *val, const char *const *array,
4048 size_t size, aarch64_reg_type reg_type)
4049 {
4050 expressionS exp;
4051 char *p, *q;
4052 size_t i;
4053
4054 /* Match C-like tokens. */
4055 p = q = *str;
4056 while (ISALNUM (*q))
4057 q++;
4058
4059 for (i = 0; i < size; ++i)
4060 if (array[i]
4061 && strncasecmp (array[i], p, q - p) == 0
4062 && array[i][q - p] == 0)
4063 {
4064 *val = i;
4065 *str = q;
4066 return true;
4067 }
4068
4069 if (!parse_immediate_expression (&p, &exp, reg_type))
4070 return false;
4071
4072 if (exp.X_op == O_constant
4073 && (uint64_t) exp.X_add_number < size)
4074 {
4075 *val = exp.X_add_number;
4076 *str = p;
4077 return true;
4078 }
4079
4080 /* Use the default error for this operand. */
4081 return false;
4082 }
4083
4084 /* Parse an option for a preload instruction. Returns the encoding for the
4085 option, or PARSE_FAIL. */
4086
4087 static int
4088 parse_pldop (char **str)
4089 {
4090 char *p, *q;
4091 const struct aarch64_name_value_pair *o;
4092
4093 p = q = *str;
4094 while (ISALNUM (*q))
4095 q++;
4096
4097 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4098 if (!o)
4099 return PARSE_FAIL;
4100
4101 *str = q;
4102 return o->value;
4103 }
4104
4105 /* Parse an option for a barrier instruction. Returns the encoding for the
4106 option, or PARSE_FAIL. */
4107
4108 static int
4109 parse_barrier (char **str)
4110 {
4111 char *p, *q;
4112 const struct aarch64_name_value_pair *o;
4113
4114 p = q = *str;
4115 while (ISALPHA (*q))
4116 q++;
4117
4118 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4119 if (!o)
4120 return PARSE_FAIL;
4121
4122 *str = q;
4123 return o->value;
4124 }
4125
4126 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4127 return 0 if successful. Otherwise return PARSE_FAIL. */
4128
4129 static int
4130 parse_barrier_psb (char **str,
4131 const struct aarch64_name_value_pair ** hint_opt)
4132 {
4133 char *p, *q;
4134 const struct aarch64_name_value_pair *o;
4135
4136 p = q = *str;
4137 while (ISALPHA (*q))
4138 q++;
4139
4140 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4141 if (!o)
4142 {
4143 set_fatal_syntax_error
4144 ( _("unknown or missing option to PSB/TSB"));
4145 return PARSE_FAIL;
4146 }
4147
4148 if (o->value != 0x11)
4149 {
4150 /* PSB only accepts option name 'CSYNC'. */
4151 set_syntax_error
4152 (_("the specified option is not accepted for PSB/TSB"));
4153 return PARSE_FAIL;
4154 }
4155
4156 *str = q;
4157 *hint_opt = o;
4158 return 0;
4159 }
4160
4161 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4162 return 0 if successful. Otherwise return PARSE_FAIL. */
4163
4164 static int
4165 parse_bti_operand (char **str,
4166 const struct aarch64_name_value_pair ** hint_opt)
4167 {
4168 char *p, *q;
4169 const struct aarch64_name_value_pair *o;
4170
4171 p = q = *str;
4172 while (ISALPHA (*q))
4173 q++;
4174
4175 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4176 if (!o)
4177 {
4178 set_fatal_syntax_error
4179 ( _("unknown option to BTI"));
4180 return PARSE_FAIL;
4181 }
4182
4183 switch (o->value)
4184 {
4185 /* Valid BTI operands. */
4186 case HINT_OPD_C:
4187 case HINT_OPD_J:
4188 case HINT_OPD_JC:
4189 break;
4190
4191 default:
4192 set_syntax_error
4193 (_("unknown option to BTI"));
4194 return PARSE_FAIL;
4195 }
4196
4197 *str = q;
4198 *hint_opt = o;
4199 return 0;
4200 }
4201
/* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
   Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
   on failure.  Format:

   REG_TYPE.QUALIFIER

   Side effect: Update STR with current parse position of success.

   NOTE(review): on the failure paths after the register name has been
   matched, *STR has already been advanced; callers that need to
   backtrack pass a copy of the parse position (see
   parse_sme_za_hv_tiles_operand).  */

static const reg_entry *
parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
		     aarch64_opnd_qualifier_t *qualifier)
{
  char *q;

  reg_entry *reg = parse_reg (str);
  if (reg != NULL && reg->type == reg_type)
    {
      /* The element-size qualifier must be introduced by '.'.  */
      if (!skip_past_char (str, '.'))
	{
	  set_syntax_error (_("missing ZA tile element size separator"));
	  return NULL;
	}

      /* Map the single size letter onto the scalar qualifier.  An
	 unrecognized letter fails without setting an error message.  */
      q = *str;
      switch (TOLOWER (*q))
	{
	case 'b':
	  *qualifier = AARCH64_OPND_QLF_S_B;
	  break;
	case 'h':
	  *qualifier = AARCH64_OPND_QLF_S_H;
	  break;
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	case 'q':
	  *qualifier = AARCH64_OPND_QLF_S_Q;
	  break;
	default:
	  return NULL;
	}
      q++;

      *str = q;
      return reg;
    }

  return NULL;
}
4255
4256 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4257 Function return tile QUALIFIER on success.
4258
4259 Tiles are in example format: za[0-9]\.[bhsd]
4260
4261 Function returns <ZAda> register number or PARSE_FAIL.
4262 */
4263 static int
4264 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4265 {
4266 int regno;
4267 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4268
4269 if (reg == NULL)
4270 return PARSE_FAIL;
4271 regno = reg->number;
4272
4273 switch (*qualifier)
4274 {
4275 case AARCH64_OPND_QLF_S_B:
4276 if (regno != 0x00)
4277 {
4278 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4279 return PARSE_FAIL;
4280 }
4281 break;
4282 case AARCH64_OPND_QLF_S_H:
4283 if (regno > 0x01)
4284 {
4285 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4286 return PARSE_FAIL;
4287 }
4288 break;
4289 case AARCH64_OPND_QLF_S_S:
4290 if (regno > 0x03)
4291 {
4292 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4293 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4294 return PARSE_FAIL;
4295 }
4296 break;
4297 case AARCH64_OPND_QLF_S_D:
4298 if (regno > 0x07)
4299 {
4300 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4301 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4302 return PARSE_FAIL;
4303 }
4304 break;
4305 default:
4306 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4307 return PARSE_FAIL;
4308 }
4309
4310 return regno;
4311 }
4312
4313 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4314
4315 #<imm>
4316 <imm>
4317
4318 Function return TRUE if immediate was found, or FALSE.
4319 */
4320 static bool
4321 parse_sme_immediate (char **str, int64_t *imm)
4322 {
4323 int64_t val;
4324 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4325 return false;
4326
4327 *imm = val;
4328 return true;
4329 }
4330
4331 /* Parse index with vector select register and immediate:
4332
4333 [<Wv>, <imm>]
4334 [<Wv>, #<imm>]
4335 where <Wv> is in W12-W15 range and # is optional for immediate.
4336
4337 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4338 is set to true.
4339
4340 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4341 IMM output.
4342 */
4343 static bool
4344 parse_sme_za_hv_tiles_operand_index (char **str,
4345 int *vector_select_register,
4346 int64_t *imm)
4347 {
4348 const reg_entry *reg;
4349
4350 if (!skip_past_char (str, '['))
4351 {
4352 set_syntax_error (_("expected '['"));
4353 return false;
4354 }
4355
4356 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4357 reg = parse_reg (str);
4358 if (reg == NULL || reg->type != REG_TYPE_R_32
4359 || reg->number < 12 || reg->number > 15)
4360 {
4361 set_syntax_error (_("expected vector select register W12-W15"));
4362 return false;
4363 }
4364 *vector_select_register = reg->number;
4365
4366 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4367 {
4368 set_syntax_error (_("expected ','"));
4369 return false;
4370 }
4371
4372 if (!parse_sme_immediate (str, imm))
4373 {
4374 set_syntax_error (_("index offset immediate expected"));
4375 return false;
4376 }
4377
4378 if (!skip_past_char (str, ']'))
4379 {
4380 set_syntax_error (_("expected ']'"));
4381 return false;
4382 }
4383
4384 return true;
4385 }
4386
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

   ZA0<HV>.B[<Wv>, #<imm>]
   <ZAn><HV>.H[<Wv>, #<imm>]
   <ZAn><HV>.S[<Wv>, #<imm>]
   <ZAn><HV>.D[<Wv>, #<imm>]
   <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal then the vertical tile spelling, each on its own
     copy of the parse position so a failed attempt leaves *STR intact
     (parse_reg_with_qual can advance its argument on failure).  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both the number of addressable tiles and
     the range of the index offset immediate.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4483
4484
4485 static int
4486 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4487 enum sme_hv_slice *slice_indicator,
4488 int *vector_select_register,
4489 int *imm,
4490 aarch64_opnd_qualifier_t *qualifier)
4491 {
4492 int regno;
4493
4494 if (!skip_past_char (str, '{'))
4495 {
4496 set_syntax_error (_("expected '{'"));
4497 return PARSE_FAIL;
4498 }
4499
4500 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4501 vector_select_register, imm,
4502 qualifier);
4503
4504 if (regno == PARSE_FAIL)
4505 return PARSE_FAIL;
4506
4507 if (!skip_past_char (str, '}'))
4508 {
4509 set_syntax_error (_("expected '}'"));
4510 return PARSE_FAIL;
4511 }
4512
4513 return regno;
4514 }
4515
/* Parse list of up to eight 64-bit element tile names separated by commas in
   SME's ZERO instruction:

   ZERO { <mask> }

   Function returns <mask>:

   an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
*/
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* An H tile covers every second 64-bit tile: 0x55 sets bits
	       regno, regno+2, regno+4 and regno+6.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* An S tile covers every fourth 64-bit tile: 0x11 sets bits
	       regno and regno+4.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* A D tile is exactly one bit of the mask.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4577
4578 /* Wraps in curly braces <mask> operand ZERO instruction:
4579
4580 ZERO { <mask> }
4581
4582 Function returns value of <mask> bit-field.
4583 */
4584 static int
4585 parse_sme_list_of_64bit_tiles (char **str)
4586 {
4587 int regno;
4588
4589 if (!skip_past_char (str, '{'))
4590 {
4591 set_syntax_error (_("expected '{'"));
4592 return PARSE_FAIL;
4593 }
4594
4595 /* Empty <mask> list is an all-zeros immediate. */
4596 if (!skip_past_char (str, '}'))
4597 {
4598 regno = parse_sme_zero_mask (str);
4599 if (regno == PARSE_FAIL)
4600 return PARSE_FAIL;
4601
4602 if (!skip_past_char (str, '}'))
4603 {
4604 set_syntax_error (_("expected '}'"));
4605 return PARSE_FAIL;
4606 }
4607 }
4608 else
4609 regno = 0x00;
4610
4611 return regno;
4612 }
4613
4614 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4615 Operand format:
4616
4617 ZA[<Wv>, <imm>]
4618 ZA[<Wv>, #<imm>]
4619
4620 Function returns <Wv> or PARSE_FAIL.
4621 */
4622 static int
4623 parse_sme_za_array (char **str, int *imm)
4624 {
4625 char *p, *q;
4626 int regno;
4627 int64_t imm_value;
4628
4629 p = q = *str;
4630 while (ISALPHA (*q))
4631 q++;
4632
4633 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4634 {
4635 set_syntax_error (_("expected ZA array"));
4636 return PARSE_FAIL;
4637 }
4638
4639 if (! parse_sme_za_hv_tiles_operand_index (&q, ®no, &imm_value))
4640 return PARSE_FAIL;
4641
4642 if (imm_value < 0 || imm_value > 15)
4643 {
4644 set_syntax_error (_("offset out of range"));
4645 return PARSE_FAIL;
4646 }
4647
4648 *imm = imm_value;
4649 *str = q;
4650 return regno;
4651 }
4652
/* Parse the streaming mode operand of SMSTART and SMSTOP:

     {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
 */
4659 static int
4660 parse_sme_sm_za (char **str)
4661 {
4662 char *p, *q;
4663
4664 p = q = *str;
4665 while (ISALPHA (*q))
4666 q++;
4667
4668 if ((q - p != 2)
4669 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4670 {
4671 set_syntax_error (_("expected SM or ZA operand"));
4672 return PARSE_FAIL;
4673 }
4674
4675 *str = q;
4676 return TOLOWER (p[0]);
4677 }
4678
4679 /* Parse the name of the source scalable predicate register, the index base
4680 register W12-W15 and the element index. Function performs element index
4681 limit checks as well as qualifier type checks.
4682
4683 <Pn>.<T>[<Wv>, <imm>]
4684 <Pn>.<T>[<Wv>, #<imm>]
4685
4686 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4687 <imm> to IMM.
4688 Function returns <Pn>, or PARSE_FAIL.
4689 */
4690 static int
4691 parse_sme_pred_reg_with_index(char **str,
4692 int *index_base_reg,
4693 int *imm,
4694 aarch64_opnd_qualifier_t *qualifier)
4695 {
4696 int regno;
4697 int64_t imm_limit;
4698 int64_t imm_value;
4699 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4700
4701 if (reg == NULL)
4702 return PARSE_FAIL;
4703 regno = reg->number;
4704
4705 switch (*qualifier)
4706 {
4707 case AARCH64_OPND_QLF_S_B:
4708 imm_limit = 15;
4709 break;
4710 case AARCH64_OPND_QLF_S_H:
4711 imm_limit = 7;
4712 break;
4713 case AARCH64_OPND_QLF_S_S:
4714 imm_limit = 3;
4715 break;
4716 case AARCH64_OPND_QLF_S_D:
4717 imm_limit = 1;
4718 break;
4719 default:
4720 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4721 return PARSE_FAIL;
4722 }
4723
4724 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4725 return PARSE_FAIL;
4726
4727 if (imm_value < 0 || imm_value > imm_limit)
4728 {
4729 set_syntax_error (_("element index out of range for given variant"));
4730 return PARSE_FAIL;
4731 }
4732
4733 *imm = imm_value;
4734
4735 return regno;
4736 }
4737
4738 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4739 Returns the encoding for the option, or PARSE_FAIL.
4740
4741 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4742 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4743
4744 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4745 field, otherwise as a system register.
4746 */
4747
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the first
     character that cannot be part of a register name.  Over-long names are
     silently truncated here and rejected just below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Field range checks per the generic MRS/MSR encoding.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit encoding used by the opcode
	     table: op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known register name; diagnose uses not supported by the selected
	 processor, but still return the encoding so assembly continues.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  /* Advance the caller's pointer past the consumed name.  */
  *str = q;
  return value;
}
4811
4812 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4813 for the option, or NULL. */
4814
4815 static const aarch64_sys_ins_reg *
4816 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4817 {
4818 char *p, *q;
4819 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4820 const aarch64_sys_ins_reg *o;
4821
4822 p = buf;
4823 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4824 if (p < buf + (sizeof (buf) - 1))
4825 *p++ = TOLOWER (*q);
4826 *p = '\0';
4827
4828 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4829 valid system register. This is enforced by construction of the hash
4830 table. */
4831 if (p - buf != q - *str)
4832 return NULL;
4833
4834 o = str_hash_find (sys_ins_regs, buf);
4835 if (!o)
4836 return NULL;
4837
4838 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4839 o->name, o->value, o->flags, 0))
4840 as_bad (_("selected processor does not support system register "
4841 "name '%s'"), buf);
4842 if (aarch64_sys_reg_deprecated_p (o->flags))
4843 as_warn (_("system register name '%s' is deprecated and may be "
4844 "removed in a future release"), buf);
4845
4846 *str = q;
4847 return o;
4848 }
4849
/* Operand-parsing helper macros used inside parse_operands.  They all
   assume the caller's locals (str, val, rtype, reg, qualifier, info,
   imm_reg_type) and a `failure' label are in scope at the expansion
   point.  */

/* Consume the single character CHR, or branch to `failure'.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL, or branch to `failure'
   with the default error set.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE into INFO,
   recording both number and qualifier, or branch to `failure'.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc"),
   or branch to `failure'.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or branch to `failure'.  MIN/MAX are stringized into the message.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse an enumerated name from ARRAY into VAL, or branch to
   `failure'.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to `failure' if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4902
/* Encode the 12-bit imm field of Add/sub immediate.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* imm12 occupies bits [21:10] of the instruction word.  */
  const unsigned int imm12_shift = 10;
  return imm << imm12_shift;
}
4910
/* Encode the shift amount field of Add/sub immediate.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The sh field occupies bits [23:22].  */
  const unsigned int sh_shift = 22;
  return cnt << sh_shift;
}
4917
4918
/* Encode the imm field of Adr instruction.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;	       /* [1:0]  -> [30:29].  */
  uint32_t immhi = (imm & 0x001ffffc) << 3;    /* [20:2] -> [23:5].   */
  return immlo | immhi;
}
4926
/* Encode the immediate field of Move wide immediate.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* imm16 occupies bits [20:5].  */
  const unsigned int imm16_shift = 5;
  return imm << imm16_shift;
}
4933
/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* imm26 occupies bits [25:0]; mask away any sign-extended high bits.  */
  return ofs & 0x03ffffff;
}
4940
/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* imm19 occupies bits [23:5].  */
  return (ofs & 0x7ffff) << 5;
}
4947
/* Encode the 19-bit offset of ld literal.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* imm19 occupies bits [23:5], identical placement to the conditional
     branch offset.  */
  return (ofs & 0x7ffff) << 5;
}
4954
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* imm14 occupies bits [18:5].  */
  return (ofs & 0x3fff) << 5;
}
4961
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* imm16 occupies bits [20:5].  */
  const unsigned int imm16_shift = 5;
  return imm << imm16_shift;
}
4968
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 is the op bit distinguishing ADD from SUB; toggling it swaps
     the two, in either direction.  */
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4975
/* Force the opc field of a MOVZ/MOVN-family opcode to the MOVZ
   encoding by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4981
/* Force the opc field of a MOVZ/MOVN-family opcode to the MOVN
   encoding by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4987
4988 /* Overall per-instruction processing. */
4989
4990 /* We need to be able to fix up arbitrary expressions in some statements.
4991 This is so that we can handle symbols that are an arbitrary distance from
4992 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4993 which returns part of an address in a form which will be valid for
4994 a data instruction. We do this by pushing the expression into a symbol
4995 in the expr_section, and creating a fix for that. */
4996
4997 static fixS *
4998 fix_new_aarch64 (fragS * frag,
4999 int where,
5000 short int size,
5001 expressionS * exp,
5002 int pc_rel,
5003 int reloc)
5004 {
5005 fixS *new_fix;
5006
5007 switch (exp->X_op)
5008 {
5009 case O_constant:
5010 case O_symbol:
5011 case O_add:
5012 case O_subtract:
5013 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5014 break;
5015
5016 default:
5017 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5018 pc_rel, reloc);
5019 break;
5020 }
5021 return new_fix;
5022 }
5023
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.
   The table is indexed directly by enum aarch64_operand_error_kind (see
   the DEBUG_TRACE uses below), so it must list one string per enumerator
   in declaration order; AARCH64_OPDE_UNTIED_IMMS and
   AARCH64_OPDE_UNTIED_OPERAND were previously missing, which made those
   lookups read past the end of the array.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5048
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     increasing order of severity; the asserts document (and check) the
     required ordering.  NOTE(review): the UNTIED_IMMS/UNTIED_OPERAND
     kinds are not covered by these asserts — confirm their position in
     the enum if the ordering ever changes.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5072
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* N.B. the returned pointer refers to this static buffer, so the result
     is only valid until the next call.  */
  static char mnemonic[32];
  char *ptr;

  /* Copy the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5101
5102 static void
5103 reset_aarch64_instruction (aarch64_instruction *instruction)
5104 {
5105 memset (instruction, '\0', sizeof (aarch64_instruction));
5106 instruction->reloc.type = BFD_RELOC_UNUSED;
5107 }
5108
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is about.  */
  aarch64_operand_error detail;		/* The error details themselves.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;	/* First record; NULL when list empty.  */
  operand_error_record *tail;	/* Last record; NULL when list empty.  */
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines by
   init_operand_error_report.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5140
5141 /* Initialize the data structure that stores the operand mismatch
5142 information on assembling one line of the assembly code. */
5143 static void
5144 init_operand_error_report (void)
5145 {
5146 if (operand_error_report.head != NULL)
5147 {
5148 gas_assert (operand_error_report.tail != NULL);
5149 operand_error_report.tail->next = free_opnd_error_record_nodes;
5150 free_opnd_error_record_nodes = operand_error_report.head;
5151 operand_error_report.head = NULL;
5152 operand_error_report.tail = NULL;
5153 return;
5154 }
5155 gas_assert (operand_error_report.tail == NULL);
5156 }
5157
5158 /* Return TRUE if some operand error has been recorded during the
5159 parsing of the current assembly line using the opcode *OPCODE;
5160 otherwise return FALSE. */
5161 static inline bool
5162 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5163 {
5164 operand_error_record *record = operand_error_report.head;
5165 return record && record->opcode == opcode;
5166 }
5167
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: recycle from the free list if possible,
	 otherwise allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a fresh head record was linked in above, or the existing one
     for this opcode is to be overwritten with the new details.  */
  record->detail = new_record->detail;
}
5219
5220 static inline void
5221 record_operand_error_info (const aarch64_opcode *opcode,
5222 aarch64_operand_error *error_info)
5223 {
5224 operand_error_record record;
5225 record.opcode = opcode;
5226 record.detail = *error_info;
5227 add_operand_error_record (&record);
5228 }
5229
5230 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5231 error message *ERROR, for operand IDX (count from 0). */
5232
5233 static void
5234 record_operand_error (const aarch64_opcode *opcode, int idx,
5235 enum aarch64_operand_error_kind kind,
5236 const char* error)
5237 {
5238 aarch64_operand_error info;
5239 memset(&info, 0, sizeof (info));
5240 info.index = idx;
5241 info.kind = kind;
5242 info.error = error;
5243 info.non_fatal = false;
5244 record_operand_error_info (opcode, &info);
5245 }
5246
5247 static void
5248 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5249 enum aarch64_operand_error_kind kind,
5250 const char* error, const int *extra_data)
5251 {
5252 aarch64_operand_error info;
5253 info.index = idx;
5254 info.kind = kind;
5255 info.error = error;
5256 info.data[0].i = extra_data[0];
5257 info.data[1].i = extra_data[1];
5258 info.data[2].i = extra_data[2];
5259 info.non_fatal = false;
5260 record_operand_error_info (opcode, &info);
5261 }
5262
5263 static void
5264 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5265 const char* error, int lower_bound,
5266 int upper_bound)
5267 {
5268 int data[3] = {lower_bound, upper_bound, 0};
5269 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5270 error, data);
5271 }
5272
5273 /* Remove the operand error record for *OPCODE. */
5274 static void ATTRIBUTE_UNUSED
5275 remove_operand_error_record (const aarch64_opcode *opcode)
5276 {
5277 if (opcode_has_operand_error_p (opcode))
5278 {
5279 operand_error_record* record = operand_error_report.head;
5280 gas_assert (record != NULL && operand_error_report.tail != NULL);
5281 operand_error_report.head = record->next;
5282 record->next = free_opnd_error_record_nodes;
5283 free_opnd_error_record_nodes = record;
5284 if (operand_error_report.head == NULL)
5285 {
5286 gas_assert (operand_error_report.tail == record);
5287 operand_error_report.tail = NULL;
5288 }
5289 }
5290 }
5291
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL sequence marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many of the instruction's operand qualifiers agree with
	 this candidate sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count (strict `>'
	 preserves the earliest on ties).  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5341
5342 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5343 corresponding operands in *INSTR. */
5344
5345 static inline void
5346 assign_qualifier_sequence (aarch64_inst *instr,
5347 const aarch64_opnd_qualifier_t *qualifiers)
5348 {
5349 int i = 0;
5350 int num_opnds = aarch64_num_of_operands (instr->opcode);
5351 gas_assert (num_opnds);
5352 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5353 instr->operands[i].qualifier = *qualifiers;
5354 }
5355
/* Print operands for the diagnosis purpose.
   Appends the textual form of OPNDS (per template OPCODE) to BUF.
   NOTE(review): BUF is grown with unchecked strcat; callers pass a
   2048-byte buffer — confirm this is always large enough.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }
}
5399
/* Send to stderr a string as information.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix the message with the current file (and line, when known).  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }

  fprintf (stderr, _("Info: "));

  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);

  (void) putc ('\n', stderr);
}
5423
/* Output one operand error record.
   RECORD is the error to report; STR is the original assembly line, used
   verbatim in the diagnostic text.  Non-fatal errors are emitted as
   warnings, everything else as errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX < 0 means the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Route the message through as_warn for non-fatal errors, as_bad
     otherwise.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands (skipping the mnemonic) into
	     the freshly reset IR; parsing must succeed here, while encoding
	     is expected to fail (that is why we got this error).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the inclusive lower/upper bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] is the expected number of registers.  */
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5619
5620 /* Process and output the error message about the operand mismatching.
5621
5622 When this function is called, the operand error information had
5623 been collected for an assembly line and there will be multiple
5624 errors in the case of multiple instruction templates; output the
5625 error message that most closely describes the problem.
5626
5627 The errors to be printed can be filtered on printing all errors
5628 or only non-fatal errors. This distinction has to be made because
5629 the error buffer may already be filled with fatal errors we don't want to
5630 print due to the different instruction templates. */
5631
5632 static void
5633 output_operand_error_report (char *str, bool non_fatal_only)
5634 {
5635 int largest_error_pos;
5636 const char *msg = NULL;
5637 enum aarch64_operand_error_kind kind;
5638 operand_error_record *curr;
5639 operand_error_record *head = operand_error_report.head;
5640 operand_error_record *record = NULL;
5641
5642 /* No error to report. */
5643 if (head == NULL)
5644 return;
5645
5646 gas_assert (head != NULL && operand_error_report.tail != NULL);
5647
5648 /* Only one error. */
5649 if (head == operand_error_report.tail)
5650 {
5651 /* If the only error is a non-fatal one and we don't want to print it,
5652 just exit. */
5653 if (!non_fatal_only || head->detail.non_fatal)
5654 {
5655 DEBUG_TRACE ("single opcode entry with error kind: %s",
5656 operand_mismatch_kind_names[head->detail.kind]);
5657 output_operand_error_record (head, str);
5658 }
5659 return;
5660 }
5661
5662 /* Find the error kind of the highest severity. */
5663 DEBUG_TRACE ("multiple opcode entries with error kind");
5664 kind = AARCH64_OPDE_NIL;
5665 for (curr = head; curr != NULL; curr = curr->next)
5666 {
5667 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5668 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5669 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5670 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5671 kind = curr->detail.kind;
5672 }
5673
5674 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5675
5676 /* Pick up one of errors of KIND to report. */
5677 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5678 for (curr = head; curr != NULL; curr = curr->next)
5679 {
5680 /* If we don't want to print non-fatal errors then don't consider them
5681 at all. */
5682 if (curr->detail.kind != kind
5683 || (non_fatal_only && !curr->detail.non_fatal))
5684 continue;
5685 /* If there are multiple errors, pick up the one with the highest
5686 mismatching operand index. In the case of multiple errors with
5687 the equally highest operand index, pick up the first one or the
5688 first one with non-NULL error message. */
5689 if (curr->detail.index > largest_error_pos
5690 || (curr->detail.index == largest_error_pos && msg == NULL
5691 && curr->detail.error != NULL))
5692 {
5693 largest_error_pos = curr->detail.index;
5694 record = curr;
5695 msg = record->detail.error;
5696 }
5697 }
5698
5699 /* The way errors are collected in the back-end is a bit non-intuitive. But
5700 essentially, because each operand template is tried recursively you may
5701 always have errors collected from the previous tried OPND. These are
5702 usually skipped if there is one successful match. However now with the
5703 non-fatal errors we have to ignore those previously collected hard errors
5704 when we're only interested in printing the non-fatal ones. This condition
5705 prevents us from printing errors that are not appropriate, since we did
5706 match a condition, but it also has warnings that it wants to print. */
5707 if (non_fatal_only && !record)
5708 return;
5709
5710 gas_assert (largest_error_pos != -2 && record != NULL);
5711 DEBUG_TRACE ("Pick up error kind %s to report",
5712 operand_mismatch_kind_names[record->detail.kind]);
5713
5714 /* Output. */
5715 output_operand_error_record (record, str);
5716 }
5717
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store byte by byte, least-significant first, so the output is
     little-endian regardless of host byte order.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5729
/* Read a 4-byte little-endian AArch64 instruction from BUF and return
   it as a host-order 32-bit value.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Accumulate from the most-significant byte down; the widening into
     a uint32_t accumulator avoids any int-overflow on the top byte.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5739
/* Emit the assembled 4-byte instruction in inst.base.value into the
   current frag, recording any relocation described by inst.reloc.
   NEW_INST, when non-NULL, is attached to the fixup so the fix-up code
   can later re-encode the instruction.  Also emits DWARF line info.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve space for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fixups carry the operand and flags so the
	     instruction can be re-encoded once the value is known.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5773
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same mnemonic.  */
};

typedef struct templates templates;
5783
5784 static templates *
5785 lookup_mnemonic (const char *start, int len)
5786 {
5787 templates *templ = NULL;
5788
5789 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5790 return templ;
5791 }
5792
5793 /* Subroutine of md_assemble, responsible for looking up the primary
5794 opcode from the mnemonic the user wrote. BASE points to the beginning
5795 of the mnemonic, DOT points to the first '.' within the mnemonic
5796 (if any) and END points to the end of the mnemonic. */
5797
5798 static templates *
5799 opcode_lookup (char *base, char *dot, char *end)
5800 {
5801 const aarch64_cond *cond;
5802 char condname[16];
5803 int len;
5804
5805 if (dot == end)
5806 return 0;
5807
5808 inst.cond = COND_ALWAYS;
5809
5810 /* Handle a possible condition. */
5811 if (dot)
5812 {
5813 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5814 if (!cond)
5815 return 0;
5816 inst.cond = cond->value;
5817 len = dot - base;
5818 }
5819 else
5820 len = end - base;
5821
5822 if (inst.cond == COND_ALWAYS)
5823 {
5824 /* Look for unaffixed mnemonic. */
5825 return lookup_mnemonic (base, len);
5826 }
5827 else if (len <= 13)
5828 {
5829 /* append ".c" to mnemonic if conditional */
5830 memcpy (condname, base, len);
5831 memcpy (condname + len, ".c", 2);
5832 base = condname;
5833 len += 2;
5834 return lookup_mnemonic (base, len);
5835 }
5836
5837 return NULL;
5838 }
5839
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  On failure, records a syntax
   error and returns AARCH64_OPND_QLF_NIL.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; qualifiers for wider vectors
     of the same element type sit at fixed offsets above the base.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* The zeroing and merging suffixes map straight onto the predicate
     qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on the S_B..S_Q qualifiers
	 being contiguous and in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-bit, 64-bit and 128-bit total arrangements exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      /* The result must land inside the V_4B..V_1Q qualifier range.  */
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5914
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The substitute value for the omitted operand comes from the opcode
     table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled pattern: an omitted multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation to apply.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-based operands: the default indexes the options table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6013
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W-register destination restricts which groups are usable (the
     G2/G3 groups address bits beyond 32).  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Some relocation types are rejected outright for MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implicit shift amount
     recorded in operand 1.  */
  switch (inst.reloc.type)
    {
    /* G0 group: bits [15:0], shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: bits [47:32], shift 32; 64-bit destinations only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 group: bits [63:48], shift 48; 64-bit destinations only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6115
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert (and return -1 as unsigned) otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 lookup for sizes 1..16; (unsigned char) -1 marks values that
     are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well: it would otherwise index ls[-1], an
     out-of-bounds read.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6131
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the pseudo reloc variants (plain LDST_LO12 and
     the four TLS DTPREL/TPREL forms, in pseudo-reloc-code order);
     columns are indexed by log2 of the transfer size (8..128 bits).
     The TLS rows have no 128-bit relocation, hence the NONE entries.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo reloc codes above may reach this point.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand's qualifier was not parsed, infer it from
     the destination operand's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows stop at 64-bit accesses (see the NONE entries above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6219
/* Check whether a register list REGINFO is valid.  The low two bits of
   REGINFO hold the register count minus one and each subsequent 5-bit
   field holds a register number.  The registers must be numbered in
   increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  const uint32_t count = (reginfo & 0x3) + 1;
  const uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expect = (reginfo >> 2) & 0x1f;
  uint32_t i;

  /* Register I occupies bits [2 + 5*I, 6 + 5*I]; each one must equal
     its predecessor plus STEP, wrapping modulo 32.  */
  for (i = 1; i < count; i++)
    {
      expect = (expect + step) & 0x1f;
      if (((reginfo >> (2 + 5 * i)) & 0x1f) != expect)
	return false;
    }

  return true;
}
6250
6251 /* Generic instruction operand parser. This does no encoding and no
6252 semantic validation; it merely squirrels values away in the inst
6253 structure. Returns TRUE or FALSE depending on whether the
6254 specified grammar matched. */
6255
6256 static bool
6257 parse_operands (char *str, const aarch64_opcode *opcode)
6258 {
6259 int i;
6260 char *backtrack_pos = 0;
6261 const enum aarch64_opnd *operands = opcode->operands;
6262 aarch64_reg_type imm_reg_type;
6263
6264 clear_error ();
6265 skip_whitespace (str);
6266
6267 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6268 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6269 else
6270 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6271
6272 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6273 {
6274 int64_t val;
6275 const reg_entry *reg;
6276 int comma_skipped_p = 0;
6277 aarch64_reg_type rtype;
6278 struct vector_type_el vectype;
6279 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6280 aarch64_opnd_info *info = &inst.base.operands[i];
6281 aarch64_reg_type reg_type;
6282
6283 DEBUG_TRACE ("parse operand %d", i);
6284
6285 /* Assign the operand code. */
6286 info->type = operands[i];
6287
6288 if (optional_operand_p (opcode, i))
6289 {
6290 /* Remember where we are in case we need to backtrack. */
6291 gas_assert (!backtrack_pos);
6292 backtrack_pos = str;
6293 }
6294
6295 /* Expect comma between operands; the backtrack mechanism will take
6296 care of cases of omitted optional operand. */
6297 if (i > 0 && ! skip_past_char (&str, ','))
6298 {
6299 set_syntax_error (_("comma expected between operands"));
6300 goto failure;
6301 }
6302 else
6303 comma_skipped_p = 1;
6304
6305 switch (operands[i])
6306 {
6307 case AARCH64_OPND_Rd:
6308 case AARCH64_OPND_Rn:
6309 case AARCH64_OPND_Rm:
6310 case AARCH64_OPND_Rt:
6311 case AARCH64_OPND_Rt2:
6312 case AARCH64_OPND_Rs:
6313 case AARCH64_OPND_Ra:
6314 case AARCH64_OPND_Rt_LS64:
6315 case AARCH64_OPND_Rt_SYS:
6316 case AARCH64_OPND_PAIRREG:
6317 case AARCH64_OPND_SVE_Rm:
6318 po_int_reg_or_fail (REG_TYPE_R_Z);
6319
6320 /* In LS64 load/store instructions Rt register number must be even
6321 and <=22. */
6322 if (operands[i] == AARCH64_OPND_Rt_LS64)
6323 {
6324 /* We've already checked if this is valid register.
6325 This will check if register number (Rt) is not undefined for LS64
6326 instructions:
6327 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6328 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6329 {
6330 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6331 goto failure;
6332 }
6333 }
6334 break;
6335
6336 case AARCH64_OPND_Rd_SP:
6337 case AARCH64_OPND_Rn_SP:
6338 case AARCH64_OPND_Rt_SP:
6339 case AARCH64_OPND_SVE_Rn_SP:
6340 case AARCH64_OPND_Rm_SP:
6341 po_int_reg_or_fail (REG_TYPE_R_SP);
6342 break;
6343
6344 case AARCH64_OPND_Rm_EXT:
6345 case AARCH64_OPND_Rm_SFT:
6346 po_misc_or_fail (parse_shifter_operand
6347 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6348 ? SHIFTED_ARITH_IMM
6349 : SHIFTED_LOGIC_IMM)));
6350 if (!info->shifter.operator_present)
6351 {
6352 /* Default to LSL if not present. Libopcodes prefers shifter
6353 kind to be explicit. */
6354 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6355 info->shifter.kind = AARCH64_MOD_LSL;
6356 /* For Rm_EXT, libopcodes will carry out further check on whether
6357 or not stack pointer is used in the instruction (Recall that
6358 "the extend operator is not optional unless at least one of
6359 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6360 }
6361 break;
6362
6363 case AARCH64_OPND_Fd:
6364 case AARCH64_OPND_Fn:
6365 case AARCH64_OPND_Fm:
6366 case AARCH64_OPND_Fa:
6367 case AARCH64_OPND_Ft:
6368 case AARCH64_OPND_Ft2:
6369 case AARCH64_OPND_Sd:
6370 case AARCH64_OPND_Sn:
6371 case AARCH64_OPND_Sm:
6372 case AARCH64_OPND_SVE_VZn:
6373 case AARCH64_OPND_SVE_Vd:
6374 case AARCH64_OPND_SVE_Vm:
6375 case AARCH64_OPND_SVE_Vn:
6376 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6377 if (val == PARSE_FAIL)
6378 {
6379 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6380 goto failure;
6381 }
6382 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6383
6384 info->reg.regno = val;
6385 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6386 break;
6387
6388 case AARCH64_OPND_SVE_Pd:
6389 case AARCH64_OPND_SVE_Pg3:
6390 case AARCH64_OPND_SVE_Pg4_5:
6391 case AARCH64_OPND_SVE_Pg4_10:
6392 case AARCH64_OPND_SVE_Pg4_16:
6393 case AARCH64_OPND_SVE_Pm:
6394 case AARCH64_OPND_SVE_Pn:
6395 case AARCH64_OPND_SVE_Pt:
6396 case AARCH64_OPND_SME_Pm:
6397 reg_type = REG_TYPE_PN;
6398 goto vector_reg;
6399
6400 case AARCH64_OPND_SVE_Za_5:
6401 case AARCH64_OPND_SVE_Za_16:
6402 case AARCH64_OPND_SVE_Zd:
6403 case AARCH64_OPND_SVE_Zm_5:
6404 case AARCH64_OPND_SVE_Zm_16:
6405 case AARCH64_OPND_SVE_Zn:
6406 case AARCH64_OPND_SVE_Zt:
6407 reg_type = REG_TYPE_ZN;
6408 goto vector_reg;
6409
6410 case AARCH64_OPND_Va:
6411 case AARCH64_OPND_Vd:
6412 case AARCH64_OPND_Vn:
6413 case AARCH64_OPND_Vm:
6414 reg_type = REG_TYPE_VN;
6415 vector_reg:
6416 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6417 if (val == PARSE_FAIL)
6418 {
6419 first_error (_(get_reg_expected_msg (reg_type)));
6420 goto failure;
6421 }
6422 if (vectype.defined & NTA_HASINDEX)
6423 goto failure;
6424
6425 info->reg.regno = val;
6426 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6427 && vectype.type == NT_invtype)
6428 /* Unqualified Pn and Zn registers are allowed in certain
6429 contexts. Rely on F_STRICT qualifier checking to catch
6430 invalid uses. */
6431 info->qualifier = AARCH64_OPND_QLF_NIL;
6432 else
6433 {
6434 info->qualifier = vectype_to_qualifier (&vectype);
6435 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6436 goto failure;
6437 }
6438 break;
6439
6440 case AARCH64_OPND_VdD1:
6441 case AARCH64_OPND_VnD1:
6442 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6443 if (val == PARSE_FAIL)
6444 {
6445 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6446 goto failure;
6447 }
6448 if (vectype.type != NT_d || vectype.index != 1)
6449 {
6450 set_fatal_syntax_error
6451 (_("the top half of a 128-bit FP/SIMD register is expected"));
6452 goto failure;
6453 }
6454 info->reg.regno = val;
6455 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6456 here; it is correct for the purpose of encoding/decoding since
6457 only the register number is explicitly encoded in the related
6458 instructions, although this appears a bit hacky. */
6459 info->qualifier = AARCH64_OPND_QLF_S_D;
6460 break;
6461
6462 case AARCH64_OPND_SVE_Zm3_INDEX:
6463 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6464 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6465 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6466 case AARCH64_OPND_SVE_Zm4_INDEX:
6467 case AARCH64_OPND_SVE_Zn_INDEX:
6468 reg_type = REG_TYPE_ZN;
6469 goto vector_reg_index;
6470
6471 case AARCH64_OPND_Ed:
6472 case AARCH64_OPND_En:
6473 case AARCH64_OPND_Em:
6474 case AARCH64_OPND_Em16:
6475 case AARCH64_OPND_SM3_IMM2:
6476 reg_type = REG_TYPE_VN;
6477 vector_reg_index:
6478 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6479 if (val == PARSE_FAIL)
6480 {
6481 first_error (_(get_reg_expected_msg (reg_type)));
6482 goto failure;
6483 }
6484 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6485 goto failure;
6486
6487 info->reglane.regno = val;
6488 info->reglane.index = vectype.index;
6489 info->qualifier = vectype_to_qualifier (&vectype);
6490 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6491 goto failure;
6492 break;
6493
6494 case AARCH64_OPND_SVE_ZnxN:
6495 case AARCH64_OPND_SVE_ZtxN:
6496 reg_type = REG_TYPE_ZN;
6497 goto vector_reg_list;
6498
6499 case AARCH64_OPND_LVn:
6500 case AARCH64_OPND_LVt:
6501 case AARCH64_OPND_LVt_AL:
6502 case AARCH64_OPND_LEt:
6503 reg_type = REG_TYPE_VN;
6504 vector_reg_list:
6505 if (reg_type == REG_TYPE_ZN
6506 && get_opcode_dependent_value (opcode) == 1
6507 && *str != '{')
6508 {
6509 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6510 if (val == PARSE_FAIL)
6511 {
6512 first_error (_(get_reg_expected_msg (reg_type)));
6513 goto failure;
6514 }
6515 info->reglist.first_regno = val;
6516 info->reglist.num_regs = 1;
6517 }
6518 else
6519 {
6520 val = parse_vector_reg_list (&str, reg_type, &vectype);
6521 if (val == PARSE_FAIL)
6522 goto failure;
6523
6524 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6525 {
6526 set_fatal_syntax_error (_("invalid register list"));
6527 goto failure;
6528 }
6529
6530 if (vectype.width != 0 && *str != ',')
6531 {
6532 set_fatal_syntax_error
6533 (_("expected element type rather than vector type"));
6534 goto failure;
6535 }
6536
6537 info->reglist.first_regno = (val >> 2) & 0x1f;
6538 info->reglist.num_regs = (val & 0x3) + 1;
6539 }
6540 if (operands[i] == AARCH64_OPND_LEt)
6541 {
6542 if (!(vectype.defined & NTA_HASINDEX))
6543 goto failure;
6544 info->reglist.has_index = 1;
6545 info->reglist.index = vectype.index;
6546 }
6547 else
6548 {
6549 if (vectype.defined & NTA_HASINDEX)
6550 goto failure;
6551 if (!(vectype.defined & NTA_HASTYPE))
6552 {
6553 if (reg_type == REG_TYPE_ZN)
6554 set_fatal_syntax_error (_("missing type suffix"));
6555 goto failure;
6556 }
6557 }
6558 info->qualifier = vectype_to_qualifier (&vectype);
6559 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6560 goto failure;
6561 break;
6562
6563 case AARCH64_OPND_CRn:
6564 case AARCH64_OPND_CRm:
6565 {
6566 char prefix = *(str++);
6567 if (prefix != 'c' && prefix != 'C')
6568 goto failure;
6569
6570 po_imm_nc_or_fail ();
6571 if (val > 15)
6572 {
6573 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6574 goto failure;
6575 }
6576 info->qualifier = AARCH64_OPND_QLF_CR;
6577 info->imm.value = val;
6578 break;
6579 }
6580
6581 case AARCH64_OPND_SHLL_IMM:
6582 case AARCH64_OPND_IMM_VLSR:
6583 po_imm_or_fail (1, 64);
6584 info->imm.value = val;
6585 break;
6586
6587 case AARCH64_OPND_CCMP_IMM:
6588 case AARCH64_OPND_SIMM5:
6589 case AARCH64_OPND_FBITS:
6590 case AARCH64_OPND_TME_UIMM16:
6591 case AARCH64_OPND_UIMM4:
6592 case AARCH64_OPND_UIMM4_ADDG:
6593 case AARCH64_OPND_UIMM10:
6594 case AARCH64_OPND_UIMM3_OP1:
6595 case AARCH64_OPND_UIMM3_OP2:
6596 case AARCH64_OPND_IMM_VLSL:
6597 case AARCH64_OPND_IMM:
6598 case AARCH64_OPND_IMM_2:
6599 case AARCH64_OPND_WIDTH:
6600 case AARCH64_OPND_SVE_INV_LIMM:
6601 case AARCH64_OPND_SVE_LIMM:
6602 case AARCH64_OPND_SVE_LIMM_MOV:
6603 case AARCH64_OPND_SVE_SHLIMM_PRED:
6604 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6605 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6606 case AARCH64_OPND_SVE_SHRIMM_PRED:
6607 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6608 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6609 case AARCH64_OPND_SVE_SIMM5:
6610 case AARCH64_OPND_SVE_SIMM5B:
6611 case AARCH64_OPND_SVE_SIMM6:
6612 case AARCH64_OPND_SVE_SIMM8:
6613 case AARCH64_OPND_SVE_UIMM3:
6614 case AARCH64_OPND_SVE_UIMM7:
6615 case AARCH64_OPND_SVE_UIMM8:
6616 case AARCH64_OPND_SVE_UIMM8_53:
6617 case AARCH64_OPND_IMM_ROT1:
6618 case AARCH64_OPND_IMM_ROT2:
6619 case AARCH64_OPND_IMM_ROT3:
6620 case AARCH64_OPND_SVE_IMM_ROT1:
6621 case AARCH64_OPND_SVE_IMM_ROT2:
6622 case AARCH64_OPND_SVE_IMM_ROT3:
6623 po_imm_nc_or_fail ();
6624 info->imm.value = val;
6625 break;
6626
6627 case AARCH64_OPND_SVE_AIMM:
6628 case AARCH64_OPND_SVE_ASIMM:
6629 po_imm_nc_or_fail ();
6630 info->imm.value = val;
6631 skip_whitespace (str);
6632 if (skip_past_comma (&str))
6633 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6634 else
6635 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6636 break;
6637
6638 case AARCH64_OPND_SVE_PATTERN:
6639 po_enum_or_fail (aarch64_sve_pattern_array);
6640 info->imm.value = val;
6641 break;
6642
6643 case AARCH64_OPND_SVE_PATTERN_SCALED:
6644 po_enum_or_fail (aarch64_sve_pattern_array);
6645 info->imm.value = val;
6646 if (skip_past_comma (&str)
6647 && !parse_shift (&str, info, SHIFTED_MUL))
6648 goto failure;
6649 if (!info->shifter.operator_present)
6650 {
6651 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6652 info->shifter.kind = AARCH64_MOD_MUL;
6653 info->shifter.amount = 1;
6654 }
6655 break;
6656
6657 case AARCH64_OPND_SVE_PRFOP:
6658 po_enum_or_fail (aarch64_sve_prfop_array);
6659 info->imm.value = val;
6660 break;
6661
6662 case AARCH64_OPND_UIMM7:
6663 po_imm_or_fail (0, 127);
6664 info->imm.value = val;
6665 break;
6666
6667 case AARCH64_OPND_IDX:
6668 case AARCH64_OPND_MASK:
6669 case AARCH64_OPND_BIT_NUM:
6670 case AARCH64_OPND_IMMR:
6671 case AARCH64_OPND_IMMS:
6672 po_imm_or_fail (0, 63);
6673 info->imm.value = val;
6674 break;
6675
6676 case AARCH64_OPND_IMM0:
6677 po_imm_nc_or_fail ();
6678 if (val != 0)
6679 {
6680 set_fatal_syntax_error (_("immediate zero expected"));
6681 goto failure;
6682 }
6683 info->imm.value = 0;
6684 break;
6685
6686 case AARCH64_OPND_FPIMM0:
6687 {
6688 int qfloat;
6689 bool res1 = false, res2 = false;
6690 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6691 it is probably not worth the effort to support it. */
6692 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6693 imm_reg_type))
6694 && (error_p ()
6695 || !(res2 = parse_constant_immediate (&str, &val,
6696 imm_reg_type))))
6697 goto failure;
6698 if ((res1 && qfloat == 0) || (res2 && val == 0))
6699 {
6700 info->imm.value = 0;
6701 info->imm.is_fp = 1;
6702 break;
6703 }
6704 set_fatal_syntax_error (_("immediate zero expected"));
6705 goto failure;
6706 }
6707
6708 case AARCH64_OPND_IMM_MOV:
6709 {
6710 char *saved = str;
6711 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6712 reg_name_p (str, REG_TYPE_VN))
6713 goto failure;
6714 str = saved;
6715 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6716 GE_OPT_PREFIX, REJECT_ABSENT,
6717 NORMAL_RESOLUTION));
6718 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6719 later. fix_mov_imm_insn will try to determine a machine
6720 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6721 message if the immediate cannot be moved by a single
6722 instruction. */
6723 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6724 inst.base.operands[i].skip = 1;
6725 }
6726 break;
6727
6728 case AARCH64_OPND_SIMD_IMM:
6729 case AARCH64_OPND_SIMD_IMM_SFT:
6730 if (! parse_big_immediate (&str, &val, imm_reg_type))
6731 goto failure;
6732 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6733 /* addr_off_p */ 0,
6734 /* need_libopcodes_p */ 1,
6735 /* skip_p */ 1);
6736 /* Parse shift.
6737 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6738 shift, we don't check it here; we leave the checking to
6739 the libopcodes (operand_general_constraint_met_p). By
6740 doing this, we achieve better diagnostics. */
6741 if (skip_past_comma (&str)
6742 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6743 goto failure;
6744 if (!info->shifter.operator_present
6745 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6746 {
6747 /* Default to LSL if not present. Libopcodes prefers shifter
6748 kind to be explicit. */
6749 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6750 info->shifter.kind = AARCH64_MOD_LSL;
6751 }
6752 break;
6753
6754 case AARCH64_OPND_FPIMM:
6755 case AARCH64_OPND_SIMD_FPIMM:
6756 case AARCH64_OPND_SVE_FPIMM8:
6757 {
6758 int qfloat;
6759 bool dp_p;
6760
6761 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6762 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6763 || !aarch64_imm_float_p (qfloat))
6764 {
6765 if (!error_p ())
6766 set_fatal_syntax_error (_("invalid floating-point"
6767 " constant"));
6768 goto failure;
6769 }
6770 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6771 inst.base.operands[i].imm.is_fp = 1;
6772 }
6773 break;
6774
6775 case AARCH64_OPND_SVE_I1_HALF_ONE:
6776 case AARCH64_OPND_SVE_I1_HALF_TWO:
6777 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6778 {
6779 int qfloat;
6780 bool dp_p;
6781
6782 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6783 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6784 {
6785 if (!error_p ())
6786 set_fatal_syntax_error (_("invalid floating-point"
6787 " constant"));
6788 goto failure;
6789 }
6790 inst.base.operands[i].imm.value = qfloat;
6791 inst.base.operands[i].imm.is_fp = 1;
6792 }
6793 break;
6794
6795 case AARCH64_OPND_LIMM:
6796 po_misc_or_fail (parse_shifter_operand (&str, info,
6797 SHIFTED_LOGIC_IMM));
6798 if (info->shifter.operator_present)
6799 {
6800 set_fatal_syntax_error
6801 (_("shift not allowed for bitmask immediate"));
6802 goto failure;
6803 }
6804 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6805 /* addr_off_p */ 0,
6806 /* need_libopcodes_p */ 1,
6807 /* skip_p */ 1);
6808 break;
6809
6810 case AARCH64_OPND_AIMM:
6811 if (opcode->op == OP_ADD)
6812 /* ADD may have relocation types. */
6813 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6814 SHIFTED_ARITH_IMM));
6815 else
6816 po_misc_or_fail (parse_shifter_operand (&str, info,
6817 SHIFTED_ARITH_IMM));
6818 switch (inst.reloc.type)
6819 {
6820 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6821 info->shifter.amount = 12;
6822 break;
6823 case BFD_RELOC_UNUSED:
6824 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6825 if (info->shifter.kind != AARCH64_MOD_NONE)
6826 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6827 inst.reloc.pc_rel = 0;
6828 break;
6829 default:
6830 break;
6831 }
6832 info->imm.value = 0;
6833 if (!info->shifter.operator_present)
6834 {
6835 /* Default to LSL if not present. Libopcodes prefers shifter
6836 kind to be explicit. */
6837 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6838 info->shifter.kind = AARCH64_MOD_LSL;
6839 }
6840 break;
6841
6842 case AARCH64_OPND_HALF:
6843 {
6844 /* #<imm16> or relocation. */
6845 int internal_fixup_p;
6846 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6847 if (internal_fixup_p)
6848 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6849 skip_whitespace (str);
6850 if (skip_past_comma (&str))
6851 {
6852 /* {, LSL #<shift>} */
6853 if (! aarch64_gas_internal_fixup_p ())
6854 {
6855 set_fatal_syntax_error (_("can't mix relocation modifier "
6856 "with explicit shift"));
6857 goto failure;
6858 }
6859 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6860 }
6861 else
6862 inst.base.operands[i].shifter.amount = 0;
6863 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6864 inst.base.operands[i].imm.value = 0;
6865 if (! process_movw_reloc_info ())
6866 goto failure;
6867 }
6868 break;
6869
6870 case AARCH64_OPND_EXCEPTION:
6871 case AARCH64_OPND_UNDEFINED:
6872 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6873 imm_reg_type));
6874 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6875 /* addr_off_p */ 0,
6876 /* need_libopcodes_p */ 0,
6877 /* skip_p */ 1);
6878 break;
6879
6880 case AARCH64_OPND_NZCV:
6881 {
6882 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6883 if (nzcv != NULL)
6884 {
6885 str += 4;
6886 info->imm.value = nzcv->value;
6887 break;
6888 }
6889 po_imm_or_fail (0, 15);
6890 info->imm.value = val;
6891 }
6892 break;
6893
6894 case AARCH64_OPND_COND:
6895 case AARCH64_OPND_COND1:
6896 {
6897 char *start = str;
6898 do
6899 str++;
6900 while (ISALPHA (*str));
6901 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6902 if (info->cond == NULL)
6903 {
6904 set_syntax_error (_("invalid condition"));
6905 goto failure;
6906 }
6907 else if (operands[i] == AARCH64_OPND_COND1
6908 && (info->cond->value & 0xe) == 0xe)
6909 {
6910 /* Do not allow AL or NV. */
6911 set_default_error ();
6912 goto failure;
6913 }
6914 }
6915 break;
6916
6917 case AARCH64_OPND_ADDR_ADRP:
6918 po_misc_or_fail (parse_adrp (&str));
6919 /* Clear the value as operand needs to be relocated. */
6920 info->imm.value = 0;
6921 break;
6922
6923 case AARCH64_OPND_ADDR_PCREL14:
6924 case AARCH64_OPND_ADDR_PCREL19:
6925 case AARCH64_OPND_ADDR_PCREL21:
6926 case AARCH64_OPND_ADDR_PCREL26:
6927 po_misc_or_fail (parse_address (&str, info));
6928 if (!info->addr.pcrel)
6929 {
6930 set_syntax_error (_("invalid pc-relative address"));
6931 goto failure;
6932 }
6933 if (inst.gen_lit_pool
6934 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6935 {
6936 /* Only permit "=value" in the literal load instructions.
6937 The literal will be generated by programmer_friendly_fixup. */
6938 set_syntax_error (_("invalid use of \"=immediate\""));
6939 goto failure;
6940 }
6941 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6942 {
6943 set_syntax_error (_("unrecognized relocation suffix"));
6944 goto failure;
6945 }
6946 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6947 {
6948 info->imm.value = inst.reloc.exp.X_add_number;
6949 inst.reloc.type = BFD_RELOC_UNUSED;
6950 }
6951 else
6952 {
6953 info->imm.value = 0;
6954 if (inst.reloc.type == BFD_RELOC_UNUSED)
6955 switch (opcode->iclass)
6956 {
6957 case compbranch:
6958 case condbranch:
6959 /* e.g. CBZ or B.COND */
6960 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6961 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6962 break;
6963 case testbranch:
6964 /* e.g. TBZ */
6965 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6966 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6967 break;
6968 case branch_imm:
6969 /* e.g. B or BL */
6970 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6971 inst.reloc.type =
6972 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6973 : BFD_RELOC_AARCH64_JUMP26;
6974 break;
6975 case loadlit:
6976 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6977 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6978 break;
6979 case pcreladdr:
6980 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6981 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6982 break;
6983 default:
6984 gas_assert (0);
6985 abort ();
6986 }
6987 inst.reloc.pc_rel = 1;
6988 }
6989 break;
6990
6991 case AARCH64_OPND_ADDR_SIMPLE:
6992 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6993 {
6994 /* [<Xn|SP>{, #<simm>}] */
6995 char *start = str;
6996 /* First use the normal address-parsing routines, to get
6997 the usual syntax errors. */
6998 po_misc_or_fail (parse_address (&str, info));
6999 if (info->addr.pcrel || info->addr.offset.is_reg
7000 || !info->addr.preind || info->addr.postind
7001 || info->addr.writeback)
7002 {
7003 set_syntax_error (_("invalid addressing mode"));
7004 goto failure;
7005 }
7006
7007 /* Then retry, matching the specific syntax of these addresses. */
7008 str = start;
7009 po_char_or_fail ('[');
7010 po_reg_or_fail (REG_TYPE_R64_SP);
7011 /* Accept optional ", #0". */
7012 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7013 && skip_past_char (&str, ','))
7014 {
7015 skip_past_char (&str, '#');
7016 if (! skip_past_char (&str, '0'))
7017 {
7018 set_fatal_syntax_error
7019 (_("the optional immediate offset can only be 0"));
7020 goto failure;
7021 }
7022 }
7023 po_char_or_fail (']');
7024 break;
7025 }
7026
7027 case AARCH64_OPND_ADDR_REGOFF:
7028 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7029 po_misc_or_fail (parse_address (&str, info));
7030 regoff_addr:
7031 if (info->addr.pcrel || !info->addr.offset.is_reg
7032 || !info->addr.preind || info->addr.postind
7033 || info->addr.writeback)
7034 {
7035 set_syntax_error (_("invalid addressing mode"));
7036 goto failure;
7037 }
7038 if (!info->shifter.operator_present)
7039 {
7040 /* Default to LSL if not present. Libopcodes prefers shifter
7041 kind to be explicit. */
7042 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7043 info->shifter.kind = AARCH64_MOD_LSL;
7044 }
7045 /* Qualifier to be deduced by libopcodes. */
7046 break;
7047
7048 case AARCH64_OPND_ADDR_SIMM7:
7049 po_misc_or_fail (parse_address (&str, info));
7050 if (info->addr.pcrel || info->addr.offset.is_reg
7051 || (!info->addr.preind && !info->addr.postind))
7052 {
7053 set_syntax_error (_("invalid addressing mode"));
7054 goto failure;
7055 }
7056 if (inst.reloc.type != BFD_RELOC_UNUSED)
7057 {
7058 set_syntax_error (_("relocation not allowed"));
7059 goto failure;
7060 }
7061 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7062 /* addr_off_p */ 1,
7063 /* need_libopcodes_p */ 1,
7064 /* skip_p */ 0);
7065 break;
7066
7067 case AARCH64_OPND_ADDR_SIMM9:
7068 case AARCH64_OPND_ADDR_SIMM9_2:
7069 case AARCH64_OPND_ADDR_SIMM11:
7070 case AARCH64_OPND_ADDR_SIMM13:
7071 po_misc_or_fail (parse_address (&str, info));
7072 if (info->addr.pcrel || info->addr.offset.is_reg
7073 || (!info->addr.preind && !info->addr.postind)
7074 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7075 && info->addr.writeback))
7076 {
7077 set_syntax_error (_("invalid addressing mode"));
7078 goto failure;
7079 }
7080 if (inst.reloc.type != BFD_RELOC_UNUSED)
7081 {
7082 set_syntax_error (_("relocation not allowed"));
7083 goto failure;
7084 }
7085 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7086 /* addr_off_p */ 1,
7087 /* need_libopcodes_p */ 1,
7088 /* skip_p */ 0);
7089 break;
7090
7091 case AARCH64_OPND_ADDR_SIMM10:
7092 case AARCH64_OPND_ADDR_OFFSET:
7093 po_misc_or_fail (parse_address (&str, info));
7094 if (info->addr.pcrel || info->addr.offset.is_reg
7095 || !info->addr.preind || info->addr.postind)
7096 {
7097 set_syntax_error (_("invalid addressing mode"));
7098 goto failure;
7099 }
7100 if (inst.reloc.type != BFD_RELOC_UNUSED)
7101 {
7102 set_syntax_error (_("relocation not allowed"));
7103 goto failure;
7104 }
7105 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7106 /* addr_off_p */ 1,
7107 /* need_libopcodes_p */ 1,
7108 /* skip_p */ 0);
7109 break;
7110
7111 case AARCH64_OPND_ADDR_UIMM12:
7112 po_misc_or_fail (parse_address (&str, info));
7113 if (info->addr.pcrel || info->addr.offset.is_reg
7114 || !info->addr.preind || info->addr.writeback)
7115 {
7116 set_syntax_error (_("invalid addressing mode"));
7117 goto failure;
7118 }
7119 if (inst.reloc.type == BFD_RELOC_UNUSED)
7120 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7121 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7122 || (inst.reloc.type
7123 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7124 || (inst.reloc.type
7125 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7126 || (inst.reloc.type
7127 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7128 || (inst.reloc.type
7129 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7130 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7131 /* Leave qualifier to be determined by libopcodes. */
7132 break;
7133
7134 case AARCH64_OPND_SIMD_ADDR_POST:
7135 /* [<Xn|SP>], <Xm|#<amount>> */
7136 po_misc_or_fail (parse_address (&str, info));
7137 if (!info->addr.postind || !info->addr.writeback)
7138 {
7139 set_syntax_error (_("invalid addressing mode"));
7140 goto failure;
7141 }
7142 if (!info->addr.offset.is_reg)
7143 {
7144 if (inst.reloc.exp.X_op == O_constant)
7145 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7146 else
7147 {
7148 set_fatal_syntax_error
7149 (_("writeback value must be an immediate constant"));
7150 goto failure;
7151 }
7152 }
7153 /* No qualifier. */
7154 break;
7155
7156 case AARCH64_OPND_SME_SM_ZA:
7157 /* { SM | ZA } */
7158 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7159 {
7160 set_syntax_error (_("unknown or missing PSTATE field name"));
7161 goto failure;
7162 }
7163 info->reg.regno = val;
7164 break;
7165
7166 case AARCH64_OPND_SME_PnT_Wm_imm:
7167 /* <Pn>.<T>[<Wm>, #<imm>] */
7168 {
7169 int index_base_reg;
7170 int imm;
7171 val = parse_sme_pred_reg_with_index (&str,
7172 &index_base_reg,
7173 &imm,
7174 &qualifier);
7175 if (val == PARSE_FAIL)
7176 goto failure;
7177
7178 info->za_tile_vector.regno = val;
7179 info->za_tile_vector.index.regno = index_base_reg;
7180 info->za_tile_vector.index.imm = imm;
7181 info->qualifier = qualifier;
7182 break;
7183 }
7184
7185 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7186 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7187 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7188 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7189 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7190 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7191 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7192 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7193 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7194 case AARCH64_OPND_SVE_ADDR_RI_U6:
7195 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7196 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7197 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7198 /* [X<n>{, #imm, MUL VL}]
7199 [X<n>{, #imm}]
7200 but recognizing SVE registers. */
7201 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7202 &offset_qualifier));
7203 if (base_qualifier != AARCH64_OPND_QLF_X)
7204 {
7205 set_syntax_error (_("invalid addressing mode"));
7206 goto failure;
7207 }
7208 sve_regimm:
7209 if (info->addr.pcrel || info->addr.offset.is_reg
7210 || !info->addr.preind || info->addr.writeback)
7211 {
7212 set_syntax_error (_("invalid addressing mode"));
7213 goto failure;
7214 }
7215 if (inst.reloc.type != BFD_RELOC_UNUSED
7216 || inst.reloc.exp.X_op != O_constant)
7217 {
7218 /* Make sure this has priority over
7219 "invalid addressing mode". */
7220 set_fatal_syntax_error (_("constant offset required"));
7221 goto failure;
7222 }
7223 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7224 break;
7225
7226 case AARCH64_OPND_SVE_ADDR_R:
7227 /* [<Xn|SP>{, <R><m>}]
7228 but recognizing SVE registers. */
7229 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7230 &offset_qualifier));
7231 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7232 {
7233 offset_qualifier = AARCH64_OPND_QLF_X;
7234 info->addr.offset.is_reg = 1;
7235 info->addr.offset.regno = 31;
7236 }
7237 else if (base_qualifier != AARCH64_OPND_QLF_X
7238 || offset_qualifier != AARCH64_OPND_QLF_X)
7239 {
7240 set_syntax_error (_("invalid addressing mode"));
7241 goto failure;
7242 }
7243 goto regoff_addr;
7244
7245 case AARCH64_OPND_SVE_ADDR_RR:
7246 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7247 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7248 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7249 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7250 case AARCH64_OPND_SVE_ADDR_RX:
7251 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7252 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7253 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7254 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7255 but recognizing SVE registers. */
7256 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7257 &offset_qualifier));
7258 if (base_qualifier != AARCH64_OPND_QLF_X
7259 || offset_qualifier != AARCH64_OPND_QLF_X)
7260 {
7261 set_syntax_error (_("invalid addressing mode"));
7262 goto failure;
7263 }
7264 goto regoff_addr;
7265
7266 case AARCH64_OPND_SVE_ADDR_RZ:
7267 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7268 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7269 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7270 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7271 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7272 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7273 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7274 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7275 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7276 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7277 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7278 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7279 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7280 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7281 &offset_qualifier));
7282 if (base_qualifier != AARCH64_OPND_QLF_X
7283 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7284 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7285 {
7286 set_syntax_error (_("invalid addressing mode"));
7287 goto failure;
7288 }
7289 info->qualifier = offset_qualifier;
7290 goto regoff_addr;
7291
7292 case AARCH64_OPND_SVE_ADDR_ZX:
7293 /* [Zn.<T>{, <Xm>}]. */
7294 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7295 &offset_qualifier));
7296 /* Things to check:
7297 base_qualifier either S_S or S_D
7298 offset_qualifier must be X
7299 */
7300 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7301 && base_qualifier != AARCH64_OPND_QLF_S_D)
7302 || offset_qualifier != AARCH64_OPND_QLF_X)
7303 {
7304 set_syntax_error (_("invalid addressing mode"));
7305 goto failure;
7306 }
7307 info->qualifier = base_qualifier;
7308 if (!info->addr.offset.is_reg || info->addr.pcrel
7309 || !info->addr.preind || info->addr.writeback
7310 || info->shifter.operator_present != 0)
7311 {
7312 set_syntax_error (_("invalid addressing mode"));
7313 goto failure;
7314 }
7315 info->shifter.kind = AARCH64_MOD_LSL;
7316 break;
7317
7318
7319 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7320 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7321 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7322 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7323 /* [Z<n>.<T>{, #imm}] */
7324 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7325 &offset_qualifier));
7326 if (base_qualifier != AARCH64_OPND_QLF_S_S
7327 && base_qualifier != AARCH64_OPND_QLF_S_D)
7328 {
7329 set_syntax_error (_("invalid addressing mode"));
7330 goto failure;
7331 }
7332 info->qualifier = base_qualifier;
7333 goto sve_regimm;
7334
7335 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7336 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7337 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7338 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7339 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7340
7341 We don't reject:
7342
7343 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7344
7345 here since we get better error messages by leaving it to
7346 the qualifier checking routines. */
7347 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7348 &offset_qualifier));
7349 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7350 && base_qualifier != AARCH64_OPND_QLF_S_D)
7351 || offset_qualifier != base_qualifier)
7352 {
7353 set_syntax_error (_("invalid addressing mode"));
7354 goto failure;
7355 }
7356 info->qualifier = base_qualifier;
7357 goto regoff_addr;
7358
7359 case AARCH64_OPND_SYSREG:
7360 {
7361 uint32_t sysreg_flags;
7362 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7363 &sysreg_flags)) == PARSE_FAIL)
7364 {
7365 set_syntax_error (_("unknown or missing system register name"));
7366 goto failure;
7367 }
7368 inst.base.operands[i].sysreg.value = val;
7369 inst.base.operands[i].sysreg.flags = sysreg_flags;
7370 break;
7371 }
7372
7373 case AARCH64_OPND_PSTATEFIELD:
7374 {
7375 uint32_t sysreg_flags;
7376 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7377 &sysreg_flags)) == PARSE_FAIL)
7378 {
7379 set_syntax_error (_("unknown or missing PSTATE field name"));
7380 goto failure;
7381 }
7382 inst.base.operands[i].pstatefield = val;
7383 inst.base.operands[i].sysreg.flags = sysreg_flags;
7384 break;
7385 }
7386
7387 case AARCH64_OPND_SYSREG_IC:
7388 inst.base.operands[i].sysins_op =
7389 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7390 goto sys_reg_ins;
7391
7392 case AARCH64_OPND_SYSREG_DC:
7393 inst.base.operands[i].sysins_op =
7394 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7395 goto sys_reg_ins;
7396
7397 case AARCH64_OPND_SYSREG_AT:
7398 inst.base.operands[i].sysins_op =
7399 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7400 goto sys_reg_ins;
7401
7402 case AARCH64_OPND_SYSREG_SR:
7403 inst.base.operands[i].sysins_op =
7404 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7405 goto sys_reg_ins;
7406
7407 case AARCH64_OPND_SYSREG_TLBI:
7408 inst.base.operands[i].sysins_op =
7409 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7410 sys_reg_ins:
7411 if (inst.base.operands[i].sysins_op == NULL)
7412 {
7413 set_fatal_syntax_error ( _("unknown or missing operation name"));
7414 goto failure;
7415 }
7416 break;
7417
7418 case AARCH64_OPND_BARRIER:
7419 case AARCH64_OPND_BARRIER_ISB:
7420 val = parse_barrier (&str);
7421 if (val != PARSE_FAIL
7422 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7423 {
7424 /* ISB only accepts options name 'sy'. */
7425 set_syntax_error
7426 (_("the specified option is not accepted in ISB"));
7427 /* Turn off backtrack as this optional operand is present. */
7428 backtrack_pos = 0;
7429 goto failure;
7430 }
7431 if (val != PARSE_FAIL
7432 && operands[i] == AARCH64_OPND_BARRIER)
7433 {
7434 /* Regular barriers accept options CRm (C0-C15).
7435 DSB nXS barrier variant accepts values > 15. */
7436 if (val < 0 || val > 15)
7437 {
7438 set_syntax_error (_("the specified option is not accepted in DSB"));
7439 goto failure;
7440 }
7441 }
7442 /* This is an extension to accept a 0..15 immediate. */
7443 if (val == PARSE_FAIL)
7444 po_imm_or_fail (0, 15);
7445 info->barrier = aarch64_barrier_options + val;
7446 break;
7447
7448 case AARCH64_OPND_BARRIER_DSB_NXS:
7449 val = parse_barrier (&str);
7450 if (val != PARSE_FAIL)
7451 {
7452 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7453 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7454 {
7455 set_syntax_error (_("the specified option is not accepted in DSB"));
7456 /* Turn off backtrack as this optional operand is present. */
7457 backtrack_pos = 0;
7458 goto failure;
7459 }
7460 }
7461 else
7462 {
7463 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7464 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7465 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7466 goto failure;
7467 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7468 {
7469 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7470 goto failure;
7471 }
7472 }
7473 /* Option index is encoded as 2-bit value in val<3:2>. */
7474 val = (val >> 2) - 4;
7475 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7476 break;
7477
7478 case AARCH64_OPND_PRFOP:
7479 val = parse_pldop (&str);
7480 /* This is an extension to accept a 0..31 immediate. */
7481 if (val == PARSE_FAIL)
7482 po_imm_or_fail (0, 31);
7483 inst.base.operands[i].prfop = aarch64_prfops + val;
7484 break;
7485
7486 case AARCH64_OPND_BARRIER_PSB:
7487 val = parse_barrier_psb (&str, &(info->hint_option));
7488 if (val == PARSE_FAIL)
7489 goto failure;
7490 break;
7491
7492 case AARCH64_OPND_BTI_TARGET:
7493 val = parse_bti_operand (&str, &(info->hint_option));
7494 if (val == PARSE_FAIL)
7495 goto failure;
7496 break;
7497
7498 case AARCH64_OPND_SME_ZAda_2b:
7499 case AARCH64_OPND_SME_ZAda_3b:
7500 val = parse_sme_zada_operand (&str, &qualifier);
7501 if (val == PARSE_FAIL)
7502 goto failure;
7503 info->reg.regno = val;
7504 info->qualifier = qualifier;
7505 break;
7506
7507 case AARCH64_OPND_SME_ZA_HV_idx_src:
7508 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7509 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7510 {
7511 enum sme_hv_slice slice_indicator;
7512 int vector_select_register;
7513 int imm;
7514
7515 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7516 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7517 &slice_indicator,
7518 &vector_select_register,
7519 &imm,
7520 &qualifier);
7521 else
7522 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7523 &vector_select_register,
7524 &imm,
7525 &qualifier);
7526 if (val == PARSE_FAIL)
7527 goto failure;
7528 info->za_tile_vector.regno = val;
7529 info->za_tile_vector.index.regno = vector_select_register;
7530 info->za_tile_vector.index.imm = imm;
7531 info->za_tile_vector.v = slice_indicator;
7532 info->qualifier = qualifier;
7533 break;
7534 }
7535
7536 case AARCH64_OPND_SME_list_of_64bit_tiles:
7537 val = parse_sme_list_of_64bit_tiles (&str);
7538 if (val == PARSE_FAIL)
7539 goto failure;
7540 info->imm.value = val;
7541 break;
7542
7543 case AARCH64_OPND_SME_ZA_array:
7544 {
7545 int imm;
7546 val = parse_sme_za_array (&str, &imm);
7547 if (val == PARSE_FAIL)
7548 goto failure;
7549 info->za_tile_vector.index.regno = val;
7550 info->za_tile_vector.index.imm = imm;
7551 break;
7552 }
7553
7554 case AARCH64_OPND_MOPS_ADDR_Rd:
7555 case AARCH64_OPND_MOPS_ADDR_Rs:
7556 po_char_or_fail ('[');
7557 if (!parse_x0_to_x30 (&str, info))
7558 goto failure;
7559 po_char_or_fail (']');
7560 po_char_or_fail ('!');
7561 break;
7562
7563 case AARCH64_OPND_MOPS_WB_Rn:
7564 if (!parse_x0_to_x30 (&str, info))
7565 goto failure;
7566 po_char_or_fail ('!');
7567 break;
7568
7569 default:
7570 as_fatal (_("unhandled operand code %d"), operands[i]);
7571 }
7572
7573 /* If we get here, this operand was successfully parsed. */
7574 inst.base.operands[i].present = 1;
7575 continue;
7576
7577 failure:
7578 /* The parse routine should already have set the error, but in case
7579 not, set a default one here. */
7580 if (! error_p ())
7581 set_default_error ();
7582
7583 if (! backtrack_pos)
7584 goto parse_operands_return;
7585
7586 {
7587 /* We reach here because this operand is marked as optional, and
7588 either no operand was supplied or the operand was supplied but it
7589 was syntactically incorrect. In the latter case we report an
7590 error. In the former case we perform a few more checks before
7591 dropping through to the code to insert the default operand. */
7592
7593 char *tmp = backtrack_pos;
7594 char endchar = END_OF_INSN;
7595
7596 if (i != (aarch64_num_of_operands (opcode) - 1))
7597 endchar = ',';
7598 skip_past_char (&tmp, ',');
7599
7600 if (*tmp != endchar)
7601 /* The user has supplied an operand in the wrong format. */
7602 goto parse_operands_return;
7603
7604 /* Make sure there is not a comma before the optional operand.
7605 For example the fifth operand of 'sys' is optional:
7606
7607 sys #0,c0,c0,#0, <--- wrong
7608 sys #0,c0,c0,#0 <--- correct. */
7609 if (comma_skipped_p && i && endchar == END_OF_INSN)
7610 {
7611 set_fatal_syntax_error
7612 (_("unexpected comma before the omitted optional operand"));
7613 goto parse_operands_return;
7614 }
7615 }
7616
7617 /* Reaching here means we are dealing with an optional operand that is
7618 omitted from the assembly line. */
7619 gas_assert (optional_operand_p (opcode, i));
7620 info->present = 0;
7621 process_omitted_operand (operands[i], opcode, i, info);
7622
7623 /* Try again, skipping the optional operand at backtrack_pos. */
7624 str = backtrack_pos;
7625 backtrack_pos = 0;
7626
7627 /* Clear any error record after the omitted optional operand has been
7628 successfully handled. */
7629 clear_error ();
7630 }
7631
7632 /* Check if we have parsed all the operands. */
7633 if (*str != '\0' && ! error_p ())
7634 {
7635 /* Set I to the index of the last present operand; this is
7636 for the purpose of diagnostics. */
7637 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7638 ;
7639 set_fatal_syntax_error
7640 (_("unexpected characters following instruction"));
7641 }
7642
7643 parse_operands_return:
7644
7645 if (error_p ())
7646 {
7647 DEBUG_TRACE ("parsing FAIL: %s - %s",
7648 operand_mismatch_kind_names[get_error_kind ()],
7649 get_error_message ());
7650 /* Record the operand error properly; this is useful when there
7651 are multiple instruction templates for a mnemonic name, so that
7652 later on, we can select the error that most closely describes
7653 the problem. */
7654 record_operand_error (opcode, i, get_error_kind (),
7655 get_error_message ());
7656 return false;
7657 }
7658 else
7659 {
7660 DEBUG_TRACE ("parsing SUCCESS");
7661 return true;
7662 }
7663 }
7664
7665 /* It does some fix-up to provide some programmer friendly feature while
7666 keeping the libopcodes happy, i.e. libopcodes only accepts
7667 the preferred architectural syntax.
7668 Return FALSE if there is any failure; otherwise return TRUE. */
7669
7670 static bool
7671 programmer_friendly_fixup (aarch64_instruction *instr)
7672 {
7673 aarch64_inst *base = &instr->base;
7674 const aarch64_opcode *opcode = base->opcode;
7675 enum aarch64_op op = opcode->op;
7676 aarch64_opnd_info *operands = base->operands;
7677
7678 DEBUG_TRACE ("enter");
7679
7680 switch (opcode->iclass)
7681 {
7682 case testbranch:
7683 /* TBNZ Xn|Wn, #uimm6, label
7684 Test and Branch Not Zero: conditionally jumps to label if bit number
7685 uimm6 in register Xn is not zero. The bit number implies the width of
7686 the register, which may be written and should be disassembled as Wn if
7687 uimm is less than 32. */
7688 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7689 {
7690 if (operands[1].imm.value >= 32)
7691 {
7692 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7693 0, 31);
7694 return false;
7695 }
7696 operands[0].qualifier = AARCH64_OPND_QLF_X;
7697 }
7698 break;
7699 case loadlit:
7700 /* LDR Wt, label | =value
7701 As a convenience assemblers will typically permit the notation
7702 "=value" in conjunction with the pc-relative literal load instructions
7703 to automatically place an immediate value or symbolic address in a
7704 nearby literal pool and generate a hidden label which references it.
7705 ISREG has been set to 0 in the case of =value. */
7706 if (instr->gen_lit_pool
7707 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7708 {
7709 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7710 if (op == OP_LDRSW_LIT)
7711 size = 4;
7712 if (instr->reloc.exp.X_op != O_constant
7713 && instr->reloc.exp.X_op != O_big
7714 && instr->reloc.exp.X_op != O_symbol)
7715 {
7716 record_operand_error (opcode, 1,
7717 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7718 _("constant expression expected"));
7719 return false;
7720 }
7721 if (! add_to_lit_pool (&instr->reloc.exp, size))
7722 {
7723 record_operand_error (opcode, 1,
7724 AARCH64_OPDE_OTHER_ERROR,
7725 _("literal pool insertion failed"));
7726 return false;
7727 }
7728 }
7729 break;
7730 case log_shift:
7731 case bitfield:
7732 /* UXT[BHW] Wd, Wn
7733 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7734 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7735 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7736 A programmer-friendly assembler should accept a destination Xd in
7737 place of Wd, however that is not the preferred form for disassembly.
7738 */
7739 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7740 && operands[1].qualifier == AARCH64_OPND_QLF_W
7741 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7742 operands[0].qualifier = AARCH64_OPND_QLF_W;
7743 break;
7744
7745 case addsub_ext:
7746 {
7747 /* In the 64-bit form, the final register operand is written as Wm
7748 for all but the (possibly omitted) UXTX/LSL and SXTX
7749 operators.
7750 As a programmer-friendly assembler, we accept e.g.
7751 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7752 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7753 int idx = aarch64_operand_index (opcode->operands,
7754 AARCH64_OPND_Rm_EXT);
7755 gas_assert (idx == 1 || idx == 2);
7756 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7757 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7758 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7759 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7760 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7761 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7762 }
7763 break;
7764
7765 default:
7766 break;
7767 }
7768
7769 DEBUG_TRACE ("exit with SUCCESS");
7770 return true;
7771 }
7772
/* Check for loads and stores that will cause unpredictable behavior.

   INSTR is the fully-parsed instruction and STR the original source text
   (used only for diagnostics).  Emits as_warn for the CONSTRAINED
   UNPREDICTABLE register combinations; never rejects the instruction.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is Rt, operand 1 the address.  SP as base is exempt.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs, operands 0/1 are Rt/Rt2 and operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): opcode bit 22 is treated as the load/store selector
	 here and below -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 selects the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7868
7869 static void
7870 force_automatic_sequence_close (void)
7871 {
7872 struct aarch64_segment_info_type *tc_seg_info;
7873
7874 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7875 if (tc_seg_info->insn_sequence.instr)
7876 {
7877 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7878 _("previous `%s' sequence has not been closed"),
7879 tc_seg_info->insn_sequence.instr->opcode->name);
7880 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7881 }
7882 }
7883
7884 /* A wrapper function to interface with libopcodes on encoding and
7885 record the error message if there is any.
7886
7887 Return TRUE on success; otherwise return FALSE. */
7888
7889 static bool
7890 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7891 aarch64_insn *code)
7892 {
7893 aarch64_operand_error error_info;
7894 memset (&error_info, '\0', sizeof (error_info));
7895 error_info.kind = AARCH64_OPDE_NIL;
7896 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7897 && !error_info.non_fatal)
7898 return true;
7899
7900 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7901 record_operand_error_info (opcode, &error_info);
7902 return error_info.non_fatal;
7903 }
7904
#ifdef DEBUG_AARCH64
/* Debug helper: print one line per operand of OPCODE, using the short
   operand name when available and the long description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *label = aarch64_get_operand_name (opcode->operands[i]);

      if (label[0] == '\0')
	label = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, label);
    }
}
#endif /* DEBUG_AARCH64 */
7920
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  DOT remembers the first '.' (condition or
     qualifier suffix separator).  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* Presumably handles "name .req reg" style alias definitions -- if it
     consumed the statement there is no instruction to assemble.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* The condition code was parsed before the reset; preserve it.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; on the first template that succeeds
	 we emit and return.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8076
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every input line; forget the label tracked for
   alignment in md_assemble.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8084
/* Record SYM as the most recently seen label (so md_assemble can
   re-anchor it to the instruction frag) and emit DWARF line info.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8092
/* Section-change hook: close any instruction sequence still open in the
   outgoing section.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8099
8100 int
8101 aarch64_data_in_code (void)
8102 {
8103 if (startswith (input_line_pointer + 1, "data:"))
8104 {
8105 *input_line_pointer = '/';
8106 input_line_pointer += 5;
8107 *input_line_pointer = 0;
8108 return 1;
8109 }
8110
8111 return 0;
8112 }
8113
/* Canonicalize NAME in place by stripping a trailing "/data" marker
   (the counterpart of aarch64_data_in_code).  A name that consists of
   nothing but the marker is left untouched.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + (len - 5), "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8124
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF makes a canonical entry, REGDEF_ALIAS an alternative name for
   the same register number (distinguished by the final field).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
/* REGNUM builds "p<n>" (e.g. x0); REGNUMS builds "p<n><s>" (e.g. za0h).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
/* Expand a bank of 16 / 31 / 32 numbered registers in one go.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately omitted: number 31 is
     only reachable via the explicit sp/wsp/xzr/wzr entries below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZA), REGSET16 (ZA, ZA),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
};

/* NOTE(review): REGNUMS and REGSET16S are left defined while the other
   helpers are #undef'd -- confirm nothing later relies on them before
   tidying.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8210
/* Single-letter helpers for the table below: an uppercase letter means
   the corresponding condition flag is set (1), lowercase means clear
   (0).  B packs the four flags into the 4-bit NZCV immediate, N in the
   most significant bit.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 case-sensitive spellings of the NZCV flags operand, mapped to
   their immediate encodings.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8248
8249 /* MD interface: bits in the object file. */
8251
8252 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8253 for use in the a.out file, and stores them in the array pointed to by buf.
8254 This knows about the endian-ness of the target machine and does
8255 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8256 2 (short) and 4 (long) Floating numbers are put out as a series of
8257 LITTLENUMS (shorts, here at least). */
8258
8259 void
8260 md_number_to_chars (char *buf, valueT val, int n)
8261 {
8262 if (target_big_endian)
8263 number_to_chars_bigendian (buf, val, n);
8264 else
8265 number_to_chars_littleendian (buf, val, n);
8266 }
8267
8268 /* MD interface: Sections. */
8269
8270 /* Estimate the size of a frag before relaxing. Assume everything fits in
8271 4 bytes. */
8272
/* Every AArch64 instruction is 4 bytes, so no real relaxation estimate
   is needed; record 4 as the variable size and return it.  */
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
8279
8280 /* Round up a section size to the appropriate boundary. */
8281
/* Round up a section size to the appropriate boundary.
   No extra padding is required, so SIZE is returned unchanged.  */
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8287
8288 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8289 of an rs_align_code fragment.
8290
8291 Here we fill the frag with the appropriate info for padding the
8292 output stream. The resulting frag will consist of a fixed (fr_fix)
8293 and of a repeating (fr_var) part.
8294
8295 The fixed content is always emitted before the repeating content and
8296 these two parts are used as follows in constructing the output:
8297 - the fixed part will be used to align to a valid instruction word
8298 boundary, in case that we start at a misaligned address; as no
8299 executable instruction can live at the misaligned location, we
8300 simply fill with zeros;
8301 - the variable part will be used to cover the remaining padding and
8302 we fill using the AArch64 NOP instruction.
8303
8304 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8305 enough storage space for up to 3 bytes for padding the back to a valid
8306 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8307
/* Fill an rs_align_code frag: zero bytes up to the next 4-byte
   instruction boundary (fixed part), then one NOP as the repeating
   pattern (variable part).  See the comment block above for details.  */
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag's address.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes below the next 4-byte boundary cannot hold an instruction;
     zero-fill them and grow the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* Store the NOP pattern once; the writer repeats it fr_var at a time.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8345
8346 /* Perform target specific initialisation of a frag.
8347 Note - despite the name this initialisation is not done when the frag
8348 is created, but only when its type is assigned. A frag can be created
8349 and used a long time before its type is set, so beware of assuming that
8350 this initialisation is performed first. */
8351
#ifndef OBJ_ELF
/* Non-ELF targets have no mapping symbols, so frag initialisation is a
   no-op.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the $x/$d mapping state appropriate to the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8391
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts at SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8400 #endif /* OBJ_ELF */
8401
8402 /* Convert REGNAME to a DWARF-2 register number. */
8403
8404 int
8405 tc_aarch64_regname_to_dw2regnum (char *regname)
8406 {
8407 const reg_entry *reg = parse_reg (®name);
8408 if (reg == NULL)
8409 return -1;
8410
8411 switch (reg->type)
8412 {
8413 case REG_TYPE_SP_32:
8414 case REG_TYPE_SP_64:
8415 case REG_TYPE_R_32:
8416 case REG_TYPE_R_64:
8417 return reg->number;
8418
8419 case REG_TYPE_FP_B:
8420 case REG_TYPE_FP_H:
8421 case REG_TYPE_FP_S:
8422 case REG_TYPE_FP_D:
8423 case REG_TYPE_FP_Q:
8424 return reg->number + 64;
8425
8426 default:
8427 break;
8428 }
8429 return -1;
8430 }
8431
8432 /* Implement DWARF2_ADDR_SIZE. */
8433
/* Implement DWARF2_ADDR_SIZE: address width in bytes for DWARF info.
   ILP32 ELF uses 4-byte addresses; otherwise derive from the BFD
   target's bits-per-address.  */
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8443
8444 /* MD interface: Symbol and relocation handling. */
8445
8446 /* Return the address within the segment that a PC-relative fixup is
8447 relative to. For AArch64 PC-relative fixups applied to instructions
8448 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8449
8450 long
8451 md_pcrel_from_section (fixS * fixP, segT seg)
8452 {
8453 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8454
8455 /* If this is pc-relative and we are going to emit a relocation
8456 then we just want to put out any pipeline compensation that the linker
8457 will need. Otherwise we want to use the calculated base. */
8458 if (fixP->fx_pcrel
8459 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8460 || aarch64_force_relocation (fixP)))
8461 base = 0;
8462
8463 /* AArch64 should be consistent for all pc-relative relocations. */
8464 return base + AARCH64_PCREL_OFFSET;
8465 }
8466
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, in the undefined section.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8492
8493 /* Return non-zero if the indicated VALUE has overflowed the maximum
8494 range expressible by a unsigned number with the indicated number of
8495 BITS. */
8496
8497 static bool
8498 unsigned_overflow (valueT value, unsigned bits)
8499 {
8500 valueT lim;
8501 if (bits >= sizeof (valueT) * 8)
8502 return false;
8503 lim = (valueT) 1 << bits;
8504 return (value >= lim);
8505 }
8506
8507
8508 /* Return non-zero if the indicated VALUE has overflowed the maximum
8509 range expressible by an signed number with the indicated number of
8510 BITS. */
8511
8512 static bool
8513 signed_overflow (offsetT value, unsigned bits)
8514 {
8515 offsetT lim;
8516 if (bits >= sizeof (offsetT) * 8)
8517 return false;
8518 lim = (offsetT) 1 << (bits - 1);
8519 return (value < -lim || value >= lim);
8520 }
8521
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR) twin;
     OP_NIL marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode under the replacement opcode; failure leaves *INST
     modified but the caller only proceeds on TRUE.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8584
8585 /* Called by fix_insn to fix a MOV immediate alias instruction.
8586
8587 Operand for a generic move immediate instruction, which is an alias
8588 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8589 a 32-bit/64-bit immediate value into general register. An assembler error
8590 shall result if the immediate cannot be created by a single one of these
8591 instructions. If there is a choice, then to ensure reversability an
8592 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8593
8594 static void
8595 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8596 {
8597 const aarch64_opcode *opcode;
8598
8599 /* Need to check if the destination is SP/ZR. The check has to be done
8600 before any aarch64_replace_opcode. */
8601 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8602 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8603
8604 instr->operands[1].imm.value = value;
8605 instr->operands[1].skip = 0;
8606
8607 if (try_mov_wide_p)
8608 {
8609 /* Try the MOVZ alias. */
8610 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8611 aarch64_replace_opcode (instr, opcode);
8612 if (aarch64_opcode_encode (instr->opcode, instr,
8613 &instr->value, NULL, NULL, insn_sequence))
8614 {
8615 put_aarch64_insn (buf, instr->value);
8616 return;
8617 }
8618 /* Try the MOVK alias. */
8619 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8620 aarch64_replace_opcode (instr, opcode);
8621 if (aarch64_opcode_encode (instr->opcode, instr,
8622 &instr->value, NULL, NULL, insn_sequence))
8623 {
8624 put_aarch64_insn (buf, instr->value);
8625 return;
8626 }
8627 }
8628
8629 if (try_mov_bitmask_p)
8630 {
8631 /* Try the ORR alias. */
8632 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8633 aarch64_replace_opcode (instr, opcode);
8634 if (aarch64_opcode_encode (instr->opcode, instr,
8635 &instr->value, NULL, NULL, insn_sequence))
8636 {
8637 put_aarch64_insn (buf, instr->value);
8638 return;
8639 }
8640 }
8641
8642 as_bad_where (fixP->fx_file, fixP->fx_line,
8643 _("immediate cannot be moved by a single instruction"));
8644 }
8645
8646 /* An instruction operand which is immediate related may have symbol used
8647 in the assembly, e.g.
8648
8649 mov w0, u32
8650 .set u32, 0x00ffff00
8651
8652 At the time when the assembly instruction is parsed, a referenced symbol,
8653 like 'u32' in the above example may not have been seen; a fixS is created
8654 in such a case and is handled here after symbols have been resolved.
8655 Instruction is fixed up with VALUE using the information in *FIXP plus
8656 extra information in FLAGS.
8657
8658 This function is called by md_apply_fix to fix up instructions that need
8659 a fix-up described above but does not involve any linker-time relocation. */
8660
8661 static void
8662 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8663 {
8664 int idx;
8665 uint32_t insn;
8666 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8667 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8668 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8669
8670 if (new_inst)
8671 {
8672 /* Now the instruction is about to be fixed-up, so the operand that
8673 was previously marked as 'ignored' needs to be unmarked in order
8674 to get the encoding done properly. */
8675 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8676 new_inst->operands[idx].skip = 0;
8677 }
8678
8679 gas_assert (opnd != AARCH64_OPND_NIL);
8680
8681 switch (opnd)
8682 {
8683 case AARCH64_OPND_EXCEPTION:
8684 case AARCH64_OPND_UNDEFINED:
8685 if (unsigned_overflow (value, 16))
8686 as_bad_where (fixP->fx_file, fixP->fx_line,
8687 _("immediate out of range"));
8688 insn = get_aarch64_insn (buf);
8689 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8690 put_aarch64_insn (buf, insn);
8691 break;
8692
8693 case AARCH64_OPND_AIMM:
8694 /* ADD or SUB with immediate.
8695 NOTE this assumes we come here with a add/sub shifted reg encoding
8696 3 322|2222|2 2 2 21111 111111
8697 1 098|7654|3 2 1 09876 543210 98765 43210
8698 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8699 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8700 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8701 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8702 ->
8703 3 322|2222|2 2 221111111111
8704 1 098|7654|3 2 109876543210 98765 43210
8705 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8706 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8707 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8708 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8709 Fields sf Rn Rd are already set. */
8710 insn = get_aarch64_insn (buf);
8711 if (value < 0)
8712 {
8713 /* Add <-> sub. */
8714 insn = reencode_addsub_switch_add_sub (insn);
8715 value = -value;
8716 }
8717
8718 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8719 && unsigned_overflow (value, 12))
8720 {
8721 /* Try to shift the value by 12 to make it fit. */
8722 if (((value >> 12) << 12) == value
8723 && ! unsigned_overflow (value, 12 + 12))
8724 {
8725 value >>= 12;
8726 insn |= encode_addsub_imm_shift_amount (1);
8727 }
8728 }
8729
8730 if (unsigned_overflow (value, 12))
8731 as_bad_where (fixP->fx_file, fixP->fx_line,
8732 _("immediate out of range"));
8733
8734 insn |= encode_addsub_imm (value);
8735
8736 put_aarch64_insn (buf, insn);
8737 break;
8738
8739 case AARCH64_OPND_SIMD_IMM:
8740 case AARCH64_OPND_SIMD_IMM_SFT:
8741 case AARCH64_OPND_LIMM:
8742 /* Bit mask immediate. */
8743 gas_assert (new_inst != NULL);
8744 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8745 new_inst->operands[idx].imm.value = value;
8746 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8747 &new_inst->value, NULL, NULL, insn_sequence))
8748 put_aarch64_insn (buf, new_inst->value);
8749 else
8750 as_bad_where (fixP->fx_file, fixP->fx_line,
8751 _("invalid immediate"));
8752 break;
8753
8754 case AARCH64_OPND_HALF:
8755 /* 16-bit unsigned immediate. */
8756 if (unsigned_overflow (value, 16))
8757 as_bad_where (fixP->fx_file, fixP->fx_line,
8758 _("immediate out of range"));
8759 insn = get_aarch64_insn (buf);
8760 insn |= encode_movw_imm (value & 0xffff);
8761 put_aarch64_insn (buf, insn);
8762 break;
8763
8764 case AARCH64_OPND_IMM_MOV:
8765 /* Operand for a generic move immediate instruction, which is
8766 an alias instruction that generates a single MOVZ, MOVN or ORR
8767 instruction to loads a 32-bit/64-bit immediate value into general
8768 register. An assembler error shall result if the immediate cannot be
8769 created by a single one of these instructions. If there is a choice,
8770 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
8771 and MOVZ or MOVN to ORR. */
8772 gas_assert (new_inst != NULL);
8773 fix_mov_imm_insn (fixP, buf, new_inst, value);
8774 break;
8775
8776 case AARCH64_OPND_ADDR_SIMM7:
8777 case AARCH64_OPND_ADDR_SIMM9:
8778 case AARCH64_OPND_ADDR_SIMM9_2:
8779 case AARCH64_OPND_ADDR_SIMM10:
8780 case AARCH64_OPND_ADDR_UIMM12:
8781 case AARCH64_OPND_ADDR_SIMM11:
8782 case AARCH64_OPND_ADDR_SIMM13:
8783 /* Immediate offset in an address. */
8784 insn = get_aarch64_insn (buf);
8785
8786 gas_assert (new_inst != NULL && new_inst->value == insn);
8787 gas_assert (new_inst->opcode->operands[1] == opnd
8788 || new_inst->opcode->operands[2] == opnd);
8789
8790 /* Get the index of the address operand. */
8791 if (new_inst->opcode->operands[1] == opnd)
8792 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
8793 idx = 1;
8794 else
8795 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8796 idx = 2;
8797
8798 /* Update the resolved offset value. */
8799 new_inst->operands[idx].addr.offset.imm = value;
8800
8801 /* Encode/fix-up. */
8802 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8803 &new_inst->value, NULL, NULL, insn_sequence))
8804 {
8805 put_aarch64_insn (buf, new_inst->value);
8806 break;
8807 }
8808 else if (new_inst->opcode->iclass == ldst_pos
8809 && try_to_encode_as_unscaled_ldst (new_inst))
8810 {
8811 put_aarch64_insn (buf, new_inst->value);
8812 break;
8813 }
8814
8815 as_bad_where (fixP->fx_file, fixP->fx_line,
8816 _("immediate offset out of range"));
8817 break;
8818
8819 default:
8820 gas_assert (0);
8821 as_fatal (_("unhandled operand code %d"), opnd);
8822 }
8823 }
8824
8825 /* Apply a fixup (fixP) to segment data, once it has been determined
8826 by our caller that we have all the info we need to fix it up.
8827
8828 Parameter valP is the pointer to the value of the bits. */
8829
8830 void
8831 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8832 {
8833 offsetT value = *valP;
8834 uint32_t insn;
8835 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8836 int scale;
8837 unsigned flags = fixP->fx_addnumber;
8838
8839 DEBUG_TRACE ("\n\n");
8840 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8841 DEBUG_TRACE ("Enter md_apply_fix");
8842
8843 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8844
8845 /* Note whether this will delete the relocation. */
8846
8847 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8848 fixP->fx_done = 1;
8849
8850 /* Process the relocations. */
8851 switch (fixP->fx_r_type)
8852 {
8853 case BFD_RELOC_NONE:
8854 /* This will need to go in the object file. */
8855 fixP->fx_done = 0;
8856 break;
8857
8858 case BFD_RELOC_8:
8859 case BFD_RELOC_8_PCREL:
8860 if (fixP->fx_done || !seg->use_rela_p)
8861 md_number_to_chars (buf, value, 1);
8862 break;
8863
8864 case BFD_RELOC_16:
8865 case BFD_RELOC_16_PCREL:
8866 if (fixP->fx_done || !seg->use_rela_p)
8867 md_number_to_chars (buf, value, 2);
8868 break;
8869
8870 case BFD_RELOC_32:
8871 case BFD_RELOC_32_PCREL:
8872 if (fixP->fx_done || !seg->use_rela_p)
8873 md_number_to_chars (buf, value, 4);
8874 break;
8875
8876 case BFD_RELOC_64:
8877 case BFD_RELOC_64_PCREL:
8878 if (fixP->fx_done || !seg->use_rela_p)
8879 md_number_to_chars (buf, value, 8);
8880 break;
8881
8882 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8883 /* We claim that these fixups have been processed here, even if
8884 in fact we generate an error because we do not have a reloc
8885 for them, so tc_gen_reloc() will reject them. */
8886 fixP->fx_done = 1;
8887 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8888 {
8889 as_bad_where (fixP->fx_file, fixP->fx_line,
8890 _("undefined symbol %s used as an immediate value"),
8891 S_GET_NAME (fixP->fx_addsy));
8892 goto apply_fix_return;
8893 }
8894 fix_insn (fixP, flags, value);
8895 break;
8896
8897 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8898 if (fixP->fx_done || !seg->use_rela_p)
8899 {
8900 if (value & 3)
8901 as_bad_where (fixP->fx_file, fixP->fx_line,
8902 _("pc-relative load offset not word aligned"));
8903 if (signed_overflow (value, 21))
8904 as_bad_where (fixP->fx_file, fixP->fx_line,
8905 _("pc-relative load offset out of range"));
8906 insn = get_aarch64_insn (buf);
8907 insn |= encode_ld_lit_ofs_19 (value >> 2);
8908 put_aarch64_insn (buf, insn);
8909 }
8910 break;
8911
8912 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8913 if (fixP->fx_done || !seg->use_rela_p)
8914 {
8915 if (signed_overflow (value, 21))
8916 as_bad_where (fixP->fx_file, fixP->fx_line,
8917 _("pc-relative address offset out of range"));
8918 insn = get_aarch64_insn (buf);
8919 insn |= encode_adr_imm (value);
8920 put_aarch64_insn (buf, insn);
8921 }
8922 break;
8923
8924 case BFD_RELOC_AARCH64_BRANCH19:
8925 if (fixP->fx_done || !seg->use_rela_p)
8926 {
8927 if (value & 3)
8928 as_bad_where (fixP->fx_file, fixP->fx_line,
8929 _("conditional branch target not word aligned"));
8930 if (signed_overflow (value, 21))
8931 as_bad_where (fixP->fx_file, fixP->fx_line,
8932 _("conditional branch out of range"));
8933 insn = get_aarch64_insn (buf);
8934 insn |= encode_cond_branch_ofs_19 (value >> 2);
8935 put_aarch64_insn (buf, insn);
8936 }
8937 break;
8938
8939 case BFD_RELOC_AARCH64_TSTBR14:
8940 if (fixP->fx_done || !seg->use_rela_p)
8941 {
8942 if (value & 3)
8943 as_bad_where (fixP->fx_file, fixP->fx_line,
8944 _("conditional branch target not word aligned"));
8945 if (signed_overflow (value, 16))
8946 as_bad_where (fixP->fx_file, fixP->fx_line,
8947 _("conditional branch out of range"));
8948 insn = get_aarch64_insn (buf);
8949 insn |= encode_tst_branch_ofs_14 (value >> 2);
8950 put_aarch64_insn (buf, insn);
8951 }
8952 break;
8953
8954 case BFD_RELOC_AARCH64_CALL26:
8955 case BFD_RELOC_AARCH64_JUMP26:
8956 if (fixP->fx_done || !seg->use_rela_p)
8957 {
8958 if (value & 3)
8959 as_bad_where (fixP->fx_file, fixP->fx_line,
8960 _("branch target not word aligned"));
8961 if (signed_overflow (value, 28))
8962 as_bad_where (fixP->fx_file, fixP->fx_line,
8963 _("branch out of range"));
8964 insn = get_aarch64_insn (buf);
8965 insn |= encode_branch_ofs_26 (value >> 2);
8966 put_aarch64_insn (buf, insn);
8967 }
8968 break;
8969
8970 case BFD_RELOC_AARCH64_MOVW_G0:
8971 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8972 case BFD_RELOC_AARCH64_MOVW_G0_S:
8973 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8974 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8975 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8976 scale = 0;
8977 goto movw_common;
8978 case BFD_RELOC_AARCH64_MOVW_G1:
8979 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8980 case BFD_RELOC_AARCH64_MOVW_G1_S:
8981 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8982 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8983 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8984 scale = 16;
8985 goto movw_common;
8986 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8987 scale = 0;
8988 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8989 /* Should always be exported to object file, see
8990 aarch64_force_relocation(). */
8991 gas_assert (!fixP->fx_done);
8992 gas_assert (seg->use_rela_p);
8993 goto movw_common;
8994 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8995 scale = 16;
8996 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8997 /* Should always be exported to object file, see
8998 aarch64_force_relocation(). */
8999 gas_assert (!fixP->fx_done);
9000 gas_assert (seg->use_rela_p);
9001 goto movw_common;
9002 case BFD_RELOC_AARCH64_MOVW_G2:
9003 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9004 case BFD_RELOC_AARCH64_MOVW_G2_S:
9005 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9006 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9007 scale = 32;
9008 goto movw_common;
9009 case BFD_RELOC_AARCH64_MOVW_G3:
9010 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9011 scale = 48;
9012 movw_common:
9013 if (fixP->fx_done || !seg->use_rela_p)
9014 {
9015 insn = get_aarch64_insn (buf);
9016
9017 if (!fixP->fx_done)
9018 {
9019 /* REL signed addend must fit in 16 bits */
9020 if (signed_overflow (value, 16))
9021 as_bad_where (fixP->fx_file, fixP->fx_line,
9022 _("offset out of range"));
9023 }
9024 else
9025 {
9026 /* Check for overflow and scale. */
9027 switch (fixP->fx_r_type)
9028 {
9029 case BFD_RELOC_AARCH64_MOVW_G0:
9030 case BFD_RELOC_AARCH64_MOVW_G1:
9031 case BFD_RELOC_AARCH64_MOVW_G2:
9032 case BFD_RELOC_AARCH64_MOVW_G3:
9033 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9034 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9035 if (unsigned_overflow (value, scale + 16))
9036 as_bad_where (fixP->fx_file, fixP->fx_line,
9037 _("unsigned value out of range"));
9038 break;
9039 case BFD_RELOC_AARCH64_MOVW_G0_S:
9040 case BFD_RELOC_AARCH64_MOVW_G1_S:
9041 case BFD_RELOC_AARCH64_MOVW_G2_S:
9042 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9043 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9044 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9045 /* NOTE: We can only come here with movz or movn. */
9046 if (signed_overflow (value, scale + 16))
9047 as_bad_where (fixP->fx_file, fixP->fx_line,
9048 _("signed value out of range"));
9049 if (value < 0)
9050 {
9051 /* Force use of MOVN. */
9052 value = ~value;
9053 insn = reencode_movzn_to_movn (insn);
9054 }
9055 else
9056 {
9057 /* Force use of MOVZ. */
9058 insn = reencode_movzn_to_movz (insn);
9059 }
9060 break;
9061 default:
9062 /* Unchecked relocations. */
9063 break;
9064 }
9065 value >>= scale;
9066 }
9067
9068 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9069 insn |= encode_movw_imm (value & 0xffff);
9070
9071 put_aarch64_insn (buf, insn);
9072 }
9073 break;
9074
9075 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9076 fixP->fx_r_type = (ilp32_p
9077 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9078 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9079 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9080 /* Should always be exported to object file, see
9081 aarch64_force_relocation(). */
9082 gas_assert (!fixP->fx_done);
9083 gas_assert (seg->use_rela_p);
9084 break;
9085
9086 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9087 fixP->fx_r_type = (ilp32_p
9088 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9089 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9090 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9091 /* Should always be exported to object file, see
9092 aarch64_force_relocation(). */
9093 gas_assert (!fixP->fx_done);
9094 gas_assert (seg->use_rela_p);
9095 break;
9096
9097 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9098 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9099 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9100 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9101 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9102 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9103 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9104 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9105 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9106 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9107 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9108 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9109 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9110 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9111 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9112 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9113 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9114 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9115 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9116 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9117 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9118 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9119 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9120 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9121 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9122 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9123 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9124 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9125 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9126 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9127 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9128 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9129 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9130 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9131 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9132 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9133 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9134 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9135 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9136 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9137 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9138 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9139 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9140 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9141 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9142 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9143 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9144 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9145 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9146 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9147 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9148 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9149 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9150 /* Should always be exported to object file, see
9151 aarch64_force_relocation(). */
9152 gas_assert (!fixP->fx_done);
9153 gas_assert (seg->use_rela_p);
9154 break;
9155
9156 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9157 /* Should always be exported to object file, see
9158 aarch64_force_relocation(). */
9159 fixP->fx_r_type = (ilp32_p
9160 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9161 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9162 gas_assert (!fixP->fx_done);
9163 gas_assert (seg->use_rela_p);
9164 break;
9165
9166 case BFD_RELOC_AARCH64_ADD_LO12:
9167 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9168 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9169 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9170 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9171 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9172 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9173 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9174 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9175 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9176 case BFD_RELOC_AARCH64_LDST128_LO12:
9177 case BFD_RELOC_AARCH64_LDST16_LO12:
9178 case BFD_RELOC_AARCH64_LDST32_LO12:
9179 case BFD_RELOC_AARCH64_LDST64_LO12:
9180 case BFD_RELOC_AARCH64_LDST8_LO12:
9181 /* Should always be exported to object file, see
9182 aarch64_force_relocation(). */
9183 gas_assert (!fixP->fx_done);
9184 gas_assert (seg->use_rela_p);
9185 break;
9186
9187 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9188 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9189 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9190 break;
9191
9192 case BFD_RELOC_UNUSED:
9193 /* An error will already have been reported. */
9194 break;
9195
9196 default:
9197 as_bad_where (fixP->fx_file, fixP->fx_line,
9198 _("unexpected %s fixup"),
9199 bfd_get_reloc_code_name (fixP->fx_r_type));
9200 break;
9201 }
9202
9203 apply_fix_return:
9204 /* Free the allocated the struct aarch64_inst.
9205 N.B. currently there are very limited number of fix-up types actually use
9206 this field, so the impact on the performance should be minimal . */
9207 free (fixP->tc_fix_data.inst);
9208
9209 return;
9210 }
9211
9212 /* Translate internal representation of relocation info to BFD target
9213 format. */
9214
9215 arelent *
9216 tc_gen_reloc (asection * section, fixS * fixp)
9217 {
9218 arelent *reloc;
9219 bfd_reloc_code_real_type code;
9220
9221 reloc = XNEW (arelent);
9222
9223 reloc->sym_ptr_ptr = XNEW (asymbol *);
9224 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9225 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9226
9227 if (fixp->fx_pcrel)
9228 {
9229 if (section->use_rela_p)
9230 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9231 else
9232 fixp->fx_offset = reloc->address;
9233 }
9234 reloc->addend = fixp->fx_offset;
9235
9236 code = fixp->fx_r_type;
9237 switch (code)
9238 {
9239 case BFD_RELOC_16:
9240 if (fixp->fx_pcrel)
9241 code = BFD_RELOC_16_PCREL;
9242 break;
9243
9244 case BFD_RELOC_32:
9245 if (fixp->fx_pcrel)
9246 code = BFD_RELOC_32_PCREL;
9247 break;
9248
9249 case BFD_RELOC_64:
9250 if (fixp->fx_pcrel)
9251 code = BFD_RELOC_64_PCREL;
9252 break;
9253
9254 default:
9255 break;
9256 }
9257
9258 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9259 if (reloc->howto == NULL)
9260 {
9261 as_bad_where (fixp->fx_file, fixp->fx_line,
9262 _
9263 ("cannot represent %s relocation in this object file format"),
9264 bfd_get_reloc_code_name (code));
9265 return NULL;
9266 }
9267
9268 return reloc;
9269 }
9270
9271 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9272
9273 void
9274 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9275 {
9276 bfd_reloc_code_real_type type;
9277 int pcrel = 0;
9278
9279 /* Pick a reloc.
9280 FIXME: @@ Should look at CPU word size. */
9281 switch (size)
9282 {
9283 case 1:
9284 type = BFD_RELOC_8;
9285 break;
9286 case 2:
9287 type = BFD_RELOC_16;
9288 break;
9289 case 4:
9290 type = BFD_RELOC_32;
9291 break;
9292 case 8:
9293 type = BFD_RELOC_64;
9294 break;
9295 default:
9296 as_bad (_("cannot do %u-byte relocation"), size);
9297 type = BFD_RELOC_UNUSED;
9298 break;
9299 }
9300
9301 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9302 }
9303
9304 #ifdef OBJ_ELF
9305
9306 /* Implement md_after_parse_args. This is the earliest time we need to decide
9307 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9308
9309 void
9310 aarch64_after_parse_args (void)
9311 {
9312 if (aarch64_abi != AARCH64_ABI_NONE)
9313 return;
9314
9315 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9316 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9317 aarch64_abi = AARCH64_ABI_ILP32;
9318 else
9319 aarch64_abi = AARCH64_ABI_LP64;
9320 }
9321
9322 const char *
9323 elf64_aarch64_target_format (void)
9324 {
9325 #ifdef TE_CLOUDABI
9326 /* FIXME: What to do for ilp32_p ? */
9327 if (target_big_endian)
9328 return "elf64-bigaarch64-cloudabi";
9329 else
9330 return "elf64-littleaarch64-cloudabi";
9331 #else
9332 if (target_big_endian)
9333 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9334 else
9335 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9336 #endif
9337 }
9338
9339 void
9340 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
9341 {
9342 elf_frob_symbol (symp, puntp);
9343 }
9344 #endif
9345
9346 /* MD interface: Finalization. */
9347
9348 /* A good place to do this, although this was probably not intended
9349 for this kind of use. We need to dump the literal pool before
9350 references are made to a null symbol pointer. */
9351
9352 void
9353 aarch64_cleanup (void)
9354 {
9355 literal_pool *pool;
9356
9357 for (pool = list_of_pools; pool; pool = pool->next)
9358 {
9359 /* Put it at the end of the relevant section. */
9360 subseg_set (pool->section, pool->sub_section);
9361 s_ltorg (0);
9362 }
9363 }
9364
9365 #ifdef OBJ_ELF
9366 /* Remove any excess mapping symbols generated for alignment frags in
9367 SEC. We may have created a mapping symbol before a zero byte
9368 alignment; remove it if there's a mapping symbol after the
9369 alignment. */
9370 static void
9371 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9372 void *dummy ATTRIBUTE_UNUSED)
9373 {
9374 segment_info_type *seginfo = seg_info (sec);
9375 fragS *fragp;
9376
9377 if (seginfo == NULL || seginfo->frchainP == NULL)
9378 return;
9379
9380 for (fragp = seginfo->frchainP->frch_root;
9381 fragp != NULL; fragp = fragp->fr_next)
9382 {
9383 symbolS *sym = fragp->tc_frag_data.last_map;
9384 fragS *next = fragp->fr_next;
9385
9386 /* Variable-sized frags have been converted to fixed size by
9387 this point. But if this was variable-sized to start with,
9388 there will be a fixed-size frag after it. So don't handle
9389 next == NULL. */
9390 if (sym == NULL || next == NULL)
9391 continue;
9392
9393 if (S_GET_VALUE (sym) < next->fr_address)
9394 /* Not at the end of this frag. */
9395 continue;
9396 know (S_GET_VALUE (sym) == next->fr_address);
9397
9398 do
9399 {
9400 if (next->tc_frag_data.first_map != NULL)
9401 {
9402 /* Next frag starts with a mapping symbol. Discard this
9403 one. */
9404 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9405 break;
9406 }
9407
9408 if (next->fr_next == NULL)
9409 {
9410 /* This mapping symbol is at the end of the section. Discard
9411 it. */
9412 know (next->fr_fix == 0 && next->fr_var == 0);
9413 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9414 break;
9415 }
9416
9417 /* As long as we have empty frags without any mapping symbols,
9418 keep looking. */
9419 /* If the next frag is non-empty and does not start with a
9420 mapping symbol, then this mapping symbol is required. */
9421 if (next->fr_address != next->fr_next->fr_address)
9422 break;
9423
9424 next = next->fr_next;
9425 }
9426 while (next != NULL);
9427 }
9428 }
9429 #endif
9430
9431 /* Adjust the symbol table. */
9432
9433 void
9434 aarch64_adjust_symtab (void)
9435 {
9436 #ifdef OBJ_ELF
9437 /* Remove any overlapping mapping symbols generated by alignment frags. */
9438 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
9439 /* Now do generic ELF adjustments. */
9440 elf_adjust_symtab ();
9441 #endif
9442 }
9443
9444 static void
9445 checked_hash_insert (htab_t table, const char *key, void *value)
9446 {
9447 str_hash_insert (table, key, value, 0);
9448 }
9449
9450 static void
9451 sysreg_hash_insert (htab_t table, const char *key, void *value)
9452 {
9453 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9454 checked_hash_insert (table, key, value);
9455 }
9456
9457 static void
9458 fill_instruction_hash_table (void)
9459 {
9460 const aarch64_opcode *opcode = aarch64_opcode_table;
9461
9462 while (opcode->name != NULL)
9463 {
9464 templates *templ, *new_templ;
9465 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9466
9467 new_templ = XNEW (templates);
9468 new_templ->opcode = opcode;
9469 new_templ->next = NULL;
9470
9471 if (!templ)
9472 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9473 else
9474 {
9475 new_templ->next = templ->next;
9476 templ->next = new_templ;
9477 }
9478 ++opcode;
9479 }
9480 }
9481
9482 static inline void
9483 convert_to_upper (char *dst, const char *src, size_t num)
9484 {
9485 unsigned int i;
9486 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9487 *dst = TOUPPER (*src);
9488 *dst = '\0';
9489 }
9490
9491 /* Assume STR point to a lower-case string, allocate, convert and return
9492 the corresponding upper-case string. */
9493 static inline const char*
9494 get_upper_str (const char *str)
9495 {
9496 char *ret;
9497 size_t len = strlen (str);
9498 ret = XNEWVEC (char, len + 1);
9499 convert_to_upper (ret, str, len);
9500 return ret;
9501 }
9502
9503 /* MD interface: Initialization. */
9504
9505 void
9506 md_begin (void)
9507 {
9508 unsigned mach;
9509 unsigned int i;
9510
9511 aarch64_ops_hsh = str_htab_create ();
9512 aarch64_cond_hsh = str_htab_create ();
9513 aarch64_shift_hsh = str_htab_create ();
9514 aarch64_sys_regs_hsh = str_htab_create ();
9515 aarch64_pstatefield_hsh = str_htab_create ();
9516 aarch64_sys_regs_ic_hsh = str_htab_create ();
9517 aarch64_sys_regs_dc_hsh = str_htab_create ();
9518 aarch64_sys_regs_at_hsh = str_htab_create ();
9519 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9520 aarch64_sys_regs_sr_hsh = str_htab_create ();
9521 aarch64_reg_hsh = str_htab_create ();
9522 aarch64_barrier_opt_hsh = str_htab_create ();
9523 aarch64_nzcv_hsh = str_htab_create ();
9524 aarch64_pldop_hsh = str_htab_create ();
9525 aarch64_hint_opt_hsh = str_htab_create ();
9526
9527 fill_instruction_hash_table ();
9528
9529 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9530 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9531 (void *) (aarch64_sys_regs + i));
9532
9533 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9534 sysreg_hash_insert (aarch64_pstatefield_hsh,
9535 aarch64_pstatefields[i].name,
9536 (void *) (aarch64_pstatefields + i));
9537
9538 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9539 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9540 aarch64_sys_regs_ic[i].name,
9541 (void *) (aarch64_sys_regs_ic + i));
9542
9543 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9544 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9545 aarch64_sys_regs_dc[i].name,
9546 (void *) (aarch64_sys_regs_dc + i));
9547
9548 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9549 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9550 aarch64_sys_regs_at[i].name,
9551 (void *) (aarch64_sys_regs_at + i));
9552
9553 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9554 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9555 aarch64_sys_regs_tlbi[i].name,
9556 (void *) (aarch64_sys_regs_tlbi + i));
9557
9558 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9559 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9560 aarch64_sys_regs_sr[i].name,
9561 (void *) (aarch64_sys_regs_sr + i));
9562
9563 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9564 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9565 (void *) (reg_names + i));
9566
9567 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9568 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9569 (void *) (nzcv_names + i));
9570
9571 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9572 {
9573 const char *name = aarch64_operand_modifiers[i].name;
9574 checked_hash_insert (aarch64_shift_hsh, name,
9575 (void *) (aarch64_operand_modifiers + i));
9576 /* Also hash the name in the upper case. */
9577 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9578 (void *) (aarch64_operand_modifiers + i));
9579 }
9580
9581 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9582 {
9583 unsigned int j;
9584 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9585 the same condition code. */
9586 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9587 {
9588 const char *name = aarch64_conds[i].names[j];
9589 if (name == NULL)
9590 break;
9591 checked_hash_insert (aarch64_cond_hsh, name,
9592 (void *) (aarch64_conds + i));
9593 /* Also hash the name in the upper case. */
9594 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9595 (void *) (aarch64_conds + i));
9596 }
9597 }
9598
9599 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9600 {
9601 const char *name = aarch64_barrier_options[i].name;
9602 /* Skip xx00 - the unallocated values of option. */
9603 if ((i & 0x3) == 0)
9604 continue;
9605 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9606 (void *) (aarch64_barrier_options + i));
9607 /* Also hash the name in the upper case. */
9608 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9609 (void *) (aarch64_barrier_options + i));
9610 }
9611
9612 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9613 {
9614 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9615 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9616 (void *) (aarch64_barrier_dsb_nxs_options + i));
9617 /* Also hash the name in the upper case. */
9618 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9619 (void *) (aarch64_barrier_dsb_nxs_options + i));
9620 }
9621
9622 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9623 {
9624 const char* name = aarch64_prfops[i].name;
9625 /* Skip the unallocated hint encodings. */
9626 if (name == NULL)
9627 continue;
9628 checked_hash_insert (aarch64_pldop_hsh, name,
9629 (void *) (aarch64_prfops + i));
9630 /* Also hash the name in the upper case. */
9631 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9632 (void *) (aarch64_prfops + i));
9633 }
9634
9635 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9636 {
9637 const char* name = aarch64_hint_options[i].name;
9638 const char* upper_name = get_upper_str(name);
9639
9640 checked_hash_insert (aarch64_hint_opt_hsh, name,
9641 (void *) (aarch64_hint_options + i));
9642
9643 /* Also hash the name in the upper case if not the same. */
9644 if (strcmp (name, upper_name) != 0)
9645 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9646 (void *) (aarch64_hint_options + i));
9647 }
9648
9649 /* Set the cpu variant based on the command-line options. */
9650 if (!mcpu_cpu_opt)
9651 mcpu_cpu_opt = march_cpu_opt;
9652
9653 if (!mcpu_cpu_opt)
9654 mcpu_cpu_opt = &cpu_default;
9655
9656 cpu_variant = *mcpu_cpu_opt;
9657
9658 /* Record the CPU type. */
9659 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
9660
9661 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9662 }
9663
9664 /* Command line processing. */
9665
9666 const char *md_shortopts = "m:";
9667
9668 #ifdef AARCH64_BI_ENDIAN
9669 #define OPTION_EB (OPTION_MD_BASE + 0)
9670 #define OPTION_EL (OPTION_MD_BASE + 1)
9671 #else
9672 #if TARGET_BYTES_BIG_ENDIAN
9673 #define OPTION_EB (OPTION_MD_BASE + 0)
9674 #else
9675 #define OPTION_EL (OPTION_MD_BASE + 1)
9676 #endif
9677 #endif
9678
9679 struct option md_longopts[] = {
9680 #ifdef OPTION_EB
9681 {"EB", no_argument, NULL, OPTION_EB},
9682 #endif
9683 #ifdef OPTION_EL
9684 {"EL", no_argument, NULL, OPTION_EL},
9685 #endif
9686 {NULL, no_argument, NULL, 0}
9687 };
9688
9689 size_t md_longopts_size = sizeof (md_longopts);
9690
9691 struct aarch64_option_table
9692 {
9693 const char *option; /* Option name to match. */
9694 const char *help; /* Help information. */
9695 int *var; /* Variable to change. */
9696 int value; /* What to change it to. */
9697 char *deprecated; /* If non-null, print this message. */
9698 };
9699
9700 static struct aarch64_option_table aarch64_opts[] = {
9701 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
9702 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
9703 NULL},
9704 #ifdef DEBUG_AARCH64
9705 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
9706 #endif /* DEBUG_AARCH64 */
9707 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
9708 NULL},
9709 {"mno-verbose-error", N_("do not output verbose error messages"),
9710 &verbose_error_p, 0, NULL},
9711 {NULL, NULL, NULL, 0, NULL}
9712 };
9713
9714 struct aarch64_cpu_option_table
9715 {
9716 const char *name;
9717 const aarch64_feature_set value;
9718 /* The canonical name of the CPU, or NULL to use NAME converted to upper
9719 case. */
9720 const char *canonical_name;
9721 };
9722
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL name; note that the .cpu
   directive handler deliberately skips the leading "all" entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			    AARCH64_FEATURE_PROFILE
			    | AARCH64_FEATURE_CVADP
			    | AARCH64_FEATURE_SVE
			    | AARCH64_FEATURE_SSBS
			    | AARCH64_FEATURE_RNG
			    | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_BFLOAT16
			    | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9882
/* Entry describing one -march= / .arch selectable architecture.  */
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name, e.g. "armv8.2-a".  */
  const aarch64_feature_set value;	/* Features it enables.  */
};
9888
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  NULL-terminated; the .arch directive handler
   skips the leading "all" entry.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9909
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Feature bits the extension adds.  */
  const aarch64_feature_set require; /* Feature dependencies.  */
};
9917
/* Table of +ext extension modifiers.  VALUE is the feature set the
   extension turns on; REQUIRE lists its prerequisites, which the
   enable/disable closure functions below follow transitively.
   NULL-terminated.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10019
/* Long -m option of the form "mfoo=<sub>": FUNC is called with the
   sub-option string when OPTION matches as a prefix.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10027
10028 /* Transitive closure of features depending on set. */
10029 static aarch64_feature_set
10030 aarch64_feature_disable_set (aarch64_feature_set set)
10031 {
10032 const struct aarch64_option_cpu_value_table *opt;
10033 aarch64_feature_set prev = 0;
10034
10035 while (prev != set) {
10036 prev = set;
10037 for (opt = aarch64_features; opt->name != NULL; opt++)
10038 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10039 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10040 }
10041 return set;
10042 }
10043
10044 /* Transitive closure of dependencies of set. */
10045 static aarch64_feature_set
10046 aarch64_feature_enable_set (aarch64_feature_set set)
10047 {
10048 const struct aarch64_option_cpu_value_table *opt;
10049 aarch64_feature_set prev = 0;
10050
10051 while (prev != set) {
10052 prev = set;
10053 for (opt = aarch64_features; opt->name != NULL; opt++)
10054 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10055 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10056 }
10057 return set;
10058 }
10059
10060 static int
10061 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10062 bool ext_only)
10063 {
10064 /* We insist on extensions being added before being removed. We achieve
10065 this by using the ADDING_VALUE variable to indicate whether we are
10066 adding an extension (1) or removing it (0) and only allowing it to
10067 change in the order -1 -> 1 -> 0. */
10068 int adding_value = -1;
10069 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10070
10071 /* Copy the feature set, so that we can modify it. */
10072 *ext_set = **opt_p;
10073 *opt_p = ext_set;
10074
10075 while (str != NULL && *str != 0)
10076 {
10077 const struct aarch64_option_cpu_value_table *opt;
10078 const char *ext = NULL;
10079 int optlen;
10080
10081 if (!ext_only)
10082 {
10083 if (*str != '+')
10084 {
10085 as_bad (_("invalid architectural extension"));
10086 return 0;
10087 }
10088
10089 ext = strchr (++str, '+');
10090 }
10091
10092 if (ext != NULL)
10093 optlen = ext - str;
10094 else
10095 optlen = strlen (str);
10096
10097 if (optlen >= 2 && startswith (str, "no"))
10098 {
10099 if (adding_value != 0)
10100 adding_value = 0;
10101 optlen -= 2;
10102 str += 2;
10103 }
10104 else if (optlen > 0)
10105 {
10106 if (adding_value == -1)
10107 adding_value = 1;
10108 else if (adding_value != 1)
10109 {
10110 as_bad (_("must specify extensions to add before specifying "
10111 "those to remove"));
10112 return false;
10113 }
10114 }
10115
10116 if (optlen == 0)
10117 {
10118 as_bad (_("missing architectural extension"));
10119 return 0;
10120 }
10121
10122 gas_assert (adding_value != -1);
10123
10124 for (opt = aarch64_features; opt->name != NULL; opt++)
10125 if (strncmp (opt->name, str, optlen) == 0)
10126 {
10127 aarch64_feature_set set;
10128
10129 /* Add or remove the extension. */
10130 if (adding_value)
10131 {
10132 set = aarch64_feature_enable_set (opt->value);
10133 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10134 }
10135 else
10136 {
10137 set = aarch64_feature_disable_set (opt->value);
10138 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10139 }
10140 break;
10141 }
10142
10143 if (opt->name == NULL)
10144 {
10145 as_bad (_("unknown architectural extension `%s'"), str);
10146 return 0;
10147 }
10148
10149 str = ext;
10150 };
10151
10152 return 1;
10153 }
10154
10155 static int
10156 aarch64_parse_cpu (const char *str)
10157 {
10158 const struct aarch64_cpu_option_table *opt;
10159 const char *ext = strchr (str, '+');
10160 size_t optlen;
10161
10162 if (ext != NULL)
10163 optlen = ext - str;
10164 else
10165 optlen = strlen (str);
10166
10167 if (optlen == 0)
10168 {
10169 as_bad (_("missing cpu name `%s'"), str);
10170 return 0;
10171 }
10172
10173 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10174 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10175 {
10176 mcpu_cpu_opt = &opt->value;
10177 if (ext != NULL)
10178 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10179
10180 return 1;
10181 }
10182
10183 as_bad (_("unknown cpu `%s'"), str);
10184 return 0;
10185 }
10186
10187 static int
10188 aarch64_parse_arch (const char *str)
10189 {
10190 const struct aarch64_arch_option_table *opt;
10191 const char *ext = strchr (str, '+');
10192 size_t optlen;
10193
10194 if (ext != NULL)
10195 optlen = ext - str;
10196 else
10197 optlen = strlen (str);
10198
10199 if (optlen == 0)
10200 {
10201 as_bad (_("missing architecture name `%s'"), str);
10202 return 0;
10203 }
10204
10205 for (opt = aarch64_archs; opt->name != NULL; opt++)
10206 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10207 {
10208 march_cpu_opt = &opt->value;
10209 if (ext != NULL)
10210 return aarch64_parse_features (ext, &march_cpu_opt, false);
10211
10212 return 1;
10213 }
10214
10215 as_bad (_("unknown architecture `%s'\n"), str);
10216 return 0;
10217 }
10218
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};
10225
/* Recognized -mabi= values.  Unlike the other tables this one has no
   NULL terminator; it is iterated with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
10230
10231 static int
10232 aarch64_parse_abi (const char *str)
10233 {
10234 unsigned int i;
10235
10236 if (str[0] == '\0')
10237 {
10238 as_bad (_("missing abi name `%s'"), str);
10239 return 0;
10240 }
10241
10242 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10243 if (strcmp (str, aarch64_abis[i].name) == 0)
10244 {
10245 aarch64_abi = aarch64_abis[i].value;
10246 return 1;
10247 }
10248
10249 as_bad (_("unknown abi `%s'\n"), str);
10250 return 0;
10251 }
10252
/* Long -m options with sub-arguments; NULL-terminated.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10264
10265 int
10266 md_parse_option (int c, const char *arg)
10267 {
10268 struct aarch64_option_table *opt;
10269 struct aarch64_long_option_table *lopt;
10270
10271 switch (c)
10272 {
10273 #ifdef OPTION_EB
10274 case OPTION_EB:
10275 target_big_endian = 1;
10276 break;
10277 #endif
10278
10279 #ifdef OPTION_EL
10280 case OPTION_EL:
10281 target_big_endian = 0;
10282 break;
10283 #endif
10284
10285 case 'a':
10286 /* Listing option. Just ignore these, we don't support additional
10287 ones. */
10288 return 0;
10289
10290 default:
10291 for (opt = aarch64_opts; opt->option != NULL; opt++)
10292 {
10293 if (c == opt->option[0]
10294 && ((arg == NULL && opt->option[1] == 0)
10295 || streq (arg, opt->option + 1)))
10296 {
10297 /* If the option is deprecated, tell the user. */
10298 if (opt->deprecated != NULL)
10299 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10300 arg ? arg : "", _(opt->deprecated));
10301
10302 if (opt->var != NULL)
10303 *opt->var = opt->value;
10304
10305 return 1;
10306 }
10307 }
10308
10309 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10310 {
10311 /* These options are expected to have an argument. */
10312 if (c == lopt->option[0]
10313 && arg != NULL
10314 && startswith (arg, lopt->option + 1))
10315 {
10316 /* If the option is deprecated, tell the user. */
10317 if (lopt->deprecated != NULL)
10318 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10319 _(lopt->deprecated));
10320
10321 /* Call the sup-option parser. */
10322 return lopt->func (arg + strlen (lopt->option) - 1);
10323 }
10324 }
10325
10326 return 0;
10327 }
10328
10329 return 1;
10330 }
10331
10332 void
10333 md_show_usage (FILE * fp)
10334 {
10335 struct aarch64_option_table *opt;
10336 struct aarch64_long_option_table *lopt;
10337
10338 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10339
10340 for (opt = aarch64_opts; opt->option != NULL; opt++)
10341 if (opt->help != NULL)
10342 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10343
10344 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10345 if (lopt->help != NULL)
10346 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10347
10348 #ifdef OPTION_EB
10349 fprintf (fp, _("\
10350 -EB assemble code for a big-endian cpu\n"));
10351 #endif
10352
10353 #ifdef OPTION_EL
10354 fprintf (fp, _("\
10355 -EL assemble code for a little-endian cpu\n"));
10356 #endif
10357 }
10358
10359 /* Parse a .cpu directive. */
10360
10361 static void
10362 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10363 {
10364 const struct aarch64_cpu_option_table *opt;
10365 char saved_char;
10366 char *name;
10367 char *ext;
10368 size_t optlen;
10369
10370 name = input_line_pointer;
10371 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10372 input_line_pointer++;
10373 saved_char = *input_line_pointer;
10374 *input_line_pointer = 0;
10375
10376 ext = strchr (name, '+');
10377
10378 if (ext != NULL)
10379 optlen = ext - name;
10380 else
10381 optlen = strlen (name);
10382
10383 /* Skip the first "all" entry. */
10384 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10385 if (strlen (opt->name) == optlen
10386 && strncmp (name, opt->name, optlen) == 0)
10387 {
10388 mcpu_cpu_opt = &opt->value;
10389 if (ext != NULL)
10390 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10391 return;
10392
10393 cpu_variant = *mcpu_cpu_opt;
10394
10395 *input_line_pointer = saved_char;
10396 demand_empty_rest_of_line ();
10397 return;
10398 }
10399 as_bad (_("unknown cpu `%s'"), name);
10400 *input_line_pointer = saved_char;
10401 ignore_rest_of_line ();
10402 }
10403
10404
10405 /* Parse a .arch directive. */
10406
10407 static void
10408 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10409 {
10410 const struct aarch64_arch_option_table *opt;
10411 char saved_char;
10412 char *name;
10413 char *ext;
10414 size_t optlen;
10415
10416 name = input_line_pointer;
10417 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10418 input_line_pointer++;
10419 saved_char = *input_line_pointer;
10420 *input_line_pointer = 0;
10421
10422 ext = strchr (name, '+');
10423
10424 if (ext != NULL)
10425 optlen = ext - name;
10426 else
10427 optlen = strlen (name);
10428
10429 /* Skip the first "all" entry. */
10430 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10431 if (strlen (opt->name) == optlen
10432 && strncmp (name, opt->name, optlen) == 0)
10433 {
10434 mcpu_cpu_opt = &opt->value;
10435 if (ext != NULL)
10436 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10437 return;
10438
10439 cpu_variant = *mcpu_cpu_opt;
10440
10441 *input_line_pointer = saved_char;
10442 demand_empty_rest_of_line ();
10443 return;
10444 }
10445
10446 as_bad (_("unknown architecture `%s'\n"), name);
10447 *input_line_pointer = saved_char;
10448 ignore_rest_of_line ();
10449 }
10450
10451 /* Parse a .arch_extension directive. */
10452
10453 static void
10454 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10455 {
10456 char saved_char;
10457 char *ext = input_line_pointer;;
10458
10459 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10460 input_line_pointer++;
10461 saved_char = *input_line_pointer;
10462 *input_line_pointer = 0;
10463
10464 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10465 return;
10466
10467 cpu_variant = *mcpu_cpu_opt;
10468
10469 *input_line_pointer = saved_char;
10470 demand_empty_rest_of_line ();
10471 }
10472
/* Copy symbol information.  Copies the AArch64-specific symbol flag
   word (accessed via AARCH64_GET_FLAG) from SRC to DEST.  Presumably
   invoked through the target's copy-symbol-attributes hook when one
   symbol is equated to another — confirm in tc-aarch64.h.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10480
10481 #ifdef OBJ_ELF
10482 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10483 This is needed so AArch64 specific st_other values can be independently
10484 specified for an IFUNC resolver (that is called by the dynamic linker)
10485 and the symbol it resolves (aliased to the resolver). In particular,
10486 if a function symbol has special st_other value set via directives,
10487 then attaching an IFUNC resolver to that symbol should not override
10488 the st_other setting. Requiring the directive on the IFUNC resolver
10489 symbol would be unexpected and problematic in C code, where the two
10490 symbols appear as two independent function declarations. */
10491
10492 void
10493 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10494 {
10495 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10496 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10497 /* If size is unset, copy size from src. Because we don't track whether
10498 .size has been used, we can't differentiate .size dest, 0 from the case
10499 where dest's size is unset. */
10500 if (!destelf->size && S_GET_SIZE (dest) == 0)
10501 {
10502 if (srcelf->size)
10503 {
10504 destelf->size = XNEW (expressionS);
10505 *destelf->size = *srcelf->size;
10506 }
10507 S_SET_SIZE (dest, S_GET_SIZE (src));
10508 }
10509 }
10510 #endif
10511