aarch64-opc.c revision 1.1.1.5 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose qualifier-matching dumps produced by the
   dump_* helpers below; only compiled in when DEBUG_AARCH64 is defined.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand (e.g. for PTRUE/CNTB).  Indexed by the encoded 5-bit
   value; a null entry indicates a reserved (unnamed) encoding, which is
   printed/parsed as a plain immediate instead.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand (PRFB/PRFH/PRFW/PRFD).  Indexed by the encoded 4-bit
   value; a null entry indicates a reserved encoding.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Shape of an AdvSIMD qualifier sequence, used to decide which operand
   carries the size:Q information (see significant_operand_index).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* All three operands the same arrangement.  */
  DP_VECTOR_LONG,		/* Destination elements twice the source width.  */
  DP_VECTOR_WIDE,		/* Second source half the width of the others.  */
  DP_VECTOR_ACROSS_LANES,	/* Scalar destination, vector source.  */
};
129
/* For each data_pattern value, the index of the operand whose qualifier
   determines the size:Q encoding.  Kept in sync with enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201
/* Instruction bit-field descriptors, indexed by enum aarch64_field_kind.
   Each entry is { lsb, width }: the field's least significant bit
   position and its width in bits within the 32-bit instruction word.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 22,  1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 11,  1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20,  1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22,  1 },	/* SVE_sz: 1-bit element size select.  */
    { 17,  2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 30,  1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16,  4 },	/* SVE_tsz: triangular size select.  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22,  1 },	/* sz: 1-bit element size select.  */
};
332
333 enum aarch64_operand_class
334 aarch64_get_operand_class (enum aarch64_opnd type)
335 {
336 return aarch64_operands[type].op_class;
337 }
338
339 const char *
340 aarch64_get_operand_name (enum aarch64_opnd type)
341 {
342 return aarch64_operands[type].name;
343 }
344
345 /* Get operand description string.
346 This is usually for the diagnosis purpose. */
347 const char *
348 aarch64_get_operand_desc (enum aarch64_opnd type)
349 {
350 return aarch64_operands[type].desc;
351 }
352
/* Table of all conditional affixes, indexed by the 4-bit condition
   encoding.  Each entry lists every accepted spelling (the first is the
   canonical one; the extra names are the SVE condition aliases) together
   with the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
373
374 const aarch64_cond *
375 get_cond_from_value (aarch64_insn value)
376 {
377 assert (value < 16);
378 return &aarch64_conds[(unsigned int) value];
379 }
380
381 const aarch64_cond *
382 get_inverted_cond (const aarch64_cond *cond)
383 {
384 return &aarch64_conds[cond->value ^ 0x1];
385 }
386
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
412
413 enum aarch64_modifier_kind
414 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
415 {
416 return desc - aarch64_operand_modifiers;
417 }
418
419 aarch64_insn
420 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
421 {
422 return aarch64_operand_modifiers[kind].value;
423 }
424
425 enum aarch64_modifier_kind
426 aarch64_get_operand_modifier_from_value (aarch64_insn value,
427 bfd_boolean extend_p)
428 {
429 if (extend_p == TRUE)
430 return AARCH64_MOD_UXTB + value;
431 else
432 return AARCH64_MOD_LSL - value;
433 }
434
435 bfd_boolean
436 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
437 {
438 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
439 ? TRUE : FALSE;
440 }
441
442 static inline bfd_boolean
443 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
444 {
445 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
446 ? TRUE : FALSE;
447 }
448
/* Memory-barrier (DMB/DSB/ISB) option names, indexed by the 4-bit CRm
   encoding.  Encodings with no architectural name are printed as their
   raw "#0xNN" immediate form.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
468
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET;
     the single-space name is never printed (HINT_OPD_F_NOPRINT).  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
486
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   The encoding is built from three subfields:
     op -> op: load = 0 instruction = 1 store = 2
     l -> level: 1-3
     t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   NULL names mark reserved encodings, which are printed as immediates.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
527
528 /* Utilities on value constraint. */
530
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return low <= value && value <= high;
}
536
/* Return non-zero iff VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
543
/* Return 1 iff VALUE fits in a WIDTH-bit signed (two's complement)
   field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << (width - 1);
    return value >= -limit && value < limit;
  }
}
557
/* Return 1 iff VALUE fits in a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << width;
    return value >= 0 && value < limit;
  }
}
571
572 /* Return 1 if OPERAND is SP or WSP. */
573 int
574 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
575 {
576 return ((aarch64_get_operand_class (operand->type)
577 == AARCH64_OPND_CLASS_INT_REG)
578 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
579 && operand->reg.regno == 31);
580 }
581
/* Return 1 if OPERAND is XZR or WZR: an integer register operand
   numbered 31 whose operand code can never denote the stack pointer.
   (The original comment's "WZP" was a typo for WZR.)  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
591
592 /* Return true if the operand *OPERAND that has the operand code
593 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
594 qualified by the qualifier TARGET. */
595
596 static inline int
597 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
598 aarch64_opnd_qualifier_t target)
599 {
600 switch (operand->qualifier)
601 {
602 case AARCH64_OPND_QLF_W:
603 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
604 return 1;
605 break;
606 case AARCH64_OPND_QLF_X:
607 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
608 return 1;
609 break;
610 case AARCH64_OPND_QLF_WSP:
611 if (target == AARCH64_OPND_QLF_W
612 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
613 return 1;
614 break;
615 case AARCH64_OPND_QLF_SP:
616 if (target == AARCH64_OPND_QLF_X
617 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
618 return 1;
619 break;
620 default:
621 break;
622 }
623
624 return 0;
625 }
626
627 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
628 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
629
630 Return NIL if more than one expected qualifiers are found. */
631
632 aarch64_opnd_qualifier_t
633 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
634 int idx,
635 const aarch64_opnd_qualifier_t known_qlf,
636 int known_idx)
637 {
638 int i, saved_i;
639
640 /* Special case.
641
642 When the known qualifier is NIL, we have to assume that there is only
643 one qualifier sequence in the *QSEQ_LIST and return the corresponding
644 qualifier directly. One scenario is that for instruction
645 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
646 which has only one possible valid qualifier sequence
647 NIL, S_D
648 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
649 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
650
651 Because the qualifier NIL has dual roles in the qualifier sequence:
652 it can mean no qualifier for the operand, or the qualifer sequence is
653 not in use (when all qualifiers in the sequence are NILs), we have to
654 handle this special case here. */
655 if (known_qlf == AARCH64_OPND_NIL)
656 {
657 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
658 return qseq_list[0][idx];
659 }
660
661 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
662 {
663 if (qseq_list[i][known_idx] == known_qlf)
664 {
665 if (saved_i != -1)
666 /* More than one sequences are found to have KNOWN_QLF at
667 KNOWN_IDX. */
668 return AARCH64_OPND_NIL;
669 saved_i = i;
670 }
671 }
672
673 return qseq_list[saved_i][idx];
674 }
675
/* Broad category of an operand qualifier; selects how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector variant: esize, nelem, encoding.  */
  OQK_VALUE_IN_RANGE,	/* Immediate constraint: lower bound, upper bound.  */
  OQK_MISC,		/* Miscellaneous (e.g. shift mnemonics).  */
};
683
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are the
     lower bound, the upper bound, and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
696
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use kind 0 (i.e. OQK_NIL) rather than
     OQK_MISC; benign today since the predicates only test
     OQK_OPD_VARIANT / OQK_VALUE_IN_RANGE — confirm before relying on
     OQK_MISC anywhere.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
758
759 static inline bfd_boolean
760 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
761 {
762 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
763 ? TRUE : FALSE;
764 }
765
766 static inline bfd_boolean
767 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
768 {
769 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
770 ? TRUE : FALSE;
771 }
772
773 const char*
774 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
775 {
776 return aarch64_opnd_qualifiers[qualifier].desc;
777 }
778
779 /* Given an operand qualifier, return the expected data element size
780 of a qualified operand. */
781 unsigned char
782 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
783 {
784 assert (operand_variant_qualifier_p (qualifier) == TRUE);
785 return aarch64_opnd_qualifiers[qualifier].data0;
786 }
787
788 unsigned char
789 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
790 {
791 assert (operand_variant_qualifier_p (qualifier) == TRUE);
792 return aarch64_opnd_qualifiers[qualifier].data1;
793 }
794
795 aarch64_insn
796 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
797 {
798 assert (operand_variant_qualifier_p (qualifier) == TRUE);
799 return aarch64_opnd_qualifiers[qualifier].data2;
800 }
801
802 static int
803 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
804 {
805 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
806 return aarch64_opnd_qualifiers[qualifier].data0;
807 }
808
809 static int
810 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
811 {
812 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
813 return aarch64_opnd_qualifiers[qualifier].data1;
814 }
815
816 #ifdef DEBUG_AARCH64
/* Debug-only printf-style trace helper: prints "#### " followed by the
   formatted message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  printf ("#### ");
  va_start (ap, str);
  vprintf (str, ap);
  va_end (ap);
  printf ("\n");
}
827
828 static inline void
829 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
830 {
831 int i;
832 printf ("#### \t");
833 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
834 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
835 printf ("\n");
836 }
837
838 static void
839 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
840 const aarch64_opnd_qualifier_t *qualifier)
841 {
842 int i;
843 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
844
845 aarch64_verbose ("dump_match_qualifiers:");
846 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
847 curr[i] = opnd[i].qualifier;
848 dump_qualifier_sequence (curr);
849 aarch64_verbose ("against");
850 dump_qualifier_sequence (qualifier);
851 }
852 #endif /* DEBUG_AARCH64 */
853
854 /* This function checks if the given instruction INSN is a destructive
855 instruction based on the usage of the registers. It does not recognize
856 unary destructive instructions. */
857 bfd_boolean
858 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
859 {
860 int i = 0;
861 const enum aarch64_opnd *opnds = opcode->operands;
862
863 if (opnds[0] == AARCH64_OPND_NIL)
864 return FALSE;
865
866 while (opnds[++i] != AARCH64_OPND_NIL)
867 if (opnds[i] == opnds[0])
868 return TRUE;
869
870 return FALSE;
871 }
872
873 /* TODO improve this, we can have an extra field at the runtime to
874 store the number of operands rather than calculating it every time. */
875
876 int
877 aarch64_num_of_operands (const aarch64_opcode *opcode)
878 {
879 int i = 0;
880 const enum aarch64_opnd *opnds = opcode->operands;
881 while (opnds[i++] != AARCH64_OPND_NIL)
882 ;
883 --i;
884 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
885 return i;
886 }
887
888 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
889 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
890
891 N.B. on the entry, it is very likely that only some operands in *INST
892 have had their qualifiers been established.
893
894 If STOP_AT is not -1, the function will only try to match
895 the qualifier sequence for operands before and including the operand
896 of index STOP_AT; and on success *RET will only be filled with the first
897 (STOP_AT+1) qualifiers.
898
899 A couple examples of the matching algorithm:
900
901 X,W,NIL should match
902 X,W,NIL
903
904 NIL,NIL should match
905 X ,NIL
906
907 Apart from serving the main encoding routine, this can also be called
908 during or after the operand decoding. */
909
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* No operands, nothing to match; *RET is left untouched.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  QUALIFIERS_LIST is advanced alongside I so that
     on success it points at the matching sequence.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence the loop above broke on.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Operands past STOP_AT are filled with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1012
1013 /* Operand qualifier matching and resolving.
1014
1015 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1016 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1017
   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1019 succeeds. */
1020
1021 static int
1022 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1023 {
1024 int i, nops;
1025 aarch64_opnd_qualifier_seq_t qualifiers;
1026
1027 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1028 qualifiers))
1029 {
1030 DEBUG_TRACE ("matching FAIL");
1031 return 0;
1032 }
1033
1034 if (inst->opcode->flags & F_STRICT)
1035 {
1036 /* Require an exact qualifier match, even for NIL qualifiers. */
1037 nops = aarch64_num_of_operands (inst->opcode);
1038 for (i = 0; i < nops; ++i)
1039 if (inst->operands[i].qualifier != qualifiers[i])
1040 return FALSE;
1041 }
1042
1043 /* Update the qualifiers. */
1044 if (update_p == TRUE)
1045 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1046 {
1047 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1048 break;
1049 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1050 "update %s with %s for operand %d",
1051 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1052 aarch64_get_qualifier_name (qualifiers[i]), i);
1053 inst->operands[i].qualifier = qualifiers[i];
1054 }
1055
1056 DEBUG_TRACE ("matching SUCCESS");
1057 return 1;
1058 }
1059
1060 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1061 register by MOVZ.
1062
1063 IS32 indicates whether value is a 32-bit immediate or not.
1064 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1065 amount will be returned in *SHIFT_AMOUNT. */
1066
1067 bfd_boolean
1068 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1069 {
1070 int amount;
1071
1072 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1073
1074 if (is32)
1075 {
1076 /* Allow all zeros or all ones in top 32-bits, so that
1077 32-bit constant expressions like ~0x80000000 are
1078 permitted. */
1079 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1080 /* Immediate out of range. */
1081 return FALSE;
1082 value &= 0xffffffff;
1083 }
1084
1085 /* first, try movz then movn */
1086 amount = -1;
1087 if ((value & ((uint64_t) 0xffff << 0)) == value)
1088 amount = 0;
1089 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1090 amount = 16;
1091 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1092 amount = 32;
1093 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1094 amount = 48;
1095
1096 if (amount == -1)
1097 {
1098 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1099 return FALSE;
1100 }
1101
1102 if (shift_amount != NULL)
1103 *shift_amount = amount;
1104
1105 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1106
1107 return TRUE;
1108 }
1109
1110 /* Build the accepted values for immediate logical SIMD instructions.
1111
1112 The standard encodings of the immediate value are:
1113 N imms immr SIMD size R S
1114 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1115 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1116 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1117 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1118 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1119 0 11110s 00000r 2 UInt(r) UInt(s)
1120 where all-ones value of S is reserved.
1121
1122 Let's call E the SIMD size.
1123
1124 The immediate value is: S+1 bits '1' rotated to the right by R.
1125
1126 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1127 (remember S != E - 1). */
1128
/* Total number of valid logical-immediate encodings; see the derivation
   in the comment above (64*63 + 32*31 + ... + 2*1 = 5334).  */
#define TOTAL_IMM_NB 5334

/* One look-up table entry: the replicated 64-bit immediate value and its
   standard 13-bit (N:immr:imms) encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid (immediate, encoding) pairs.  It is filled in lazily
   by build_immediate_table and kept sorted by IMM so that
   aarch64_logical_immediate_p can bsearch it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1138
1139 static int
1140 simd_imm_encoding_cmp(const void *i1, const void *i2)
1141 {
1142 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1143 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1144
1145 if (imm1->imm < imm2->imm)
1146 return -1;
1147 if (imm1->imm > imm2->imm)
1148 return +1;
1149 return 0;
1150 }
1151
1152 /* immediate bitfield standard encoding
1153 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1154 1 ssssss rrrrrr 64 rrrrrr ssssss
1155 0 0sssss 0rrrrr 32 rrrrr sssss
1156 0 10ssss 00rrrr 16 rrrr ssss
1157 0 110sss 000rrr 8 rrr sss
1158 0 1110ss 0000rr 4 rr ss
1159 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the N:immr:imms fields into a 13-bit value, as laid out in the
     table above: bit 12 = N (is64), bits 11:6 = R, bits 5:0 = S.  */
  int encoded = (int) s;
  encoded |= (int) (r << 6);
  encoded |= is64 << 12;
  return encoded;
}
1165
/* Fill simd_immediates[] with every valid logical-immediate value and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over element sizes 2, 4, 8, 16, 32, 64.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the high "size marker" bits of the imms field
	     for sub-64-bit element sizes:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S == E - 1 (all ones) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the E-bit element across 64 bits; the cases
	       deliberately fall through so each size doubles the width.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Every valid encoding must have been generated exactly once.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1231
1232 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1233 be accepted by logical (immediate) instructions
1234 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1235
1236 ESIZE is the number of bytes in the decoded immediate value.
1237 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1238 VALUE will be returned in *ENCODING. */
1239
1240 bfd_boolean
1241 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1242 {
1243 simd_imm_encoding imm_enc;
1244 const simd_imm_encoding *imm_encoding;
1245 static bfd_boolean initialized = FALSE;
1246 uint64_t upper;
1247 int i;
1248
1249 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1250 value, esize);
1251
1252 if (!initialized)
1253 {
1254 build_immediate_table ();
1255 initialized = TRUE;
1256 }
1257
1258 /* Allow all zeros or all ones in top bits, so that
1259 constant expressions like ~1 are permitted. */
1260 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1261 if ((value & ~upper) != value && (value | upper) != value)
1262 return FALSE;
1263
1264 /* Replicate to a full 64-bit value. */
1265 value &= ~upper;
1266 for (i = esize * 8; i < 64; i *= 2)
1267 value |= (value << i);
1268
1269 imm_enc.imm = value;
1270 imm_encoding = (const simd_imm_encoding *)
1271 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1272 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1273 if (imm_encoding == NULL)
1274 {
1275 DEBUG_TRACE ("exit with FALSE");
1276 return FALSE;
1277 }
1278 if (encoding != NULL)
1279 *encoding = imm_encoding->encoding;
1280 DEBUG_TRACE ("exit with TRUE");
1281 return TRUE;
1282 }
1283
1284 /* If 64-bit immediate IMM is in the format of
1285 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1286 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1287 of value "abcdefgh". Otherwise return -1. */
1288 int
1289 aarch64_shrink_expanded_imm8 (uint64_t imm)
1290 {
1291 int i, ret;
1292 uint32_t byte;
1293
1294 ret = 0;
1295 for (i = 0; i < 8; i++)
1296 {
1297 byte = (imm >> (8 * i)) & 0xff;
1298 if (byte == 0xff)
1299 ret |= 1 << i;
1300 else if (byte != 0x00)
1301 return -1;
1302 }
1303 return ret;
1304 }
1305
1306 /* Utility inline functions for operand_general_constraint_met_p. */
1307
1308 static inline void
1309 set_error (aarch64_operand_error *mismatch_detail,
1310 enum aarch64_operand_error_kind kind, int idx,
1311 const char* error)
1312 {
1313 if (mismatch_detail == NULL)
1314 return;
1315 mismatch_detail->kind = kind;
1316 mismatch_detail->index = idx;
1317 mismatch_detail->error = error;
1318 }
1319
1320 static inline void
1321 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1322 const char* error)
1323 {
1324 if (mismatch_detail == NULL)
1325 return;
1326 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1327 }
1328
1329 static inline void
1330 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1331 int idx, int lower_bound, int upper_bound,
1332 const char* error)
1333 {
1334 if (mismatch_detail == NULL)
1335 return;
1336 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1337 mismatch_detail->data[0] = lower_bound;
1338 mismatch_detail->data[1] = upper_bound;
1339 }
1340
1341 static inline void
1342 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1343 int idx, int lower_bound, int upper_bound)
1344 {
1345 if (mismatch_detail == NULL)
1346 return;
1347 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1348 _("immediate value"));
1349 }
1350
1351 static inline void
1352 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1353 int idx, int lower_bound, int upper_bound)
1354 {
1355 if (mismatch_detail == NULL)
1356 return;
1357 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1358 _("immediate offset"));
1359 }
1360
1361 static inline void
1362 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1363 int idx, int lower_bound, int upper_bound)
1364 {
1365 if (mismatch_detail == NULL)
1366 return;
1367 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1368 _("register number"));
1369 }
1370
1371 static inline void
1372 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1373 int idx, int lower_bound, int upper_bound)
1374 {
1375 if (mismatch_detail == NULL)
1376 return;
1377 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1378 _("register element index"));
1379 }
1380
1381 static inline void
1382 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1383 int idx, int lower_bound, int upper_bound)
1384 {
1385 if (mismatch_detail == NULL)
1386 return;
1387 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1388 _("shift amount"));
1389 }
1390
1391 /* Report that the MUL modifier in operand IDX should be in the range
1392 [LOWER_BOUND, UPPER_BOUND]. */
1393 static inline void
1394 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1395 int idx, int lower_bound, int upper_bound)
1396 {
1397 if (mismatch_detail == NULL)
1398 return;
1399 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1400 _("multiplier"));
1401 }
1402
1403 static inline void
1404 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1405 int alignment)
1406 {
1407 if (mismatch_detail == NULL)
1408 return;
1409 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1410 mismatch_detail->data[0] = alignment;
1411 }
1412
1413 static inline void
1414 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1415 int expected_num)
1416 {
1417 if (mismatch_detail == NULL)
1418 return;
1419 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1420 mismatch_detail->data[0] = expected_num;
1421 }
1422
1423 static inline void
1424 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1425 const char* error)
1426 {
1427 if (mismatch_detail == NULL)
1428 return;
1429 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1430 }
1431
1432 /* General constraint checking based on operand code.
1433
1434 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1435 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1436
1437 This function has to be called after the qualifiers for all operands
1438 have been resolved.
1439
1440 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1441 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1442 of error message during the disassembling where error message is not
1443 wanted. We avoid the dynamic construction of strings of error messages
1444 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1445 use a combination of error code, static string and some integer data to
1446 represent an error. */
1447
1448 static int
1449 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1450 enum aarch64_opnd type,
1451 const aarch64_opcode *opcode,
1452 aarch64_operand_error *mismatch_detail)
1453 {
1454 unsigned num, modifiers, shift;
1455 unsigned char size;
1456 int64_t imm, min_value, max_value;
1457 uint64_t uvalue, mask;
1458 const aarch64_opnd_info *opnd = opnds + idx;
1459 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1460
1461 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1462
1463 switch (aarch64_operands[type].op_class)
1464 {
1465 case AARCH64_OPND_CLASS_INT_REG:
1466 /* Check pair reg constraints for cas* instructions. */
1467 if (type == AARCH64_OPND_PAIRREG)
1468 {
1469 assert (idx == 1 || idx == 3);
1470 if (opnds[idx - 1].reg.regno % 2 != 0)
1471 {
1472 set_syntax_error (mismatch_detail, idx - 1,
1473 _("reg pair must start from even reg"));
1474 return 0;
1475 }
1476 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1477 {
1478 set_syntax_error (mismatch_detail, idx,
1479 _("reg pair must be contiguous"));
1480 return 0;
1481 }
1482 break;
1483 }
1484
1485 /* <Xt> may be optional in some IC and TLBI instructions. */
1486 if (type == AARCH64_OPND_Rt_SYS)
1487 {
1488 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1489 == AARCH64_OPND_CLASS_SYSTEM));
1490 if (opnds[1].present
1491 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1492 {
1493 set_other_error (mismatch_detail, idx, _("extraneous register"));
1494 return 0;
1495 }
1496 if (!opnds[1].present
1497 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1498 {
1499 set_other_error (mismatch_detail, idx, _("missing register"));
1500 return 0;
1501 }
1502 }
1503 switch (qualifier)
1504 {
1505 case AARCH64_OPND_QLF_WSP:
1506 case AARCH64_OPND_QLF_SP:
1507 if (!aarch64_stack_pointer_p (opnd))
1508 {
1509 set_other_error (mismatch_detail, idx,
1510 _("stack pointer register expected"));
1511 return 0;
1512 }
1513 break;
1514 default:
1515 break;
1516 }
1517 break;
1518
1519 case AARCH64_OPND_CLASS_SVE_REG:
1520 switch (type)
1521 {
1522 case AARCH64_OPND_SVE_Zm3_INDEX:
1523 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1524 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1525 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1526 case AARCH64_OPND_SVE_Zm4_INDEX:
1527 size = get_operand_fields_width (get_operand_from_code (type));
1528 shift = get_operand_specific_data (&aarch64_operands[type]);
1529 mask = (1 << shift) - 1;
1530 if (opnd->reg.regno > mask)
1531 {
1532 assert (mask == 7 || mask == 15);
1533 set_other_error (mismatch_detail, idx,
1534 mask == 15
1535 ? _("z0-z15 expected")
1536 : _("z0-z7 expected"));
1537 return 0;
1538 }
1539 mask = (1u << (size - shift)) - 1;
1540 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1541 {
1542 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1543 return 0;
1544 }
1545 break;
1546
1547 case AARCH64_OPND_SVE_Zn_INDEX:
1548 size = aarch64_get_qualifier_esize (opnd->qualifier);
1549 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1550 {
1551 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1552 0, 64 / size - 1);
1553 return 0;
1554 }
1555 break;
1556
1557 case AARCH64_OPND_SVE_ZnxN:
1558 case AARCH64_OPND_SVE_ZtxN:
1559 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1560 {
1561 set_other_error (mismatch_detail, idx,
1562 _("invalid register list"));
1563 return 0;
1564 }
1565 break;
1566
1567 default:
1568 break;
1569 }
1570 break;
1571
1572 case AARCH64_OPND_CLASS_PRED_REG:
1573 if (opnd->reg.regno >= 8
1574 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1575 {
1576 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1577 return 0;
1578 }
1579 break;
1580
1581 case AARCH64_OPND_CLASS_COND:
1582 if (type == AARCH64_OPND_COND1
1583 && (opnds[idx].cond->value & 0xe) == 0xe)
1584 {
1585 /* Not allow AL or NV. */
1586 set_syntax_error (mismatch_detail, idx, NULL);
1587 }
1588 break;
1589
1590 case AARCH64_OPND_CLASS_ADDRESS:
1591 /* Check writeback. */
1592 switch (opcode->iclass)
1593 {
1594 case ldst_pos:
1595 case ldst_unscaled:
1596 case ldstnapair_offs:
1597 case ldstpair_off:
1598 case ldst_unpriv:
1599 if (opnd->addr.writeback == 1)
1600 {
1601 set_syntax_error (mismatch_detail, idx,
1602 _("unexpected address writeback"));
1603 return 0;
1604 }
1605 break;
1606 case ldst_imm10:
1607 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1608 {
1609 set_syntax_error (mismatch_detail, idx,
1610 _("unexpected address writeback"));
1611 return 0;
1612 }
1613 break;
1614 case ldst_imm9:
1615 case ldstpair_indexed:
1616 case asisdlsep:
1617 case asisdlsop:
1618 if (opnd->addr.writeback == 0)
1619 {
1620 set_syntax_error (mismatch_detail, idx,
1621 _("address writeback expected"));
1622 return 0;
1623 }
1624 break;
1625 default:
1626 assert (opnd->addr.writeback == 0);
1627 break;
1628 }
1629 switch (type)
1630 {
1631 case AARCH64_OPND_ADDR_SIMM7:
1632 /* Scaled signed 7 bits immediate offset. */
1633 /* Get the size of the data element that is accessed, which may be
1634 different from that of the source register size,
1635 e.g. in strb/ldrb. */
1636 size = aarch64_get_qualifier_esize (opnd->qualifier);
1637 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1638 {
1639 set_offset_out_of_range_error (mismatch_detail, idx,
1640 -64 * size, 63 * size);
1641 return 0;
1642 }
1643 if (!value_aligned_p (opnd->addr.offset.imm, size))
1644 {
1645 set_unaligned_error (mismatch_detail, idx, size);
1646 return 0;
1647 }
1648 break;
1649 case AARCH64_OPND_ADDR_OFFSET:
1650 case AARCH64_OPND_ADDR_SIMM9:
1651 /* Unscaled signed 9 bits immediate offset. */
1652 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1653 {
1654 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1655 return 0;
1656 }
1657 break;
1658
1659 case AARCH64_OPND_ADDR_SIMM9_2:
1660 /* Unscaled signed 9 bits immediate offset, which has to be negative
1661 or unaligned. */
1662 size = aarch64_get_qualifier_esize (qualifier);
1663 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1664 && !value_aligned_p (opnd->addr.offset.imm, size))
1665 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1666 return 1;
1667 set_other_error (mismatch_detail, idx,
1668 _("negative or unaligned offset expected"));
1669 return 0;
1670
1671 case AARCH64_OPND_ADDR_SIMM10:
1672 /* Scaled signed 10 bits immediate offset. */
1673 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1674 {
1675 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1676 return 0;
1677 }
1678 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1679 {
1680 set_unaligned_error (mismatch_detail, idx, 8);
1681 return 0;
1682 }
1683 break;
1684
1685 case AARCH64_OPND_ADDR_SIMM11:
1686 /* Signed 11 bits immediate offset (multiple of 16). */
1687 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1688 {
1689 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1690 return 0;
1691 }
1692
1693 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1694 {
1695 set_unaligned_error (mismatch_detail, idx, 16);
1696 return 0;
1697 }
1698 break;
1699
1700 case AARCH64_OPND_ADDR_SIMM13:
1701 /* Signed 13 bits immediate offset (multiple of 16). */
1702 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1703 {
1704 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1705 return 0;
1706 }
1707
1708 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1709 {
1710 set_unaligned_error (mismatch_detail, idx, 16);
1711 return 0;
1712 }
1713 break;
1714
1715 case AARCH64_OPND_SIMD_ADDR_POST:
1716 /* AdvSIMD load/store multiple structures, post-index. */
1717 assert (idx == 1);
1718 if (opnd->addr.offset.is_reg)
1719 {
1720 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1721 return 1;
1722 else
1723 {
1724 set_other_error (mismatch_detail, idx,
1725 _("invalid register offset"));
1726 return 0;
1727 }
1728 }
1729 else
1730 {
1731 const aarch64_opnd_info *prev = &opnds[idx-1];
1732 unsigned num_bytes; /* total number of bytes transferred. */
1733 /* The opcode dependent area stores the number of elements in
1734 each structure to be loaded/stored. */
1735 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1736 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1737 /* Special handling of loading single structure to all lane. */
1738 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1739 * aarch64_get_qualifier_esize (prev->qualifier);
1740 else
1741 num_bytes = prev->reglist.num_regs
1742 * aarch64_get_qualifier_esize (prev->qualifier)
1743 * aarch64_get_qualifier_nelem (prev->qualifier);
1744 if ((int) num_bytes != opnd->addr.offset.imm)
1745 {
1746 set_other_error (mismatch_detail, idx,
1747 _("invalid post-increment amount"));
1748 return 0;
1749 }
1750 }
1751 break;
1752
1753 case AARCH64_OPND_ADDR_REGOFF:
1754 /* Get the size of the data element that is accessed, which may be
1755 different from that of the source register size,
1756 e.g. in strb/ldrb. */
1757 size = aarch64_get_qualifier_esize (opnd->qualifier);
1758 /* It is either no shift or shift by the binary logarithm of SIZE. */
1759 if (opnd->shifter.amount != 0
1760 && opnd->shifter.amount != (int)get_logsz (size))
1761 {
1762 set_other_error (mismatch_detail, idx,
1763 _("invalid shift amount"));
1764 return 0;
1765 }
1766 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1767 operators. */
1768 switch (opnd->shifter.kind)
1769 {
1770 case AARCH64_MOD_UXTW:
1771 case AARCH64_MOD_LSL:
1772 case AARCH64_MOD_SXTW:
1773 case AARCH64_MOD_SXTX: break;
1774 default:
1775 set_other_error (mismatch_detail, idx,
1776 _("invalid extend/shift operator"));
1777 return 0;
1778 }
1779 break;
1780
1781 case AARCH64_OPND_ADDR_UIMM12:
1782 imm = opnd->addr.offset.imm;
1783 /* Get the size of the data element that is accessed, which may be
1784 different from that of the source register size,
1785 e.g. in strb/ldrb. */
1786 size = aarch64_get_qualifier_esize (qualifier);
1787 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1788 {
1789 set_offset_out_of_range_error (mismatch_detail, idx,
1790 0, 4095 * size);
1791 return 0;
1792 }
1793 if (!value_aligned_p (opnd->addr.offset.imm, size))
1794 {
1795 set_unaligned_error (mismatch_detail, idx, size);
1796 return 0;
1797 }
1798 break;
1799
1800 case AARCH64_OPND_ADDR_PCREL14:
1801 case AARCH64_OPND_ADDR_PCREL19:
1802 case AARCH64_OPND_ADDR_PCREL21:
1803 case AARCH64_OPND_ADDR_PCREL26:
1804 imm = opnd->imm.value;
1805 if (operand_need_shift_by_two (get_operand_from_code (type)))
1806 {
1807 /* The offset value in a PC-relative branch instruction is alway
1808 4-byte aligned and is encoded without the lowest 2 bits. */
1809 if (!value_aligned_p (imm, 4))
1810 {
1811 set_unaligned_error (mismatch_detail, idx, 4);
1812 return 0;
1813 }
1814 /* Right shift by 2 so that we can carry out the following check
1815 canonically. */
1816 imm >>= 2;
1817 }
1818 size = get_operand_fields_width (get_operand_from_code (type));
1819 if (!value_fit_signed_field_p (imm, size))
1820 {
1821 set_other_error (mismatch_detail, idx,
1822 _("immediate out of range"));
1823 return 0;
1824 }
1825 break;
1826
1827 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1828 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1829 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1830 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1831 min_value = -8;
1832 max_value = 7;
1833 sve_imm_offset_vl:
1834 assert (!opnd->addr.offset.is_reg);
1835 assert (opnd->addr.preind);
1836 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1837 min_value *= num;
1838 max_value *= num;
1839 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1840 || (opnd->shifter.operator_present
1841 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1842 {
1843 set_other_error (mismatch_detail, idx,
1844 _("invalid addressing mode"));
1845 return 0;
1846 }
1847 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1848 {
1849 set_offset_out_of_range_error (mismatch_detail, idx,
1850 min_value, max_value);
1851 return 0;
1852 }
1853 if (!value_aligned_p (opnd->addr.offset.imm, num))
1854 {
1855 set_unaligned_error (mismatch_detail, idx, num);
1856 return 0;
1857 }
1858 break;
1859
1860 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1861 min_value = -32;
1862 max_value = 31;
1863 goto sve_imm_offset_vl;
1864
1865 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1866 min_value = -256;
1867 max_value = 255;
1868 goto sve_imm_offset_vl;
1869
1870 case AARCH64_OPND_SVE_ADDR_RI_U6:
1871 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1872 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1873 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1874 min_value = 0;
1875 max_value = 63;
1876 sve_imm_offset:
1877 assert (!opnd->addr.offset.is_reg);
1878 assert (opnd->addr.preind);
1879 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1880 min_value *= num;
1881 max_value *= num;
1882 if (opnd->shifter.operator_present
1883 || opnd->shifter.amount_present)
1884 {
1885 set_other_error (mismatch_detail, idx,
1886 _("invalid addressing mode"));
1887 return 0;
1888 }
1889 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1890 {
1891 set_offset_out_of_range_error (mismatch_detail, idx,
1892 min_value, max_value);
1893 return 0;
1894 }
1895 if (!value_aligned_p (opnd->addr.offset.imm, num))
1896 {
1897 set_unaligned_error (mismatch_detail, idx, num);
1898 return 0;
1899 }
1900 break;
1901
1902 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1903 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1904 min_value = -8;
1905 max_value = 7;
1906 goto sve_imm_offset;
1907
1908 case AARCH64_OPND_SVE_ADDR_ZX:
1909 /* Everything is already ensured by parse_operands or
1910 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1911 argument type). */
1912 assert (opnd->addr.offset.is_reg);
1913 assert (opnd->addr.preind);
1914 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1915 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1916 assert (opnd->shifter.operator_present == 0);
1917 break;
1918
1919 case AARCH64_OPND_SVE_ADDR_R:
1920 case AARCH64_OPND_SVE_ADDR_RR:
1921 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1922 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1923 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1924 case AARCH64_OPND_SVE_ADDR_RX:
1925 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1926 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1927 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1928 case AARCH64_OPND_SVE_ADDR_RZ:
1929 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1930 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1931 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1932 modifiers = 1 << AARCH64_MOD_LSL;
1933 sve_rr_operand:
1934 assert (opnd->addr.offset.is_reg);
1935 assert (opnd->addr.preind);
1936 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1937 && opnd->addr.offset.regno == 31)
1938 {
1939 set_other_error (mismatch_detail, idx,
1940 _("index register xzr is not allowed"));
1941 return 0;
1942 }
1943 if (((1 << opnd->shifter.kind) & modifiers) == 0
1944 || (opnd->shifter.amount
1945 != get_operand_specific_data (&aarch64_operands[type])))
1946 {
1947 set_other_error (mismatch_detail, idx,
1948 _("invalid addressing mode"));
1949 return 0;
1950 }
1951 break;
1952
1953 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1954 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1955 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1956 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1957 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1958 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1959 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1960 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1961 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1962 goto sve_rr_operand;
1963
1964 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1965 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1966 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1967 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1968 min_value = 0;
1969 max_value = 31;
1970 goto sve_imm_offset;
1971
1972 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1973 modifiers = 1 << AARCH64_MOD_LSL;
1974 sve_zz_operand:
1975 assert (opnd->addr.offset.is_reg);
1976 assert (opnd->addr.preind);
1977 if (((1 << opnd->shifter.kind) & modifiers) == 0
1978 || opnd->shifter.amount < 0
1979 || opnd->shifter.amount > 3)
1980 {
1981 set_other_error (mismatch_detail, idx,
1982 _("invalid addressing mode"));
1983 return 0;
1984 }
1985 break;
1986
1987 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1988 modifiers = (1 << AARCH64_MOD_SXTW);
1989 goto sve_zz_operand;
1990
1991 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1992 modifiers = 1 << AARCH64_MOD_UXTW;
1993 goto sve_zz_operand;
1994
1995 default:
1996 break;
1997 }
1998 break;
1999
2000 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2001 if (type == AARCH64_OPND_LEt)
2002 {
2003 /* Get the upper bound for the element index. */
2004 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2005 if (!value_in_range_p (opnd->reglist.index, 0, num))
2006 {
2007 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2008 return 0;
2009 }
2010 }
2011 /* The opcode dependent area stores the number of elements in
2012 each structure to be loaded/stored. */
2013 num = get_opcode_dependent_value (opcode);
2014 switch (type)
2015 {
2016 case AARCH64_OPND_LVt:
2017 assert (num >= 1 && num <= 4);
2018 /* Unless LD1/ST1, the number of registers should be equal to that
2019 of the structure elements. */
2020 if (num != 1 && opnd->reglist.num_regs != num)
2021 {
2022 set_reg_list_error (mismatch_detail, idx, num);
2023 return 0;
2024 }
2025 break;
2026 case AARCH64_OPND_LVt_AL:
2027 case AARCH64_OPND_LEt:
2028 assert (num >= 1 && num <= 4);
2029 /* The number of registers should be equal to that of the structure
2030 elements. */
2031 if (opnd->reglist.num_regs != num)
2032 {
2033 set_reg_list_error (mismatch_detail, idx, num);
2034 return 0;
2035 }
2036 break;
2037 default:
2038 break;
2039 }
2040 break;
2041
2042 case AARCH64_OPND_CLASS_IMMEDIATE:
2043 /* Constraint check on immediate operand. */
2044 imm = opnd->imm.value;
2045 /* E.g. imm_0_31 constrains value to be 0..31. */
2046 if (qualifier_value_in_range_constraint_p (qualifier)
2047 && !value_in_range_p (imm, get_lower_bound (qualifier),
2048 get_upper_bound (qualifier)))
2049 {
2050 set_imm_out_of_range_error (mismatch_detail, idx,
2051 get_lower_bound (qualifier),
2052 get_upper_bound (qualifier));
2053 return 0;
2054 }
2055
2056 switch (type)
2057 {
2058 case AARCH64_OPND_AIMM:
2059 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2060 {
2061 set_other_error (mismatch_detail, idx,
2062 _("invalid shift operator"));
2063 return 0;
2064 }
2065 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2066 {
2067 set_other_error (mismatch_detail, idx,
2068 _("shift amount must be 0 or 12"));
2069 return 0;
2070 }
2071 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2072 {
2073 set_other_error (mismatch_detail, idx,
2074 _("immediate out of range"));
2075 return 0;
2076 }
2077 break;
2078
2079 case AARCH64_OPND_HALF:
2080 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2081 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2082 {
2083 set_other_error (mismatch_detail, idx,
2084 _("invalid shift operator"));
2085 return 0;
2086 }
2087 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2088 if (!value_aligned_p (opnd->shifter.amount, 16))
2089 {
2090 set_other_error (mismatch_detail, idx,
2091 _("shift amount must be a multiple of 16"));
2092 return 0;
2093 }
2094 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2095 {
2096 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2097 0, size * 8 - 16);
2098 return 0;
2099 }
2100 if (opnd->imm.value < 0)
2101 {
2102 set_other_error (mismatch_detail, idx,
2103 _("negative immediate value not allowed"));
2104 return 0;
2105 }
2106 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2107 {
2108 set_other_error (mismatch_detail, idx,
2109 _("immediate out of range"));
2110 return 0;
2111 }
2112 break;
2113
2114 case AARCH64_OPND_IMM_MOV:
2115 {
2116 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2117 imm = opnd->imm.value;
2118 assert (idx == 1);
2119 switch (opcode->op)
2120 {
2121 case OP_MOV_IMM_WIDEN:
2122 imm = ~imm;
2123 /* Fall through. */
2124 case OP_MOV_IMM_WIDE:
2125 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2126 {
2127 set_other_error (mismatch_detail, idx,
2128 _("immediate out of range"));
2129 return 0;
2130 }
2131 break;
2132 case OP_MOV_IMM_LOG:
2133 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2134 {
2135 set_other_error (mismatch_detail, idx,
2136 _("immediate out of range"));
2137 return 0;
2138 }
2139 break;
2140 default:
2141 assert (0);
2142 return 0;
2143 }
2144 }
2145 break;
2146
2147 case AARCH64_OPND_NZCV:
2148 case AARCH64_OPND_CCMP_IMM:
2149 case AARCH64_OPND_EXCEPTION:
2150 case AARCH64_OPND_TME_UIMM16:
2151 case AARCH64_OPND_UIMM4:
2152 case AARCH64_OPND_UIMM4_ADDG:
2153 case AARCH64_OPND_UIMM7:
2154 case AARCH64_OPND_UIMM3_OP1:
2155 case AARCH64_OPND_UIMM3_OP2:
2156 case AARCH64_OPND_SVE_UIMM3:
2157 case AARCH64_OPND_SVE_UIMM7:
2158 case AARCH64_OPND_SVE_UIMM8:
2159 case AARCH64_OPND_SVE_UIMM8_53:
2160 size = get_operand_fields_width (get_operand_from_code (type));
2161 assert (size < 32);
2162 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2163 {
2164 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2165 (1u << size) - 1);
2166 return 0;
2167 }
2168 break;
2169
2170 case AARCH64_OPND_UIMM10:
2171 /* Scaled unsigned 10 bits immediate offset. */
2172 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2173 {
2174 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2175 return 0;
2176 }
2177
2178 if (!value_aligned_p (opnd->imm.value, 16))
2179 {
2180 set_unaligned_error (mismatch_detail, idx, 16);
2181 return 0;
2182 }
2183 break;
2184
2185 case AARCH64_OPND_SIMM5:
2186 case AARCH64_OPND_SVE_SIMM5:
2187 case AARCH64_OPND_SVE_SIMM5B:
2188 case AARCH64_OPND_SVE_SIMM6:
2189 case AARCH64_OPND_SVE_SIMM8:
2190 size = get_operand_fields_width (get_operand_from_code (type));
2191 assert (size < 32);
2192 if (!value_fit_signed_field_p (opnd->imm.value, size))
2193 {
2194 set_imm_out_of_range_error (mismatch_detail, idx,
2195 -(1 << (size - 1)),
2196 (1 << (size - 1)) - 1);
2197 return 0;
2198 }
2199 break;
2200
2201 case AARCH64_OPND_WIDTH:
2202 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2203 && opnds[0].type == AARCH64_OPND_Rd);
2204 size = get_upper_bound (qualifier);
2205 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2206 /* lsb+width <= reg.size */
2207 {
2208 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2209 size - opnds[idx-1].imm.value);
2210 return 0;
2211 }
2212 break;
2213
2214 case AARCH64_OPND_LIMM:
2215 case AARCH64_OPND_SVE_LIMM:
2216 {
2217 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2218 uint64_t uimm = opnd->imm.value;
2219 if (opcode->op == OP_BIC)
2220 uimm = ~uimm;
2221 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2222 {
2223 set_other_error (mismatch_detail, idx,
2224 _("immediate out of range"));
2225 return 0;
2226 }
2227 }
2228 break;
2229
2230 case AARCH64_OPND_IMM0:
2231 case AARCH64_OPND_FPIMM0:
2232 if (opnd->imm.value != 0)
2233 {
2234 set_other_error (mismatch_detail, idx,
2235 _("immediate zero expected"));
2236 return 0;
2237 }
2238 break;
2239
2240 case AARCH64_OPND_IMM_ROT1:
2241 case AARCH64_OPND_IMM_ROT2:
2242 case AARCH64_OPND_SVE_IMM_ROT2:
2243 if (opnd->imm.value != 0
2244 && opnd->imm.value != 90
2245 && opnd->imm.value != 180
2246 && opnd->imm.value != 270)
2247 {
2248 set_other_error (mismatch_detail, idx,
2249 _("rotate expected to be 0, 90, 180 or 270"));
2250 return 0;
2251 }
2252 break;
2253
2254 case AARCH64_OPND_IMM_ROT3:
2255 case AARCH64_OPND_SVE_IMM_ROT1:
2256 case AARCH64_OPND_SVE_IMM_ROT3:
2257 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2258 {
2259 set_other_error (mismatch_detail, idx,
2260 _("rotate expected to be 90 or 270"));
2261 return 0;
2262 }
2263 break;
2264
2265 case AARCH64_OPND_SHLL_IMM:
2266 assert (idx == 2);
2267 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2268 if (opnd->imm.value != size)
2269 {
2270 set_other_error (mismatch_detail, idx,
2271 _("invalid shift amount"));
2272 return 0;
2273 }
2274 break;
2275
2276 case AARCH64_OPND_IMM_VLSL:
2277 size = aarch64_get_qualifier_esize (qualifier);
2278 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2279 {
2280 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2281 size * 8 - 1);
2282 return 0;
2283 }
2284 break;
2285
2286 case AARCH64_OPND_IMM_VLSR:
2287 size = aarch64_get_qualifier_esize (qualifier);
2288 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2289 {
2290 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2291 return 0;
2292 }
2293 break;
2294
2295 case AARCH64_OPND_SIMD_IMM:
2296 case AARCH64_OPND_SIMD_IMM_SFT:
2297 /* Qualifier check. */
2298 switch (qualifier)
2299 {
2300 case AARCH64_OPND_QLF_LSL:
2301 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2302 {
2303 set_other_error (mismatch_detail, idx,
2304 _("invalid shift operator"));
2305 return 0;
2306 }
2307 break;
2308 case AARCH64_OPND_QLF_MSL:
2309 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2310 {
2311 set_other_error (mismatch_detail, idx,
2312 _("invalid shift operator"));
2313 return 0;
2314 }
2315 break;
2316 case AARCH64_OPND_QLF_NIL:
2317 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2318 {
2319 set_other_error (mismatch_detail, idx,
2320 _("shift is not permitted"));
2321 return 0;
2322 }
2323 break;
2324 default:
2325 assert (0);
2326 return 0;
2327 }
2328 /* Is the immediate valid? */
2329 assert (idx == 1);
2330 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2331 {
2332 /* uimm8 or simm8 */
2333 if (!value_in_range_p (opnd->imm.value, -128, 255))
2334 {
2335 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2336 return 0;
2337 }
2338 }
2339 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2340 {
2341 /* uimm64 is not
2342 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2343 ffffffffgggggggghhhhhhhh'. */
2344 set_other_error (mismatch_detail, idx,
2345 _("invalid value for immediate"));
2346 return 0;
2347 }
2348 /* Is the shift amount valid? */
2349 switch (opnd->shifter.kind)
2350 {
2351 case AARCH64_MOD_LSL:
2352 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2353 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2354 {
2355 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2356 (size - 1) * 8);
2357 return 0;
2358 }
2359 if (!value_aligned_p (opnd->shifter.amount, 8))
2360 {
2361 set_unaligned_error (mismatch_detail, idx, 8);
2362 return 0;
2363 }
2364 break;
2365 case AARCH64_MOD_MSL:
2366 /* Only 8 and 16 are valid shift amount. */
2367 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2368 {
2369 set_other_error (mismatch_detail, idx,
2370 _("shift amount must be 0 or 16"));
2371 return 0;
2372 }
2373 break;
2374 default:
2375 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2376 {
2377 set_other_error (mismatch_detail, idx,
2378 _("invalid shift operator"));
2379 return 0;
2380 }
2381 break;
2382 }
2383 break;
2384
2385 case AARCH64_OPND_FPIMM:
2386 case AARCH64_OPND_SIMD_FPIMM:
2387 case AARCH64_OPND_SVE_FPIMM8:
2388 if (opnd->imm.is_fp == 0)
2389 {
2390 set_other_error (mismatch_detail, idx,
2391 _("floating-point immediate expected"));
2392 return 0;
2393 }
2394 /* The value is expected to be an 8-bit floating-point constant with
2395 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2396 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2397 instruction). */
2398 if (!value_in_range_p (opnd->imm.value, 0, 255))
2399 {
2400 set_other_error (mismatch_detail, idx,
2401 _("immediate out of range"));
2402 return 0;
2403 }
2404 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2405 {
2406 set_other_error (mismatch_detail, idx,
2407 _("invalid shift operator"));
2408 return 0;
2409 }
2410 break;
2411
2412 case AARCH64_OPND_SVE_AIMM:
2413 min_value = 0;
2414 sve_aimm:
2415 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2416 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2417 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2418 uvalue = opnd->imm.value;
2419 shift = opnd->shifter.amount;
2420 if (size == 1)
2421 {
2422 if (shift != 0)
2423 {
2424 set_other_error (mismatch_detail, idx,
2425 _("no shift amount allowed for"
2426 " 8-bit constants"));
2427 return 0;
2428 }
2429 }
2430 else
2431 {
2432 if (shift != 0 && shift != 8)
2433 {
2434 set_other_error (mismatch_detail, idx,
2435 _("shift amount must be 0 or 8"));
2436 return 0;
2437 }
2438 if (shift == 0 && (uvalue & 0xff) == 0)
2439 {
2440 shift = 8;
2441 uvalue = (int64_t) uvalue / 256;
2442 }
2443 }
2444 mask >>= shift;
2445 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2446 {
2447 set_other_error (mismatch_detail, idx,
2448 _("immediate too big for element size"));
2449 return 0;
2450 }
2451 uvalue = (uvalue - min_value) & mask;
2452 if (uvalue > 0xff)
2453 {
2454 set_other_error (mismatch_detail, idx,
2455 _("invalid arithmetic immediate"));
2456 return 0;
2457 }
2458 break;
2459
2460 case AARCH64_OPND_SVE_ASIMM:
2461 min_value = -128;
2462 goto sve_aimm;
2463
2464 case AARCH64_OPND_SVE_I1_HALF_ONE:
2465 assert (opnd->imm.is_fp);
2466 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2467 {
2468 set_other_error (mismatch_detail, idx,
2469 _("floating-point value must be 0.5 or 1.0"));
2470 return 0;
2471 }
2472 break;
2473
2474 case AARCH64_OPND_SVE_I1_HALF_TWO:
2475 assert (opnd->imm.is_fp);
2476 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2477 {
2478 set_other_error (mismatch_detail, idx,
2479 _("floating-point value must be 0.5 or 2.0"));
2480 return 0;
2481 }
2482 break;
2483
2484 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2485 assert (opnd->imm.is_fp);
2486 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2487 {
2488 set_other_error (mismatch_detail, idx,
2489 _("floating-point value must be 0.0 or 1.0"));
2490 return 0;
2491 }
2492 break;
2493
2494 case AARCH64_OPND_SVE_INV_LIMM:
2495 {
2496 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2497 uint64_t uimm = ~opnd->imm.value;
2498 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2499 {
2500 set_other_error (mismatch_detail, idx,
2501 _("immediate out of range"));
2502 return 0;
2503 }
2504 }
2505 break;
2506
2507 case AARCH64_OPND_SVE_LIMM_MOV:
2508 {
2509 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2510 uint64_t uimm = opnd->imm.value;
2511 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2512 {
2513 set_other_error (mismatch_detail, idx,
2514 _("immediate out of range"));
2515 return 0;
2516 }
2517 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2518 {
2519 set_other_error (mismatch_detail, idx,
2520 _("invalid replicated MOV immediate"));
2521 return 0;
2522 }
2523 }
2524 break;
2525
2526 case AARCH64_OPND_SVE_PATTERN_SCALED:
2527 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2528 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2529 {
2530 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2531 return 0;
2532 }
2533 break;
2534
2535 case AARCH64_OPND_SVE_SHLIMM_PRED:
2536 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2537 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2538 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2539 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2540 {
2541 set_imm_out_of_range_error (mismatch_detail, idx,
2542 0, 8 * size - 1);
2543 return 0;
2544 }
2545 break;
2546
2547 case AARCH64_OPND_SVE_SHRIMM_PRED:
2548 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2549 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2550 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2551 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2552 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2553 {
2554 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2555 return 0;
2556 }
2557 break;
2558
2559 default:
2560 break;
2561 }
2562 break;
2563
2564 case AARCH64_OPND_CLASS_SYSTEM:
2565 switch (type)
2566 {
2567 case AARCH64_OPND_PSTATEFIELD:
2568 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2569 /* MSR UAO, #uimm4
2570 MSR PAN, #uimm4
2571 MSR SSBS,#uimm4
2572 The immediate must be #0 or #1. */
2573 if ((opnd->pstatefield == 0x03 /* UAO. */
2574 || opnd->pstatefield == 0x04 /* PAN. */
2575 || opnd->pstatefield == 0x19 /* SSBS. */
2576 || opnd->pstatefield == 0x1a) /* DIT. */
2577 && opnds[1].imm.value > 1)
2578 {
2579 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2580 return 0;
2581 }
2582 /* MSR SPSel, #uimm4
2583 Uses uimm4 as a control value to select the stack pointer: if
2584 bit 0 is set it selects the current exception level's stack
2585 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2586 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2587 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2588 {
2589 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2590 return 0;
2591 }
2592 break;
2593 default:
2594 break;
2595 }
2596 break;
2597
2598 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2599 /* Get the upper bound for the element index. */
2600 if (opcode->op == OP_FCMLA_ELEM)
2601 /* FCMLA index range depends on the vector size of other operands
2602 and is halfed because complex numbers take two elements. */
2603 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2604 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2605 else
2606 num = 16;
2607 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2608 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2609
2610 /* Index out-of-range. */
2611 if (!value_in_range_p (opnd->reglane.index, 0, num))
2612 {
2613 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2614 return 0;
2615 }
2616 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2617 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2618 number is encoded in "size:M:Rm":
2619 size <Vm>
2620 00 RESERVED
2621 01 0:Rm
2622 10 M:Rm
2623 11 RESERVED */
2624 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2625 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2626 {
2627 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2628 return 0;
2629 }
2630 break;
2631
2632 case AARCH64_OPND_CLASS_MODIFIED_REG:
2633 assert (idx == 1 || idx == 2);
2634 switch (type)
2635 {
2636 case AARCH64_OPND_Rm_EXT:
2637 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2638 && opnd->shifter.kind != AARCH64_MOD_LSL)
2639 {
2640 set_other_error (mismatch_detail, idx,
2641 _("extend operator expected"));
2642 return 0;
2643 }
2644 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2645 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2646 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2647 case. */
2648 if (!aarch64_stack_pointer_p (opnds + 0)
2649 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2650 {
2651 if (!opnd->shifter.operator_present)
2652 {
2653 set_other_error (mismatch_detail, idx,
2654 _("missing extend operator"));
2655 return 0;
2656 }
2657 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2658 {
2659 set_other_error (mismatch_detail, idx,
2660 _("'LSL' operator not allowed"));
2661 return 0;
2662 }
2663 }
2664 assert (opnd->shifter.operator_present /* Default to LSL. */
2665 || opnd->shifter.kind == AARCH64_MOD_LSL);
2666 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2667 {
2668 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2669 return 0;
2670 }
2671 /* In the 64-bit form, the final register operand is written as Wm
2672 for all but the (possibly omitted) UXTX/LSL and SXTX
2673 operators.
2674 N.B. GAS allows X register to be used with any operator as a
2675 programming convenience. */
2676 if (qualifier == AARCH64_OPND_QLF_X
2677 && opnd->shifter.kind != AARCH64_MOD_LSL
2678 && opnd->shifter.kind != AARCH64_MOD_UXTX
2679 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2680 {
2681 set_other_error (mismatch_detail, idx, _("W register expected"));
2682 return 0;
2683 }
2684 break;
2685
2686 case AARCH64_OPND_Rm_SFT:
2687 /* ROR is not available to the shifted register operand in
2688 arithmetic instructions. */
2689 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2690 {
2691 set_other_error (mismatch_detail, idx,
2692 _("shift operator expected"));
2693 return 0;
2694 }
2695 if (opnd->shifter.kind == AARCH64_MOD_ROR
2696 && opcode->iclass != log_shift)
2697 {
2698 set_other_error (mismatch_detail, idx,
2699 _("'ROR' operator not allowed"));
2700 return 0;
2701 }
2702 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2703 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2704 {
2705 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2706 return 0;
2707 }
2708 break;
2709
2710 default:
2711 break;
2712 }
2713 break;
2714
2715 default:
2716 break;
2717 }
2718
2719 return 1;
2720 }
2721
2722 /* Main entrypoint for the operand constraint checking.
2723
2724 Return 1 if operands of *INST meet the constraint applied by the operand
2725 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2726 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2727 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2728 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2729 error kind when it is notified that an instruction does not pass the check).
2730
2731 Un-determined operand qualifiers may get established during the process. */
2732
2733 int
2734 aarch64_match_operands_constraint (aarch64_inst *inst,
2735 aarch64_operand_error *mismatch_detail)
2736 {
2737 int i;
2738
2739 DEBUG_TRACE ("enter");
2740
2741 /* Check for cases where a source register needs to be the same as the
2742 destination register. Do this before matching qualifiers since if
2743 an instruction has both invalid tying and invalid qualifiers,
2744 the error about qualifiers would suggest several alternative
2745 instructions that also have invalid tying. */
2746 i = inst->opcode->tied_operand;
2747 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2748 {
2749 if (mismatch_detail)
2750 {
2751 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2752 mismatch_detail->index = i;
2753 mismatch_detail->error = NULL;
2754 }
2755 return 0;
2756 }
2757
2758 /* Match operands' qualifier.
2759 *INST has already had qualifier establish for some, if not all, of
2760 its operands; we need to find out whether these established
2761 qualifiers match one of the qualifier sequence in
2762 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2763 with the corresponding qualifier in such a sequence.
2764 Only basic operand constraint checking is done here; the more thorough
2765 constraint checking will carried out by operand_general_constraint_met_p,
2766 which has be to called after this in order to get all of the operands'
2767 qualifiers established. */
2768 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2769 {
2770 DEBUG_TRACE ("FAIL on operand qualifier matching");
2771 if (mismatch_detail)
2772 {
2773 /* Return an error type to indicate that it is the qualifier
2774 matching failure; we don't care about which operand as there
2775 are enough information in the opcode table to reproduce it. */
2776 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2777 mismatch_detail->index = -1;
2778 mismatch_detail->error = NULL;
2779 }
2780 return 0;
2781 }
2782
2783 /* Match operands' constraint. */
2784 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2785 {
2786 enum aarch64_opnd type = inst->opcode->operands[i];
2787 if (type == AARCH64_OPND_NIL)
2788 break;
2789 if (inst->operands[i].skip)
2790 {
2791 DEBUG_TRACE ("skip the incomplete operand %d", i);
2792 continue;
2793 }
2794 if (operand_general_constraint_met_p (inst->operands, i, type,
2795 inst->opcode, mismatch_detail) == 0)
2796 {
2797 DEBUG_TRACE ("FAIL on operand %d", i);
2798 return 0;
2799 }
2800 }
2801
2802 DEBUG_TRACE ("PASS");
2803
2804 return 1;
2805 }
2806
2807 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2808 Also updates the TYPE of each INST->OPERANDS with the corresponding
2809 value of OPCODE->OPERANDS.
2810
2811 Note that some operand qualifiers may need to be manually cleared by
2812 the caller before it further calls the aarch64_opcode_encode; by
2813 doing this, it helps the qualifier matching facilities work
2814 properly. */
2815
2816 const aarch64_opcode*
2817 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2818 {
2819 int i;
2820 const aarch64_opcode *old = inst->opcode;
2821
2822 inst->opcode = opcode;
2823
2824 /* Update the operand types. */
2825 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2826 {
2827 inst->operands[i].type = opcode->operands[i];
2828 if (opcode->operands[i] == AARCH64_OPND_NIL)
2829 break;
2830 }
2831
2832 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2833
2834 return old;
2835 }
2836
2837 int
2838 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2839 {
2840 int i;
2841 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2842 if (operands[i] == operand)
2843 return i;
2844 else if (operands[i] == AARCH64_OPND_NIL)
2845 break;
2846 return -1;
2847 }
2848
/* Expand to the 32 names R0...R30, with FOR31 used for register 31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names; indexed as int_reg[has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2880
2881 /* Return the integer register name.
2882 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2883
2884 static inline const char *
2885 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2886 {
2887 const int has_zr = sp_reg_p ? 0 : 1;
2888 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2889 return int_reg[has_zr][is_64][regno];
2890 }
2891
2892 /* Like get_int_reg_name, but IS_64 is always 1. */
2893
2894 static inline const char *
2895 get_64bit_int_reg_name (int regno, int sp_reg_p)
2896 {
2897 const int has_zr = sp_reg_p ? 0 : 1;
2898 return int_reg[has_zr][1][regno];
2899 }
2900
2901 /* Get the name of the integer offset register in OPND, using the shift type
2902 to decide whether it's a word or doubleword. */
2903
2904 static inline const char *
2905 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2906 {
2907 switch (opnd->shifter.kind)
2908 {
2909 case AARCH64_MOD_UXTW:
2910 case AARCH64_MOD_SXTW:
2911 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2912
2913 case AARCH64_MOD_LSL:
2914 case AARCH64_MOD_SXTX:
2915 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2916
2917 default:
2918 abort ();
2919 }
2920 }
2921
2922 /* Get the name of the SVE vector offset register in OPND, using the operand
2923 qualifier to decide whether the suffix should be .S or .D. */
2924
2925 static inline const char *
2926 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2927 {
2928 assert (qualifier == AARCH64_OPND_QLF_S_S
2929 || qualifier == AARCH64_OPND_QLF_S_D);
2930 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2931 }
2932
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Reinterpret a 64-bit pattern as an IEEE double-precision value.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Reinterpret a 32-bit pattern as an IEEE single-precision value.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision values are expanded to single precision before use
   (see expand_fp_imm), hence the 32-bit representation here.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2952
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  const uint32_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t exp_top = frac >> 6;		/* imm8<6> */
  /* Replicate(imm8<6>, 4).  */
  const uint32_t exp_rep4 = exp_top ? 0xf : 0;
  uint64_t result;

  if (size == 8)
    {
      /* Build the top 32 bits of the double pattern, then shift into
	 place; the low 32 bits are all zero.  */
      uint32_t hi = (sign << (63 - 32))			/* imm8<7> */
		    | ((exp_top ^ 1) << (62 - 32))	/* NOT(imm8<6>) */
		    | (exp_rep4 << (58 - 32))
		    | (exp_top << (57 - 32))
		    | (exp_top << (56 - 32))
		    | (exp_top << (55 - 32))	/* Replicate(imm8<6>,7) */
		    | (frac << (48 - 32));	/* imm8<6>:imm8<5:0> */
      result = (uint64_t) hi << 32;
    }
  else if (size == 4 || size == 2)
    {
      result = (sign << 31)			/* imm8<7> */
	       | ((exp_top ^ 1) << 30)		/* NOT(imm8<6>) */
	       | (exp_rep4 << 26)		/* Replicate(imm8<6>,4) */
	       | (frac << 19);			/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
2996
2997 /* Produce the string representation of the register list operand *OPND
2998 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2999 the register name that comes before the register number, such as "v". */
3000 static void
3001 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3002 const char *prefix)
3003 {
3004 const int num_regs = opnd->reglist.num_regs;
3005 const int first_reg = opnd->reglist.first_regno;
3006 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3007 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3008 char tb[8]; /* Temporary buffer. */
3009
3010 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3011 assert (num_regs >= 1 && num_regs <= 4);
3012
3013 /* Prepare the index if any. */
3014 if (opnd->reglist.has_index)
3015 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3016 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3017 else
3018 tb[0] = '\0';
3019
3020 /* The hyphenated form is preferred for disassembly if there are
3021 more than two registers in the list, and the register numbers
3022 are monotonically increasing in increments of one. */
3023 if (num_regs > 2 && last_reg > first_reg)
3024 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3025 prefix, last_reg, qlf_name, tb);
3026 else
3027 {
3028 const int reg0 = first_reg;
3029 const int reg1 = (first_reg + 1) & 0x1f;
3030 const int reg2 = (first_reg + 2) & 0x1f;
3031 const int reg3 = (first_reg + 3) & 0x1f;
3032
3033 switch (num_regs)
3034 {
3035 case 1:
3036 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3037 break;
3038 case 2:
3039 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3040 prefix, reg1, qlf_name, tb);
3041 break;
3042 case 3:
3043 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3044 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3045 prefix, reg2, qlf_name, tb);
3046 break;
3047 case 4:
3048 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3049 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3050 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3051 break;
3052 }
3053 }
3054 }
3055
3056 /* Print the register+immediate address in OPND to BUF, which has SIZE
3057 characters. BASE is the name of the base register. */
3058
3059 static void
3060 print_immediate_offset_address (char *buf, size_t size,
3061 const aarch64_opnd_info *opnd,
3062 const char *base)
3063 {
3064 if (opnd->addr.writeback)
3065 {
3066 if (opnd->addr.preind)
3067 {
3068 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3069 snprintf (buf, size, "[%s]!", base);
3070 else
3071 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3072 }
3073 else
3074 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3075 }
3076 else
3077 {
3078 if (opnd->shifter.operator_present)
3079 {
3080 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3081 snprintf (buf, size, "[%s, #%d, mul vl]",
3082 base, opnd->addr.offset.imm);
3083 }
3084 else if (opnd->addr.offset.imm)
3085 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3086 else
3087 snprintf (buf, size, "[%s]", base);
3088 }
3089 }
3090
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];			/* Temporary buffer for ", <ext> #<amount>".  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
	/* PR 21096: The %100 is to silence a warning about possible truncation.  */
		  (opnd->shifter.amount % 100));
      else
	snprintf (tb, sizeof (tb), ", %s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
}
3131
3132 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3133 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3134 PC, PCREL_P and ADDRESS are used to pass in and return information about
3135 the PC-relative address calculation, where the PC value is passed in
3136 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3137 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3138 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3139
3140 The function serves both the disassembler and the assembler diagnostics
3141 issuer, which is the reason why it lives in this file. */
3142
3143 void
3144 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3145 const aarch64_opcode *opcode,
3146 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3147 bfd_vma *address, char** notes)
3148 {
3149 unsigned int i, num_conds;
3150 const char *name = NULL;
3151 const aarch64_opnd_info *opnd = opnds + idx;
3152 enum aarch64_modifier_kind kind;
3153 uint64_t addr, enum_value;
3154
3155 buf[0] = '\0';
3156 if (pcrel_p)
3157 *pcrel_p = 0;
3158
3159 switch (opnd->type)
3160 {
3161 case AARCH64_OPND_Rd:
3162 case AARCH64_OPND_Rn:
3163 case AARCH64_OPND_Rm:
3164 case AARCH64_OPND_Rt:
3165 case AARCH64_OPND_Rt2:
3166 case AARCH64_OPND_Rs:
3167 case AARCH64_OPND_Ra:
3168 case AARCH64_OPND_Rt_SYS:
3169 case AARCH64_OPND_PAIRREG:
3170 case AARCH64_OPND_SVE_Rm:
3171 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3172 the <ic_op>, therefore we use opnd->present to override the
3173 generic optional-ness information. */
3174 if (opnd->type == AARCH64_OPND_Rt_SYS)
3175 {
3176 if (!opnd->present)
3177 break;
3178 }
3179 /* Omit the operand, e.g. RET. */
3180 else if (optional_operand_p (opcode, idx)
3181 && (opnd->reg.regno
3182 == get_optional_operand_default_value (opcode)))
3183 break;
3184 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3185 || opnd->qualifier == AARCH64_OPND_QLF_X);
3186 snprintf (buf, size, "%s",
3187 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3188 break;
3189
3190 case AARCH64_OPND_Rd_SP:
3191 case AARCH64_OPND_Rn_SP:
3192 case AARCH64_OPND_Rt_SP:
3193 case AARCH64_OPND_SVE_Rn_SP:
3194 case AARCH64_OPND_Rm_SP:
3195 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3196 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3197 || opnd->qualifier == AARCH64_OPND_QLF_X
3198 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3199 snprintf (buf, size, "%s",
3200 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3201 break;
3202
3203 case AARCH64_OPND_Rm_EXT:
3204 kind = opnd->shifter.kind;
3205 assert (idx == 1 || idx == 2);
3206 if ((aarch64_stack_pointer_p (opnds)
3207 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3208 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3209 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3210 && kind == AARCH64_MOD_UXTW)
3211 || (opnd->qualifier == AARCH64_OPND_QLF_X
3212 && kind == AARCH64_MOD_UXTX)))
3213 {
3214 /* 'LSL' is the preferred form in this case. */
3215 kind = AARCH64_MOD_LSL;
3216 if (opnd->shifter.amount == 0)
3217 {
3218 /* Shifter omitted. */
3219 snprintf (buf, size, "%s",
3220 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3221 break;
3222 }
3223 }
3224 if (opnd->shifter.amount)
3225 snprintf (buf, size, "%s, %s #%" PRIi64,
3226 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3227 aarch64_operand_modifiers[kind].name,
3228 opnd->shifter.amount);
3229 else
3230 snprintf (buf, size, "%s, %s",
3231 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3232 aarch64_operand_modifiers[kind].name);
3233 break;
3234
3235 case AARCH64_OPND_Rm_SFT:
3236 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3237 || opnd->qualifier == AARCH64_OPND_QLF_X);
3238 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3239 snprintf (buf, size, "%s",
3240 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3241 else
3242 snprintf (buf, size, "%s, %s #%" PRIi64,
3243 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3244 aarch64_operand_modifiers[opnd->shifter.kind].name,
3245 opnd->shifter.amount);
3246 break;
3247
3248 case AARCH64_OPND_Fd:
3249 case AARCH64_OPND_Fn:
3250 case AARCH64_OPND_Fm:
3251 case AARCH64_OPND_Fa:
3252 case AARCH64_OPND_Ft:
3253 case AARCH64_OPND_Ft2:
3254 case AARCH64_OPND_Sd:
3255 case AARCH64_OPND_Sn:
3256 case AARCH64_OPND_Sm:
3257 case AARCH64_OPND_SVE_VZn:
3258 case AARCH64_OPND_SVE_Vd:
3259 case AARCH64_OPND_SVE_Vm:
3260 case AARCH64_OPND_SVE_Vn:
3261 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3262 opnd->reg.regno);
3263 break;
3264
3265 case AARCH64_OPND_Va:
3266 case AARCH64_OPND_Vd:
3267 case AARCH64_OPND_Vn:
3268 case AARCH64_OPND_Vm:
3269 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3270 aarch64_get_qualifier_name (opnd->qualifier));
3271 break;
3272
3273 case AARCH64_OPND_Ed:
3274 case AARCH64_OPND_En:
3275 case AARCH64_OPND_Em:
3276 case AARCH64_OPND_Em16:
3277 case AARCH64_OPND_SM3_IMM2:
3278 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3279 aarch64_get_qualifier_name (opnd->qualifier),
3280 opnd->reglane.index);
3281 break;
3282
3283 case AARCH64_OPND_VdD1:
3284 case AARCH64_OPND_VnD1:
3285 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3286 break;
3287
3288 case AARCH64_OPND_LVn:
3289 case AARCH64_OPND_LVt:
3290 case AARCH64_OPND_LVt_AL:
3291 case AARCH64_OPND_LEt:
3292 print_register_list (buf, size, opnd, "v");
3293 break;
3294
3295 case AARCH64_OPND_SVE_Pd:
3296 case AARCH64_OPND_SVE_Pg3:
3297 case AARCH64_OPND_SVE_Pg4_5:
3298 case AARCH64_OPND_SVE_Pg4_10:
3299 case AARCH64_OPND_SVE_Pg4_16:
3300 case AARCH64_OPND_SVE_Pm:
3301 case AARCH64_OPND_SVE_Pn:
3302 case AARCH64_OPND_SVE_Pt:
3303 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3304 snprintf (buf, size, "p%d", opnd->reg.regno);
3305 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3306 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3307 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3308 aarch64_get_qualifier_name (opnd->qualifier));
3309 else
3310 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3311 aarch64_get_qualifier_name (opnd->qualifier));
3312 break;
3313
3314 case AARCH64_OPND_SVE_Za_5:
3315 case AARCH64_OPND_SVE_Za_16:
3316 case AARCH64_OPND_SVE_Zd:
3317 case AARCH64_OPND_SVE_Zm_5:
3318 case AARCH64_OPND_SVE_Zm_16:
3319 case AARCH64_OPND_SVE_Zn:
3320 case AARCH64_OPND_SVE_Zt:
3321 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3322 snprintf (buf, size, "z%d", opnd->reg.regno);
3323 else
3324 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3325 aarch64_get_qualifier_name (opnd->qualifier));
3326 break;
3327
3328 case AARCH64_OPND_SVE_ZnxN:
3329 case AARCH64_OPND_SVE_ZtxN:
3330 print_register_list (buf, size, opnd, "z");
3331 break;
3332
3333 case AARCH64_OPND_SVE_Zm3_INDEX:
3334 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3335 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3336 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3337 case AARCH64_OPND_SVE_Zm4_INDEX:
3338 case AARCH64_OPND_SVE_Zn_INDEX:
3339 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3340 aarch64_get_qualifier_name (opnd->qualifier),
3341 opnd->reglane.index);
3342 break;
3343
3344 case AARCH64_OPND_CRn:
3345 case AARCH64_OPND_CRm:
3346 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3347 break;
3348
3349 case AARCH64_OPND_IDX:
3350 case AARCH64_OPND_MASK:
3351 case AARCH64_OPND_IMM:
3352 case AARCH64_OPND_IMM_2:
3353 case AARCH64_OPND_WIDTH:
3354 case AARCH64_OPND_UIMM3_OP1:
3355 case AARCH64_OPND_UIMM3_OP2:
3356 case AARCH64_OPND_BIT_NUM:
3357 case AARCH64_OPND_IMM_VLSL:
3358 case AARCH64_OPND_IMM_VLSR:
3359 case AARCH64_OPND_SHLL_IMM:
3360 case AARCH64_OPND_IMM0:
3361 case AARCH64_OPND_IMMR:
3362 case AARCH64_OPND_IMMS:
3363 case AARCH64_OPND_FBITS:
3364 case AARCH64_OPND_TME_UIMM16:
3365 case AARCH64_OPND_SIMM5:
3366 case AARCH64_OPND_SVE_SHLIMM_PRED:
3367 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3368 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3369 case AARCH64_OPND_SVE_SHRIMM_PRED:
3370 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3371 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3372 case AARCH64_OPND_SVE_SIMM5:
3373 case AARCH64_OPND_SVE_SIMM5B:
3374 case AARCH64_OPND_SVE_SIMM6:
3375 case AARCH64_OPND_SVE_SIMM8:
3376 case AARCH64_OPND_SVE_UIMM3:
3377 case AARCH64_OPND_SVE_UIMM7:
3378 case AARCH64_OPND_SVE_UIMM8:
3379 case AARCH64_OPND_SVE_UIMM8_53:
3380 case AARCH64_OPND_IMM_ROT1:
3381 case AARCH64_OPND_IMM_ROT2:
3382 case AARCH64_OPND_IMM_ROT3:
3383 case AARCH64_OPND_SVE_IMM_ROT1:
3384 case AARCH64_OPND_SVE_IMM_ROT2:
3385 case AARCH64_OPND_SVE_IMM_ROT3:
3386 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3387 break;
3388
3389 case AARCH64_OPND_SVE_I1_HALF_ONE:
3390 case AARCH64_OPND_SVE_I1_HALF_TWO:
3391 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3392 {
3393 single_conv_t c;
3394 c.i = opnd->imm.value;
3395 snprintf (buf, size, "#%.1f", c.f);
3396 break;
3397 }
3398
3399 case AARCH64_OPND_SVE_PATTERN:
3400 if (optional_operand_p (opcode, idx)
3401 && opnd->imm.value == get_optional_operand_default_value (opcode))
3402 break;
3403 enum_value = opnd->imm.value;
3404 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3405 if (aarch64_sve_pattern_array[enum_value])
3406 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3407 else
3408 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3409 break;
3410
3411 case AARCH64_OPND_SVE_PATTERN_SCALED:
3412 if (optional_operand_p (opcode, idx)
3413 && !opnd->shifter.operator_present
3414 && opnd->imm.value == get_optional_operand_default_value (opcode))
3415 break;
3416 enum_value = opnd->imm.value;
3417 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3418 if (aarch64_sve_pattern_array[opnd->imm.value])
3419 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3420 else
3421 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3422 if (opnd->shifter.operator_present)
3423 {
3424 size_t len = strlen (buf);
3425 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3426 aarch64_operand_modifiers[opnd->shifter.kind].name,
3427 opnd->shifter.amount);
3428 }
3429 break;
3430
3431 case AARCH64_OPND_SVE_PRFOP:
3432 enum_value = opnd->imm.value;
3433 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3434 if (aarch64_sve_prfop_array[enum_value])
3435 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3436 else
3437 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3438 break;
3439
3440 case AARCH64_OPND_IMM_MOV:
3441 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3442 {
3443 case 4: /* e.g. MOV Wd, #<imm32>. */
3444 {
3445 int imm32 = opnd->imm.value;
3446 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3447 }
3448 break;
3449 case 8: /* e.g. MOV Xd, #<imm64>. */
3450 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3451 opnd->imm.value, opnd->imm.value);
3452 break;
3453 default: assert (0);
3454 }
3455 break;
3456
3457 case AARCH64_OPND_FPIMM0:
3458 snprintf (buf, size, "#0.0");
3459 break;
3460
3461 case AARCH64_OPND_LIMM:
3462 case AARCH64_OPND_AIMM:
3463 case AARCH64_OPND_HALF:
3464 case AARCH64_OPND_SVE_INV_LIMM:
3465 case AARCH64_OPND_SVE_LIMM:
3466 case AARCH64_OPND_SVE_LIMM_MOV:
3467 if (opnd->shifter.amount)
3468 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3469 opnd->shifter.amount);
3470 else
3471 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3472 break;
3473
3474 case AARCH64_OPND_SIMD_IMM:
3475 case AARCH64_OPND_SIMD_IMM_SFT:
3476 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3477 || opnd->shifter.kind == AARCH64_MOD_NONE)
3478 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3479 else
3480 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3481 aarch64_operand_modifiers[opnd->shifter.kind].name,
3482 opnd->shifter.amount);
3483 break;
3484
3485 case AARCH64_OPND_SVE_AIMM:
3486 case AARCH64_OPND_SVE_ASIMM:
3487 if (opnd->shifter.amount)
3488 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3489 opnd->shifter.amount);
3490 else
3491 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3492 break;
3493
3494 case AARCH64_OPND_FPIMM:
3495 case AARCH64_OPND_SIMD_FPIMM:
3496 case AARCH64_OPND_SVE_FPIMM8:
3497 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3498 {
3499 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3500 {
3501 half_conv_t c;
3502 c.i = expand_fp_imm (2, opnd->imm.value);
3503 snprintf (buf, size, "#%.18e", c.f);
3504 }
3505 break;
3506 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3507 {
3508 single_conv_t c;
3509 c.i = expand_fp_imm (4, opnd->imm.value);
3510 snprintf (buf, size, "#%.18e", c.f);
3511 }
3512 break;
3513 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3514 {
3515 double_conv_t c;
3516 c.i = expand_fp_imm (8, opnd->imm.value);
3517 snprintf (buf, size, "#%.18e", c.d);
3518 }
3519 break;
3520 default: assert (0);
3521 }
3522 break;
3523
3524 case AARCH64_OPND_CCMP_IMM:
3525 case AARCH64_OPND_NZCV:
3526 case AARCH64_OPND_EXCEPTION:
3527 case AARCH64_OPND_UIMM4:
3528 case AARCH64_OPND_UIMM4_ADDG:
3529 case AARCH64_OPND_UIMM7:
3530 case AARCH64_OPND_UIMM10:
3531 if (optional_operand_p (opcode, idx) == TRUE
3532 && (opnd->imm.value ==
3533 (int64_t) get_optional_operand_default_value (opcode)))
3534 /* Omit the operand, e.g. DCPS1. */
3535 break;
3536 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3537 break;
3538
3539 case AARCH64_OPND_COND:
3540 case AARCH64_OPND_COND1:
3541 snprintf (buf, size, "%s", opnd->cond->names[0]);
3542 num_conds = ARRAY_SIZE (opnd->cond->names);
3543 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3544 {
3545 size_t len = strlen (buf);
3546 if (i == 1)
3547 snprintf (buf + len, size - len, " // %s = %s",
3548 opnd->cond->names[0], opnd->cond->names[i]);
3549 else
3550 snprintf (buf + len, size - len, ", %s",
3551 opnd->cond->names[i]);
3552 }
3553 break;
3554
3555 case AARCH64_OPND_ADDR_ADRP:
3556 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3557 + opnd->imm.value;
3558 if (pcrel_p)
3559 *pcrel_p = 1;
3560 if (address)
3561 *address = addr;
3562 /* This is not necessary during the disassembling, as print_address_func
3563 in the disassemble_info will take care of the printing. But some
3564 other callers may be still interested in getting the string in *STR,
3565 so here we do snprintf regardless. */
3566 snprintf (buf, size, "#0x%" PRIx64, addr);
3567 break;
3568
3569 case AARCH64_OPND_ADDR_PCREL14:
3570 case AARCH64_OPND_ADDR_PCREL19:
3571 case AARCH64_OPND_ADDR_PCREL21:
3572 case AARCH64_OPND_ADDR_PCREL26:
3573 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3574 if (pcrel_p)
3575 *pcrel_p = 1;
3576 if (address)
3577 *address = addr;
3578 /* This is not necessary during the disassembling, as print_address_func
3579 in the disassemble_info will take care of the printing. But some
3580 other callers may be still interested in getting the string in *STR,
3581 so here we do snprintf regardless. */
3582 snprintf (buf, size, "#0x%" PRIx64, addr);
3583 break;
3584
3585 case AARCH64_OPND_ADDR_SIMPLE:
3586 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3587 case AARCH64_OPND_SIMD_ADDR_POST:
3588 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3589 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3590 {
3591 if (opnd->addr.offset.is_reg)
3592 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3593 else
3594 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3595 }
3596 else
3597 snprintf (buf, size, "[%s]", name);
3598 break;
3599
3600 case AARCH64_OPND_ADDR_REGOFF:
3601 case AARCH64_OPND_SVE_ADDR_R:
3602 case AARCH64_OPND_SVE_ADDR_RR:
3603 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3604 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3605 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3606 case AARCH64_OPND_SVE_ADDR_RX:
3607 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3608 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3609 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3610 print_register_offset_address
3611 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3612 get_offset_int_reg_name (opnd));
3613 break;
3614
3615 case AARCH64_OPND_SVE_ADDR_ZX:
3616 print_register_offset_address
3617 (buf, size, opnd,
3618 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3619 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3620 break;
3621
3622 case AARCH64_OPND_SVE_ADDR_RZ:
3623 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3624 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3625 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3626 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3627 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3628 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3629 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3630 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3631 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3632 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3633 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3634 print_register_offset_address
3635 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3636 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3637 break;
3638
3639 case AARCH64_OPND_ADDR_SIMM7:
3640 case AARCH64_OPND_ADDR_SIMM9:
3641 case AARCH64_OPND_ADDR_SIMM9_2:
3642 case AARCH64_OPND_ADDR_SIMM10:
3643 case AARCH64_OPND_ADDR_SIMM11:
3644 case AARCH64_OPND_ADDR_SIMM13:
3645 case AARCH64_OPND_ADDR_OFFSET:
3646 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3647 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3648 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3649 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3650 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3651 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3652 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3653 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3654 case AARCH64_OPND_SVE_ADDR_RI_U6:
3655 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3656 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3657 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3658 print_immediate_offset_address
3659 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3660 break;
3661
3662 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3663 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3664 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3665 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3666 print_immediate_offset_address
3667 (buf, size, opnd,
3668 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3669 break;
3670
3671 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3672 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3673 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3674 print_register_offset_address
3675 (buf, size, opnd,
3676 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3677 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3678 break;
3679
3680 case AARCH64_OPND_ADDR_UIMM12:
3681 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3682 if (opnd->addr.offset.imm)
3683 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3684 else
3685 snprintf (buf, size, "[%s]", name);
3686 break;
3687
3688 case AARCH64_OPND_SYSREG:
3689 for (i = 0; aarch64_sys_regs[i].name; ++i)
3690 {
3691 bfd_boolean exact_match
3692 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3693 == opnd->sysreg.flags;
3694
3695 /* Try and find an exact match, But if that fails, return the first
3696 partial match that was found. */
3697 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3698 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3699 && (name == NULL || exact_match))
3700 {
3701 name = aarch64_sys_regs[i].name;
3702 if (exact_match)
3703 {
3704 if (notes)
3705 *notes = NULL;
3706 break;
3707 }
3708
3709 /* If we didn't match exactly, that means the presense of a flag
3710 indicates what we didn't want for this instruction. e.g. If
3711 F_REG_READ is there, that means we were looking for a write
3712 register. See aarch64_ext_sysreg. */
3713 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3714 *notes = _("reading from a write-only register");
3715 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3716 *notes = _("writing to a read-only register");
3717 }
3718 }
3719
3720 if (name)
3721 snprintf (buf, size, "%s", name);
3722 else
3723 {
3724 /* Implementation defined system register. */
3725 unsigned int value = opnd->sysreg.value;
3726 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3727 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3728 value & 0x7);
3729 }
3730 break;
3731
3732 case AARCH64_OPND_PSTATEFIELD:
3733 for (i = 0; aarch64_pstatefields[i].name; ++i)
3734 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3735 break;
3736 assert (aarch64_pstatefields[i].name);
3737 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3738 break;
3739
3740 case AARCH64_OPND_SYSREG_AT:
3741 case AARCH64_OPND_SYSREG_DC:
3742 case AARCH64_OPND_SYSREG_IC:
3743 case AARCH64_OPND_SYSREG_TLBI:
3744 case AARCH64_OPND_SYSREG_SR:
3745 snprintf (buf, size, "%s", opnd->sysins_op->name);
3746 break;
3747
3748 case AARCH64_OPND_BARRIER:
3749 snprintf (buf, size, "%s", opnd->barrier->name);
3750 break;
3751
3752 case AARCH64_OPND_BARRIER_ISB:
3753 /* Operand can be omitted, e.g. in DCPS1. */
3754 if (! optional_operand_p (opcode, idx)
3755 || (opnd->barrier->value
3756 != get_optional_operand_default_value (opcode)))
3757 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3758 break;
3759
3760 case AARCH64_OPND_PRFOP:
3761 if (opnd->prfop->name != NULL)
3762 snprintf (buf, size, "%s", opnd->prfop->name);
3763 else
3764 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3765 break;
3766
3767 case AARCH64_OPND_BARRIER_PSB:
3768 case AARCH64_OPND_BTI_TARGET:
3769 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3770 snprintf (buf, size, "%s", opnd->hint_option->name);
3771 break;
3772
3773 default:
3774 assert (0);
3775 }
3776 }
3777
/* Pack the (op0, op1, CRn, CRm, op2) fields of a system-register or
   system-instruction encoding into a single comparable value; used for
   the `value' members of the tables below.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm field values 0..15, to keep the
   register tables below readable.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
3802
3803 /* TODO there is one more issues need to be resolved
3804 1. handle cpu-implementation-defined system registers. */
3805 const aarch64_sys_reg aarch64_sys_regs [] =
3806 {
3807 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3808 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3809 { "elr_el1", CPEN_(0,C0,1), 0 },
3810 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3811 { "sp_el0", CPEN_(0,C1,0), 0 },
3812 { "spsel", CPEN_(0,C2,0), 0 },
3813 { "daif", CPEN_(3,C2,1), 0 },
3814 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3815 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3816 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3817 { "nzcv", CPEN_(3,C2,0), 0 },
3818 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3819 { "fpcr", CPEN_(3,C4,0), 0 },
3820 { "fpsr", CPEN_(3,C4,1), 0 },
3821 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3822 { "dlr_el0", CPEN_(3,C5,1), 0 },
3823 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3824 { "elr_el2", CPEN_(4,C0,1), 0 },
3825 { "sp_el1", CPEN_(4,C1,0), 0 },
3826 { "spsr_irq", CPEN_(4,C3,0), 0 },
3827 { "spsr_abt", CPEN_(4,C3,1), 0 },
3828 { "spsr_und", CPEN_(4,C3,2), 0 },
3829 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3830 { "spsr_el3", CPEN_(6,C0,0), 0 },
3831 { "elr_el3", CPEN_(6,C0,1), 0 },
3832 { "sp_el2", CPEN_(6,C1,0), 0 },
3833 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3834 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3835 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3836 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3837 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3838 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3839 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3840 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3841 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3842 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3843 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3844 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3845 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3846 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3847 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3848 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3849 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3850 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3851 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3852 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3853 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3854 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3855 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3856 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3857 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3858 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3859 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3860 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3861 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3862 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3863 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3864 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3865 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3866 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3867 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3868 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3869 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3870 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3871 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3872 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3873 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3874 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3875 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3876 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3877 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3878 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3879 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3880 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3881 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3882 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3883 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3884 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3885 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3886 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3887 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3888 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3889 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3890 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3891 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3892 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3893 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3894 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3895 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3896 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3897 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3898 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3899 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3900 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3901 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3902 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3903 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3904 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3905 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3906 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3907 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3908 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3909 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3910 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3911 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3912 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3913 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3914 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3915 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3916 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3917 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3918 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3919 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3920 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3921 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3922 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3923 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3924 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3925 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3926 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3927 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3928 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3929 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3930 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3931 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3932 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3933 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3934 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3935 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3936 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3937 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3938 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3939 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3940 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3941 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3942 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3943 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3944 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3945 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3946 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3947 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3948 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3949 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3950 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3951 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3952 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3953 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3954 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3955 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3956 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3957 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3958 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3959 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3960 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3961 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3962 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3963 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3964 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3965 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3966 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3967 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3968 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3969 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3970 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3971 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3972 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3973 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3974 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3975 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3976 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3977 { "tfsre0_el1", CPENC(3,0,C5,C6,1), F_ARCHEXT },
3978 { "tfsr_el1", CPENC(3,0,C5,C6,0), F_ARCHEXT },
3979 { "tfsr_el2", CPENC(3,4,C5,C6,0), F_ARCHEXT },
3980 { "tfsr_el3", CPENC(3,6,C5,C6,0), F_ARCHEXT },
3981 { "tfsr_el12", CPENC(3,5,C5,C6,0), F_ARCHEXT },
3982 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3983 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3984 { "gmid_el1", CPENC(3,1,C0,C0,4), F_ARCHEXT | F_REG_READ }, /* RO */
3985 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3986 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3987 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3988 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3989 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3990 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3991 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3992 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3993 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3994 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3995 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3996 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3997 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3998 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3999 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
4000 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
4001 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
4002 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
4003 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
4004 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
4005 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
4006 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
4007 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
4008 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
4009 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
4010 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
4011 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
4012 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
4013 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
4014 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
4015 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
4016 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
4017 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
4018 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
4019 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
4020 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
4021 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
4022 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
4023 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
4024 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
4025 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
4026 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
4027 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
4028 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
4029 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
4030 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
4031 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
4032 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
4033 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
4034 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
4035 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
4036 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
4037 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
4038 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
4039 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
4040 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
4041 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
4042 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
4043 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4044 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4045 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4046 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4047 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4048 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4049 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4050 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4051 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4052 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4053 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4054 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4055 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4056 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4057 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4058 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4059 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4060 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4061 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4062 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4063 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4064 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4065 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4066 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4067 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4068 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4069 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4070 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4071 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4072 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4073 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4074 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4075 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4076 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4077 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4078 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4079 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4080 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4081 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4082 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4083 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4084 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4085 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4086 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4087 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4088 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4089 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4090 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4091 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4092 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4093 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4094 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4095 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4096 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4097 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4098 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4099 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4100 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4101 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4102 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4103 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4104 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4105 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4106 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4107 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4108 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4109 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4110 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4111 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4112 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4113 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4114 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4115 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4116 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4117 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4118 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4119 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4120 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4121 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4122 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4123 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4124 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4125 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4126 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4127 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4128 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4129 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4130 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4131 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4132 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4133 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4134 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4135 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4136 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4137 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4138 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4139 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4140 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4141 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4142 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4143 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4144 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4145 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4146 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4147 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4148 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4149 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4150 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4151 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4152 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4153 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4154 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4155 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4156 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4157 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4158 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4159 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4160 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4161 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4162 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4163 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4164 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4165 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4166 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4167 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4168 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4169 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4170 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4171 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4172 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4173 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4174 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4175 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4176 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4177 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4178 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4179 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4180 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4181 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4182 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4183 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4184 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4185 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4186 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4187 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4188 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4189 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4190 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4191 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4192 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4193 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4194 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4195 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4196 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4197 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4198 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4199 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4200 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4201
4202 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4203 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4204 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4205 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4206 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4207 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4208 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4209 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4210 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4211 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4212 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4213 { 0, CPENC(0,0,0,0,0), 0 },
4214 };
4215
4216 bfd_boolean
4217 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4218 {
4219 return (reg->flags & F_DEPRECATED) != 0;
4220 }
4221
/* Return TRUE if the system register described by REG is available
   under the feature set FEATURES.  Entries without F_ARCHEXT belong to
   the base architecture and are always available; entries with
   F_ARCHEXT are matched by their encoded value against the register
   groups below, each gated on the architecture extension that
   introduced them.  The encodings compared against are the same values
   used in the aarch64_sys_regs table above.  */

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers not tied to an architecture extension need no check.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* SCXTNUM_ELx registers.  */
  if ((reg->value == CPENC (3, 3, C13, C0, 7)
       || reg->value == CPENC (3, 0, C13, C0, 7)
       || reg->value == CPENC (3, 4, C13, C0, 7)
       || reg->value == CPENC (3, 6, C13, C0, 7)
       || reg->value == CPENC (3, 5, C13, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
    return FALSE;

  /* ID_PFR2_EL1 register.  */
  if (reg->value == CPENC(3, 0, C0, C3, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
    return FALSE;

  /* SSBS.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(3,C2,6)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension (PMB*/PMS* registers).  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Random Number Instructions.  For now they are available
     (and optional) only with ARMv8.5-A.  */
  if ((reg->value == CPENC (3, 3, C2, C4, 0)
       || reg->value == CPENC (3, 3, C2, C4, 1))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
    return FALSE;

  /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
  if ((reg->value == CPENC (3, 3, C4, C2, 7)
       || reg->value == CPENC (3, 0, C5, C6, 1)
       || reg->value == CPENC (3, 0, C5, C6, 0)
       || reg->value == CPENC (3, 4, C5, C6, 0)
       || reg->value == CPENC (3, 6, C5, C6, 0)
       || reg->value == CPENC (3, 5, C5, C6, 0)
       || reg->value == CPENC (3, 0, C1, C0, 5)
       || reg->value == CPENC (3, 0, C1, C0, 6)
       || reg->value == CPENC (3, 1, C0, C0, 4))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
    return FALSE;

  /* No gating group matched, so the extension register is supported.  */
  return TRUE;
}
4463
4464 /* The CPENC below is fairly misleading, the fields
4465 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4466 by ins_pstatefield, which just shifts the value by the width of the fields
4467 in a loop. So if you CPENC them only the first value will be set, the rest
4468 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
4469 value of 0b110000000001000000 (0x30040) while what you want is
4470 0b011010 (0x1a). */
4471 const aarch64_sys_reg aarch64_pstatefields [] =
4472 {
4473 { "spsel", 0x05, 0 },
4474 { "daifset", 0x1e, 0 },
4475 { "daifclr", 0x1f, 0 },
4476 { "pan", 0x04, F_ARCHEXT },
4477 { "uao", 0x03, F_ARCHEXT },
4478 { "ssbs", 0x19, F_ARCHEXT },
4479 { "dit", 0x1a, F_ARCHEXT },
4480 { "tco", 0x1c, F_ARCHEXT },
4481 { 0, CPENC(0,0,0,0,0), 0 },
4482 };
4483
4484 bfd_boolean
4485 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4486 const aarch64_sys_reg *reg)
4487 {
4488 if (!(reg->flags & F_ARCHEXT))
4489 return TRUE;
4490
4491 /* PAN. Values are from aarch64_pstatefields. */
4492 if (reg->value == 0x04
4493 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4494 return FALSE;
4495
4496 /* UAO. Values are from aarch64_pstatefields. */
4497 if (reg->value == 0x03
4498 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4499 return FALSE;
4500
4501 /* SSBS. Values are from aarch64_pstatefields. */
4502 if (reg->value == 0x19
4503 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4504 return FALSE;
4505
4506 /* DIT. Values are from aarch64_pstatefields. */
4507 if (reg->value == 0x1a
4508 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4509 return FALSE;
4510
4511 /* TCO. Values are from aarch64_pstatefields. */
4512 if (reg->value == 0x1c
4513 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4514 return FALSE;
4515
4516 return TRUE;
4517 }
4518
/* Operands for the IC system instruction.  F_HASXT marks operations
   that take an Xt register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Terminating entry.  */
};
4526
/* Operands for the DC system instruction.  F_HASXT marks operations
   that take an Xt register operand; F_ARCHEXT marks those gated on an
   architecture extension (see aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Terminating entry.  */
};
4559
/* Operands for the AT system instruction.  All operations take an Xt
   register operand (F_HASXT); those with F_ARCHEXT are gated on an
   architecture extension (see aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Terminating entry.  */
};
4578
/* Operands for the TLBI system instruction.  F_HASXT marks operations
   that take an Xt register operand; F_ARCHEXT marks those gated on an
   architecture extension (the CPENS values here are the ones matched
   by the "ARMv8.4 TLB instructions" group in
   aarch64_sys_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* Extension operations (F_ARCHEXT).  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",    CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",  CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",   CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",   CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",  CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",    CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",   CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",    CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",   CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",   CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",   CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",   CPENS (6, C8, C1, 0), F_ARCHEXT },

    { "rvae1",     CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",    CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",    CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",   CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",   CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",  CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",  CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",   CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",  CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",  CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",  CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",     CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",    CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",   CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",  CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",   CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",  CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",     CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",    CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",   CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",  CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",   CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",  CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }	/* Terminating entry.  */
};
4664
/* System registers for the speculation-restriction (PredRes) instructions
   CFP, DVP and CPP.  Terminated by an all-zero sentinel entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
4675
4676 bfd_boolean
4677 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4678 {
4679 return (sys_ins_reg->flags & F_HASXT) != 0;
4680 }
4681
/* Return TRUE if the system instruction register REG is usable with the
   CPU feature set FEATURES.  Registers without F_ARCHEXT are part of the
   base architecture and are always available; the remainder are matched
   by their CPENS encoding against the architecture extension that
   introduced them.  */

extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  /* Registers without the F_ARCHEXT flag need no feature check.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C13, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return FALSE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg->value == CPENS (0, C7, C6, 3)
       || reg->value == CPENS (0, C7, C6, 4)
       || reg->value == CPENS (0, C7, C10, 4)
       || reg->value == CPENS (0, C7, C14, 4)
       || reg->value == CPENS (3, C7, C10, 3)
       || reg->value == CPENS (3, C7, C12, 3)
       || reg->value == CPENS (3, C7, C13, 3)
       || reg->value == CPENS (3, C7, C14, 3)
       || reg->value == CPENS (3, C7, C4, 3)
       || reg->value == CPENS (0, C7, C6, 5)
       || reg->value == CPENS (0, C7, C6, 6)
       || reg->value == CPENS (0, C7, C10, 6)
       || reg->value == CPENS (0, C7, C14, 6)
       || reg->value == CPENS (3, C7, C10, 5)
       || reg->value == CPENS (3, C7, C12, 5)
       || reg->value == CPENS (3, C7, C13, 5)
       || reg->value == CPENS (3, C7, C14, 5)
       || reg->value == CPENS (3, C7, C4, 4))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg->value == CPENS (3, C7, C3, 0)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return FALSE;

  return TRUE;
}
4734
4735 #undef C0
4736 #undef C1
4737 #undef C2
4738 #undef C3
4739 #undef C4
4740 #undef C5
4741 #undef C6
4742 #undef C7
4743 #undef C8
4744 #undef C9
4745 #undef C10
4746 #undef C11
4747 #undef C12
4748 #undef C13
4749 #undef C14
4750 #undef C15
4751
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN, right-aligned.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4754
4755 static enum err_type
4756 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4757 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4758 bfd_boolean encoding ATTRIBUTE_UNUSED,
4759 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4760 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4761 {
4762 int t = BITS (insn, 4, 0);
4763 int n = BITS (insn, 9, 5);
4764 int t2 = BITS (insn, 14, 10);
4765
4766 if (BIT (insn, 23))
4767 {
4768 /* Write back enabled. */
4769 if ((t == n || t2 == n) && n != 31)
4770 return ERR_UND;
4771 }
4772
4773 if (BIT (insn, 22))
4774 {
4775 /* Load */
4776 if (t == t2)
4777 return ERR_UND;
4778 }
4779
4780 return ERR_OK;
4781 }
4782
4783 /* Verifier for vector by element 3 operands functions where the
4784 conditions `if sz:L == 11 then UNDEFINED` holds. */
4785
4786 static enum err_type
4787 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4788 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4789 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4790 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4791 {
4792 const aarch64_insn undef_pattern = 0x3;
4793 aarch64_insn value;
4794
4795 assert (inst->opcode);
4796 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4797 value = encoding ? inst->value : insn;
4798 assert (value);
4799
4800 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4801 return ERR_UND;
4802
4803 return ERR_OK;
4804 }
4805
4806 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4807 If INST is NULL the given insn_sequence is cleared and the sequence is left
4808 uninitialized. */
4809
4810 void
4811 init_insn_sequence (const struct aarch64_inst *inst,
4812 aarch64_instr_sequence *insn_sequence)
4813 {
4814 int num_req_entries = 0;
4815 insn_sequence->next_insn = 0;
4816 insn_sequence->num_insns = num_req_entries;
4817 if (insn_sequence->instr)
4818 XDELETE (insn_sequence->instr);
4819 insn_sequence->instr = NULL;
4820
4821 if (inst)
4822 {
4823 insn_sequence->instr = XNEW (aarch64_inst);
4824 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4825 }
4826
4827 /* Handle all the cases here. May need to think of something smarter than
4828 a giant if/else chain if this grows. At that time, a lookup table may be
4829 best. */
4830 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4831 num_req_entries = 1;
4832
4833 if (insn_sequence->current_insns)
4834 XDELETEVEC (insn_sequence->current_insns);
4835 insn_sequence->current_insns = NULL;
4836
4837 if (num_req_entries != 0)
4838 {
4839 size_t size = num_req_entries * sizeof (aarch64_inst);
4840 insn_sequence->current_insns
4841 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4842 memset (insn_sequence->current_insns, 0, size);
4843 }
4844 }
4845
4846
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bfd_boolean encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  /* Fast path: nothing to check when this opcode carries no constraints
     and no dependency sequence is currently open.  */
  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* An F_SCAN instruction (e.g. movprfx) while a sequence is already
	 open means the previous sequence was never closed.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	}

      /* Open a new sequence anchored on this instruction.  */
      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (!encoding && pc == 0)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bfd_boolean predicated = FALSE;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = TRUE;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan the operands of the current instruction, counting uses of
	     the movprfx destination register and recording the last
	     predicate operand (if any) and the widest element size seen.  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }
	}

done:
      /* Add the new instruction to the sequence.  */
      memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
	      inst, sizeof (aarch64_inst));

      /* Check if sequence is now full.  */
      if (insn_sequence->next_insn >= insn_sequence->num_insns)
	{
	  /* Sequence is full, but we don't have anything special to do for now,
	     so clear and reset it.  */
	  init_insn_sequence (NULL, insn_sequence);
	}
    }

  return res;
}
5132
5133
5134 /* Return true if VALUE cannot be moved into an SVE register using DUP
5135 (with any element size, not just ESIZE) and if using DUPM would
5136 therefore be OK. ESIZE is the number of bytes in the immediate. */
5137
5138 bfd_boolean
5139 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5140 {
5141 int64_t svalue = uvalue;
5142 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5143
5144 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5145 return FALSE;
5146 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5147 {
5148 svalue = (int32_t) uvalue;
5149 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5150 {
5151 svalue = (int16_t) uvalue;
5152 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5153 return FALSE;
5154 }
5155 }
5156 if ((svalue & 0xff) == 0)
5157 svalue /= 256;
5158 return svalue < -128 || svalue >= 128;
5159 }
5160
5161 /* Include the opcode description table as well as the operand description
5162 table. */
5163 #define VERIFIER(x) verify_##x
5164 #include "aarch64-tbl.h"
5165