aarch64-opc.c revision 1.8 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose dump helpers below (see dump_match_qualifiers
   and the DEBUG_TRACE uses); only compiled in when DEBUG_AARCH64 is defined.
   NOTE(review): declared int but initialized with 'false' — presumably kept
   as int for compatibility with existing extern declarations; confirm.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning; the index
   into the array is the encoded 5-bit value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning; the index
   into the array is the encoded 4-bit value.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
/* Classification of an AdvSIMD instruction's operand-qualifier sequence;
   computed by get_data_pattern and used (via significant_operand_index)
   to pick the operand that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
127
/* For each data pattern, the index of the operand whose qualifier carries
   the size:Q information.  Indexed by enum data_pattern — keep the entry
   order in sync with that enum.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data,
   however, it is not obvious that the optimization will bring significant
   benefit.  */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199
/* Instruction bit-fields.
   Each entry is { lsb, width }: the position of the least significant bit
   and the width in bits of the named field within the 32-bit instruction
   word.  Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0, 4 },	/* imm4_2: in rmif instructions.  */
    { 10, 4 },	/* imm4_3: in adddg/subg instructions.  */
    { 5, 4 },	/* imm4_5: in SME instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 16 },	/* imm16_2: in udf instruction.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 11, 1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 19, 2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20, 1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10, 1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 17, 2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 30, 1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 0, 2 },	/* SME ZAda tile ZA0-ZA3.  */
    { 0, 3 },	/* SME ZAda tile ZA0-ZA7.  */
    { 22, 2 },	/* SME_size_10: size<1>, size<0> class field, [23:22].  */
    { 16, 1 },	/* SME_Q: Q class bit, bit 16.  */
    { 15, 1 },	/* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 13, 2 },	/* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 13, 3 },	/* SME Pm second source scalable predicate register P0-P7.  */
    { 0, 8 },	/* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    { 16, 2 },	/* SME_Rm: index base register W12-W15 [17:16].  */
    { 23, 1 },	/* SME_i1: immediate field, bit 23.  */
    { 22, 1 },	/* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18, 3 },	/* SME_tshl: immediate and qualifier field, bits [20:18].  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
    { 10, 2 },	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
};
347
/* Return the operand class recorded for operand TYPE in the generated
   aarch64_operands table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
353
/* Return the name string recorded for operand TYPE in the generated
   aarch64_operands table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
359
/* Get operand description string for operand TYPE from the generated
   aarch64_operands table.
   This is usually for the diagnosis purpose.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
367
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings for that code (the first name
   is the canonical one) together with its encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
388
389 const aarch64_cond *
390 get_cond_from_value (aarch64_insn value)
391 {
392 assert (value < 16);
393 return &aarch64_conds[(unsigned int) value];
394 }
395
396 const aarch64_cond *
397 get_inverted_cond (const aarch64_cond *cond)
398 {
399 return &aarch64_conds[cond->value ^ 0x1];
400 }
401
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind — keep the entry order in sync with it
   (aarch64_get_operand_modifier relies on pointer subtraction).

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The table is terminated by a NULL name entry.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
427
/* Map DESC, which must point at an element of aarch64_operand_modifiers,
   back to its enum aarch64_modifier_kind index via pointer subtraction.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
433
/* Return the common encoding value recorded for modifier KIND in
   aarch64_operand_modifiers.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
439
440 enum aarch64_modifier_kind
441 aarch64_get_operand_modifier_from_value (aarch64_insn value,
442 bool extend_p)
443 {
444 if (extend_p)
445 return AARCH64_MOD_UXTB + value;
446 else
447 return AARCH64_MOD_LSL - value;
448 }
449
/* Return true if KIND is one of the extend operators, i.e. lies strictly
   after LSL and at or before SXTX in enum aarch64_modifier_kind.  */
bool
aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
{
  return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
}
455
/* Return true if KIND is one of the shift operators, i.e. lies between
   ROR and LSL inclusive in enum aarch64_modifier_kind.  */
static inline bool
aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
{
  return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
}
461
/* Barrier option names, indexed by (and equal to) the encoded 4-bit value.
   Encodings with no architectural name print as plain immediates
   ("#0x00" etc.).  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
481
/* DSB nXS barrier option names; the stored value is the full immediate,
   whose bits <4:3> form the CRm<3:2> field.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
489
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
507
/* PRFM prefetch operation names, indexed by (and equal to) the encoded
   5-bit value; NULL entries are unallocated encodings.

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
548
549 /* Utilities on value constraint. */
551
/* Return 1 if VALUE lies within the inclusive range [LOW, HIGH],
   0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
557
/* Return non-zero if VALUE is a multiple of ALIGN.  ALIGN must be
   non-zero (division by zero otherwise).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align ? 0 : 1;
}
564
/* Return 1 if the signed VALUE fits in a two's-complement field of
   WIDTH bits, i.e. lies in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << (width - 1);
    return (value >= -limit && value < limit) ? 1 : 0;
  }
}
578
/* Return 1 if VALUE is non-negative and fits in an unsigned field of
   WIDTH bits, i.e. lies in [0, 2^WIDTH - 1].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << width;
    return (value >= 0 && value < limit) ? 1 : 0;
  }
}
592
/* Return 1 if OPERAND is SP or WSP: an integer-register operand whose
   operand type permits naming the stack pointer and whose register
   number is 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
602
/* Return 1 if OPERAND is XZR or WZR: an integer-register operand whose
   operand type does NOT permit naming the stack pointer and whose
   register number is 31.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
612
613 /* Return true if the operand *OPERAND that has the operand code
614 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
615 qualified by the qualifier TARGET. */
616
617 static inline int
618 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
619 aarch64_opnd_qualifier_t target)
620 {
621 switch (operand->qualifier)
622 {
623 case AARCH64_OPND_QLF_W:
624 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
625 return 1;
626 break;
627 case AARCH64_OPND_QLF_X:
628 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
629 return 1;
630 break;
631 case AARCH64_OPND_QLF_WSP:
632 if (target == AARCH64_OPND_QLF_W
633 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
634 return 1;
635 break;
636 case AARCH64_OPND_QLF_SP:
637 if (target == AARCH64_OPND_QLF_X
638 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
639 return 1;
640 break;
641 default:
642 break;
643 }
644
645 return 0;
646 }
647
648 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
649 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
650
651 Return NIL if more than one expected qualifiers are found. */
652
653 aarch64_opnd_qualifier_t
654 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
655 int idx,
656 const aarch64_opnd_qualifier_t known_qlf,
657 int known_idx)
658 {
659 int i, saved_i;
660
661 /* Special case.
662
663 When the known qualifier is NIL, we have to assume that there is only
664 one qualifier sequence in the *QSEQ_LIST and return the corresponding
665 qualifier directly. One scenario is that for instruction
666 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
667 which has only one possible valid qualifier sequence
668 NIL, S_D
669 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
670 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
671
672 Because the qualifier NIL has dual roles in the qualifier sequence:
673 it can mean no qualifier for the operand, or the qualifer sequence is
674 not in use (when all qualifiers in the sequence are NILs), we have to
675 handle this special case here. */
676 if (known_qlf == AARCH64_OPND_NIL)
677 {
678 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
679 return qseq_list[0][idx];
680 }
681
682 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
683 {
684 if (qseq_list[i][known_idx] == known_qlf)
685 {
686 if (saved_i != -1)
687 /* More than one sequences are found to have KNOWN_QLF at
688 KNOWN_IDX. */
689 return AARCH64_OPND_NIL;
690 saved_i = i;
691 }
692 }
693
694 return qseq_list[saved_i][idx];
695 }
696
/* Broad categories of operand qualifiers; the kind selects how the
   data0..data2 fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* Register/vector variant: size, count, encoding.  */
  OQK_VALUE_IN_RANGE,	/* Immediate constrained to [data0, data1].  */
  OQK_MISC,		/* Miscellaneous (lsl, msl, ...).  */
};
704
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT:    element size, number of elements, encoding value;
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
717
/* Indexed by the operand qualifier enumerators — keep the entry order in
   sync with enum aarch64_opnd_qualifier.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size (bytes), number of elements and common value for
     encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use kind 0 (== OQK_NIL) rather than
     OQK_MISC; no code visible here distinguishes the two, but confirm
     before changing.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
779
/* Return true if QUALIFIER describes an operand variant (register or
   vector arrangement) rather than a value-range or misc qualifier.  */
static inline bool
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
}
785
/* Return true if QUALIFIER constrains an immediate to a value range
   (see get_lower_bound / get_upper_bound).  */
static inline bool
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
}
791
/* Return the textual name of QUALIFIER (e.g. "8b", "imm_0_31").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
797
/* Given an operand qualifier, return the expected data element size
   (in bytes, field data0) of a qualified operand.  QUALIFIER must be an
   operand-variant qualifier.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
806
/* Return the number of data elements (field data1) of an operand with
   the operand-variant QUALIFIER.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
813
/* Return the common encoding value (field data2) of the operand-variant
   QUALIFIER, used for table-driven encoding/decoding.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data2;
}
820
/* Return the inclusive lower bound of the value-range QUALIFIER.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
827
/* Return the inclusive upper bound of the value-range QUALIFIER.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
834
835 #ifdef DEBUG_AARCH64
/* printf-style debug helper: emit "#### " followed by the formatted
   message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  printf ("#### ");
  vprintf (str, args);
  va_end (args);
  printf ("\n");
}
846
847 static inline void
848 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
849 {
850 int i;
851 printf ("#### \t");
852 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
853 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
854 printf ("\n");
855 }
856
857 static void
858 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
859 const aarch64_opnd_qualifier_t *qualifier)
860 {
861 int i;
862 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
863
864 aarch64_verbose ("dump_match_qualifiers:");
865 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
866 curr[i] = opnd[i].qualifier;
867 dump_qualifier_sequence (curr);
868 aarch64_verbose ("against");
869 dump_qualifier_sequence (qualifier);
870 }
871 #endif /* DEBUG_AARCH64 */
872
873 /* This function checks if the given instruction INSN is a destructive
874 instruction based on the usage of the registers. It does not recognize
875 unary destructive instructions. */
876 bool
877 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
878 {
879 int i = 0;
880 const enum aarch64_opnd *opnds = opcode->operands;
881
882 if (opnds[0] == AARCH64_OPND_NIL)
883 return false;
884
885 while (opnds[++i] != AARCH64_OPND_NIL)
886 if (opnds[i] == opnds[0])
887 return true;
888
889 return false;
890 }
891
892 /* TODO improve this, we can have an extra field at the runtime to
893 store the number of operands rather than calculating it every time. */
894
895 int
896 aarch64_num_of_operands (const aarch64_opcode *opcode)
897 {
898 int i = 0;
899 const enum aarch64_opnd *opnds = opcode->operands;
900 while (opnds[i++] != AARCH64_OPND_NIL)
901 ;
902 --i;
903 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
904 return i;
905 }
906
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* Nothing to match against: trivially successful.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Normalize STOP_AT: -1 (or any out-of-range value) means "match all
     operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  QUALIFIERS_LIST is advanced in step with I so that
     on a successful break it points at the matched sequence.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.
	 An empty first sequence means the opcode takes no qualifiers at
	 all, which counts as a match; an empty later sequence is just
	 the list terminator.
	 NOTE(review): if no sequence matches before AARCH64_MAX_QLF_SEQ_NUM
	 iterations, the fill-in below reads *QUALIFIERS_LIST one past the
	 last visited entry — presumably every list is terminated by an
	 empty sequence before that; confirm against the generated
	 tables.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST now points at the
	 matched sequence (see the loop header above).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1031
1032 /* Operand qualifier matching and resolving.
1033
1034 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1035 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1036
1037 if UPDATE_P, update the qualifier(s) in *INST after the matching
1038 succeeds. */
1039
1040 static int
1041 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1042 {
1043 int i, nops;
1044 aarch64_opnd_qualifier_seq_t qualifiers;
1045
1046 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1047 qualifiers))
1048 {
1049 DEBUG_TRACE ("matching FAIL");
1050 return 0;
1051 }
1052
1053 if (inst->opcode->flags & F_STRICT)
1054 {
1055 /* Require an exact qualifier match, even for NIL qualifiers. */
1056 nops = aarch64_num_of_operands (inst->opcode);
1057 for (i = 0; i < nops; ++i)
1058 if (inst->operands[i].qualifier != qualifiers[i])
1059 return false;
1060 }
1061
1062 /* Update the qualifiers. */
1063 if (update_p)
1064 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1065 {
1066 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1067 break;
1068 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1069 "update %s with %s for operand %d",
1070 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1071 aarch64_get_qualifier_name (qualifiers[i]), i);
1072 inst->operands[i].qualifier = qualifiers[i];
1073 }
1074
1075 DEBUG_TRACE ("matching SUCCESS");
1076 return 1;
1077 }
1078
1079 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1080 register by MOVZ.
1081
1082 IS32 indicates whether value is a 32-bit immediate or not.
1083 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1084 amount will be returned in *SHIFT_AMOUNT. */
1085
1086 bool
1087 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1088 {
1089 int amount;
1090
1091 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1092
1093 if (is32)
1094 {
1095 /* Allow all zeros or all ones in top 32-bits, so that
1096 32-bit constant expressions like ~0x80000000 are
1097 permitted. */
1098 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1099 /* Immediate out of range. */
1100 return false;
1101 value &= 0xffffffff;
1102 }
1103
1104 /* first, try movz then movn */
1105 amount = -1;
1106 if ((value & ((uint64_t) 0xffff << 0)) == value)
1107 amount = 0;
1108 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1109 amount = 16;
1110 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1111 amount = 32;
1112 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1113 amount = 48;
1114
1115 if (amount == -1)
1116 {
1117 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1118 return false;
1119 }
1120
1121 if (shift_amount != NULL)
1122 *shift_amount = amount;
1123
1124 DEBUG_TRACE ("exit true with amount %d", amount);
1125
1126 return true;
1127 }
1128
1129 /* Build the accepted values for immediate logical SIMD instructions.
1130
1131 The standard encodings of the immediate value are:
1132 N imms immr SIMD size R S
1133 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1134 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1135 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1136 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1137 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1138 0 11110s 00000r 2 UInt(r) UInt(s)
1139 where all-ones value of S is reserved.
1140
1141 Let's call E the SIMD size.
1142
1143 The immediate value is: S+1 bits '1' rotated to the right by R.
1144
1145 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1146 (remember S != E - 1). */
1147
/* Number of valid bitmask-immediate encodings:
   64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1 = 5334 (see the comment above).  */
#define TOTAL_IMM_NB  5334

/* One table entry: the expanded 64-bit immediate value and its 13-bit
   standard (N:immr:imms) encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lookup table of every valid bitmask immediate, sorted by IMM so it can
   be searched with bsearch; populated lazily by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1157
1158 static int
1159 simd_imm_encoding_cmp(const void *i1, const void *i2)
1160 {
1161 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1162 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1163
1164 if (imm1->imm < imm2->imm)
1165 return -1;
1166 if (imm1->imm > imm2->imm)
1167 return +1;
1168 return 0;
1169 }
1170
/* Pack the standard 13-bit (N:immr:imms) bitmask-immediate encoding.

   immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t encoding = s;		/* imms in bits <5:0>.  */
  encoding |= r << 6;			/* immr in bits <11:6>.  */
  encoding |= (uint32_t) is64 << 12;	/* N in bit <12>.  */
  return encoding;
}
1184
/* Fill simd_immediates[] with every valid bitmask immediate (expanded
   value plus standard encoding) and sort it by value for bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over element sizes E = 2, 4, 8, 16, 32, 64.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  /* 64-bit elements use the N=1 encoding and the full mask;
	     (1ull << 64) - 1 would be undefined, hence the special case.  */
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size marker" bits of imms:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = E - 1 (all-ones element) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the E-bit element across all 64 bits; each case
	       doubles the width, so deliberately fall through.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Every valid encoding must have been generated exactly once.  */
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so aarch64_logical_immediate_p can bsearch.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1250
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the lookup table on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     UPPER is the mask of bits above the ESIZE-byte value; the double
     shift by esize*4 avoids a single shift by 64 (undefined behaviour)
     when ESIZE is 8.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted table built by build_immediate_table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1302
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int pos;

  /* Examine each of the eight bytes, least significant first; byte POS
     contributes bit POS of the result.  */
  for (pos = 0; pos < 8; pos++)
    {
      uint32_t b = (imm >> (pos * 8)) & 0xff;

      if (b == 0xff)
	result |= 1 << pos;
      else if (b != 0x00)
	/* A byte that is neither all-zeros nor all-ones disqualifies IMM.  */
	return -1;
    }

  return result;
}
1324
1325 /* Utility inline functions for operand_general_constraint_met_p. */
1326
1327 static inline void
1328 set_error (aarch64_operand_error *mismatch_detail,
1329 enum aarch64_operand_error_kind kind, int idx,
1330 const char* error)
1331 {
1332 if (mismatch_detail == NULL)
1333 return;
1334 mismatch_detail->kind = kind;
1335 mismatch_detail->index = idx;
1336 mismatch_detail->error = error;
1337 }
1338
1339 static inline void
1340 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1341 const char* error)
1342 {
1343 if (mismatch_detail == NULL)
1344 return;
1345 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1346 }
1347
1348 static inline void
1349 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1350 int idx, int lower_bound, int upper_bound,
1351 const char* error)
1352 {
1353 if (mismatch_detail == NULL)
1354 return;
1355 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1356 mismatch_detail->data[0].i = lower_bound;
1357 mismatch_detail->data[1].i = upper_bound;
1358 }
1359
1360 static inline void
1361 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1362 int idx, int lower_bound, int upper_bound)
1363 {
1364 if (mismatch_detail == NULL)
1365 return;
1366 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1367 _("immediate value"));
1368 }
1369
1370 static inline void
1371 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1372 int idx, int lower_bound, int upper_bound)
1373 {
1374 if (mismatch_detail == NULL)
1375 return;
1376 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1377 _("immediate offset"));
1378 }
1379
1380 static inline void
1381 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1382 int idx, int lower_bound, int upper_bound)
1383 {
1384 if (mismatch_detail == NULL)
1385 return;
1386 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1387 _("register number"));
1388 }
1389
1390 static inline void
1391 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1392 int idx, int lower_bound, int upper_bound)
1393 {
1394 if (mismatch_detail == NULL)
1395 return;
1396 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1397 _("register element index"));
1398 }
1399
1400 static inline void
1401 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1402 int idx, int lower_bound, int upper_bound)
1403 {
1404 if (mismatch_detail == NULL)
1405 return;
1406 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1407 _("shift amount"));
1408 }
1409
1410 /* Report that the MUL modifier in operand IDX should be in the range
1411 [LOWER_BOUND, UPPER_BOUND]. */
1412 static inline void
1413 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1414 int idx, int lower_bound, int upper_bound)
1415 {
1416 if (mismatch_detail == NULL)
1417 return;
1418 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1419 _("multiplier"));
1420 }
1421
1422 static inline void
1423 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1424 int alignment)
1425 {
1426 if (mismatch_detail == NULL)
1427 return;
1428 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1429 mismatch_detail->data[0].i = alignment;
1430 }
1431
1432 static inline void
1433 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1434 int expected_num)
1435 {
1436 if (mismatch_detail == NULL)
1437 return;
1438 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1439 mismatch_detail->data[0].i = expected_num;
1440 }
1441
1442 static inline void
1443 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1444 const char* error)
1445 {
1446 if (mismatch_detail == NULL)
1447 return;
1448 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1449 }
1450
1451 /* General constraint checking based on operand code.
1452
1453 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1454 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1455
1456 This function has to be called after the qualifiers for all operands
1457 have been resolved.
1458
1459 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1460 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1461 of error message during the disassembling where error message is not
1462 wanted. We avoid the dynamic construction of strings of error messages
1463 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1464 use a combination of error code, static string and some integer data to
1465 represent an error. */
1466
1467 static int
1468 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1469 enum aarch64_opnd type,
1470 const aarch64_opcode *opcode,
1471 aarch64_operand_error *mismatch_detail)
1472 {
1473 unsigned num, modifiers, shift;
1474 unsigned char size;
1475 int64_t imm, min_value, max_value;
1476 uint64_t uvalue, mask;
1477 const aarch64_opnd_info *opnd = opnds + idx;
1478 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1479 int i;
1480
1481 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1482
1483 switch (aarch64_operands[type].op_class)
1484 {
1485 case AARCH64_OPND_CLASS_INT_REG:
1486 /* Check pair reg constraints for cas* instructions. */
1487 if (type == AARCH64_OPND_PAIRREG)
1488 {
1489 assert (idx == 1 || idx == 3);
1490 if (opnds[idx - 1].reg.regno % 2 != 0)
1491 {
1492 set_syntax_error (mismatch_detail, idx - 1,
1493 _("reg pair must start from even reg"));
1494 return 0;
1495 }
1496 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1497 {
1498 set_syntax_error (mismatch_detail, idx,
1499 _("reg pair must be contiguous"));
1500 return 0;
1501 }
1502 break;
1503 }
1504
1505 /* <Xt> may be optional in some IC and TLBI instructions. */
1506 if (type == AARCH64_OPND_Rt_SYS)
1507 {
1508 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1509 == AARCH64_OPND_CLASS_SYSTEM));
1510 if (opnds[1].present
1511 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1512 {
1513 set_other_error (mismatch_detail, idx, _("extraneous register"));
1514 return 0;
1515 }
1516 if (!opnds[1].present
1517 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1518 {
1519 set_other_error (mismatch_detail, idx, _("missing register"));
1520 return 0;
1521 }
1522 }
1523 switch (qualifier)
1524 {
1525 case AARCH64_OPND_QLF_WSP:
1526 case AARCH64_OPND_QLF_SP:
1527 if (!aarch64_stack_pointer_p (opnd))
1528 {
1529 set_other_error (mismatch_detail, idx,
1530 _("stack pointer register expected"));
1531 return 0;
1532 }
1533 break;
1534 default:
1535 break;
1536 }
1537 break;
1538
1539 case AARCH64_OPND_CLASS_SVE_REG:
1540 switch (type)
1541 {
1542 case AARCH64_OPND_SVE_Zm3_INDEX:
1543 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1544 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1545 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1546 case AARCH64_OPND_SVE_Zm4_INDEX:
1547 size = get_operand_fields_width (get_operand_from_code (type));
1548 shift = get_operand_specific_data (&aarch64_operands[type]);
1549 mask = (1 << shift) - 1;
1550 if (opnd->reg.regno > mask)
1551 {
1552 assert (mask == 7 || mask == 15);
1553 set_other_error (mismatch_detail, idx,
1554 mask == 15
1555 ? _("z0-z15 expected")
1556 : _("z0-z7 expected"));
1557 return 0;
1558 }
1559 mask = (1u << (size - shift)) - 1;
1560 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1561 {
1562 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1563 return 0;
1564 }
1565 break;
1566
1567 case AARCH64_OPND_SVE_Zn_INDEX:
1568 size = aarch64_get_qualifier_esize (opnd->qualifier);
1569 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1570 {
1571 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1572 0, 64 / size - 1);
1573 return 0;
1574 }
1575 break;
1576
1577 case AARCH64_OPND_SVE_ZnxN:
1578 case AARCH64_OPND_SVE_ZtxN:
1579 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1580 {
1581 set_other_error (mismatch_detail, idx,
1582 _("invalid register list"));
1583 return 0;
1584 }
1585 break;
1586
1587 default:
1588 break;
1589 }
1590 break;
1591
1592 case AARCH64_OPND_CLASS_PRED_REG:
1593 if (opnd->reg.regno >= 8
1594 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1595 {
1596 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1597 return 0;
1598 }
1599 break;
1600
1601 case AARCH64_OPND_CLASS_COND:
1602 if (type == AARCH64_OPND_COND1
1603 && (opnds[idx].cond->value & 0xe) == 0xe)
1604 {
1605 /* Not allow AL or NV. */
1606 set_syntax_error (mismatch_detail, idx, NULL);
1607 }
1608 break;
1609
1610 case AARCH64_OPND_CLASS_ADDRESS:
1611 /* Check writeback. */
1612 switch (opcode->iclass)
1613 {
1614 case ldst_pos:
1615 case ldst_unscaled:
1616 case ldstnapair_offs:
1617 case ldstpair_off:
1618 case ldst_unpriv:
1619 if (opnd->addr.writeback == 1)
1620 {
1621 set_syntax_error (mismatch_detail, idx,
1622 _("unexpected address writeback"));
1623 return 0;
1624 }
1625 break;
1626 case ldst_imm10:
1627 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1628 {
1629 set_syntax_error (mismatch_detail, idx,
1630 _("unexpected address writeback"));
1631 return 0;
1632 }
1633 break;
1634 case ldst_imm9:
1635 case ldstpair_indexed:
1636 case asisdlsep:
1637 case asisdlsop:
1638 if (opnd->addr.writeback == 0)
1639 {
1640 set_syntax_error (mismatch_detail, idx,
1641 _("address writeback expected"));
1642 return 0;
1643 }
1644 break;
1645 default:
1646 assert (opnd->addr.writeback == 0);
1647 break;
1648 }
1649 switch (type)
1650 {
1651 case AARCH64_OPND_ADDR_SIMM7:
1652 /* Scaled signed 7 bits immediate offset. */
1653 /* Get the size of the data element that is accessed, which may be
1654 different from that of the source register size,
1655 e.g. in strb/ldrb. */
1656 size = aarch64_get_qualifier_esize (opnd->qualifier);
1657 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1658 {
1659 set_offset_out_of_range_error (mismatch_detail, idx,
1660 -64 * size, 63 * size);
1661 return 0;
1662 }
1663 if (!value_aligned_p (opnd->addr.offset.imm, size))
1664 {
1665 set_unaligned_error (mismatch_detail, idx, size);
1666 return 0;
1667 }
1668 break;
1669 case AARCH64_OPND_ADDR_OFFSET:
1670 case AARCH64_OPND_ADDR_SIMM9:
1671 /* Unscaled signed 9 bits immediate offset. */
1672 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1673 {
1674 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1675 return 0;
1676 }
1677 break;
1678
1679 case AARCH64_OPND_ADDR_SIMM9_2:
1680 /* Unscaled signed 9 bits immediate offset, which has to be negative
1681 or unaligned. */
1682 size = aarch64_get_qualifier_esize (qualifier);
1683 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1684 && !value_aligned_p (opnd->addr.offset.imm, size))
1685 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1686 return 1;
1687 set_other_error (mismatch_detail, idx,
1688 _("negative or unaligned offset expected"));
1689 return 0;
1690
1691 case AARCH64_OPND_ADDR_SIMM10:
1692 /* Scaled signed 10 bits immediate offset. */
1693 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1694 {
1695 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1696 return 0;
1697 }
1698 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1699 {
1700 set_unaligned_error (mismatch_detail, idx, 8);
1701 return 0;
1702 }
1703 break;
1704
1705 case AARCH64_OPND_ADDR_SIMM11:
1706 /* Signed 11 bits immediate offset (multiple of 16). */
1707 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1708 {
1709 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1710 return 0;
1711 }
1712
1713 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1714 {
1715 set_unaligned_error (mismatch_detail, idx, 16);
1716 return 0;
1717 }
1718 break;
1719
1720 case AARCH64_OPND_ADDR_SIMM13:
1721 /* Signed 13 bits immediate offset (multiple of 16). */
1722 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1723 {
1724 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1725 return 0;
1726 }
1727
1728 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1729 {
1730 set_unaligned_error (mismatch_detail, idx, 16);
1731 return 0;
1732 }
1733 break;
1734
1735 case AARCH64_OPND_SIMD_ADDR_POST:
1736 /* AdvSIMD load/store multiple structures, post-index. */
1737 assert (idx == 1);
1738 if (opnd->addr.offset.is_reg)
1739 {
1740 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1741 return 1;
1742 else
1743 {
1744 set_other_error (mismatch_detail, idx,
1745 _("invalid register offset"));
1746 return 0;
1747 }
1748 }
1749 else
1750 {
1751 const aarch64_opnd_info *prev = &opnds[idx-1];
1752 unsigned num_bytes; /* total number of bytes transferred. */
1753 /* The opcode dependent area stores the number of elements in
1754 each structure to be loaded/stored. */
1755 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1756 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1757 /* Special handling of loading single structure to all lane. */
1758 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1759 * aarch64_get_qualifier_esize (prev->qualifier);
1760 else
1761 num_bytes = prev->reglist.num_regs
1762 * aarch64_get_qualifier_esize (prev->qualifier)
1763 * aarch64_get_qualifier_nelem (prev->qualifier);
1764 if ((int) num_bytes != opnd->addr.offset.imm)
1765 {
1766 set_other_error (mismatch_detail, idx,
1767 _("invalid post-increment amount"));
1768 return 0;
1769 }
1770 }
1771 break;
1772
1773 case AARCH64_OPND_ADDR_REGOFF:
1774 /* Get the size of the data element that is accessed, which may be
1775 different from that of the source register size,
1776 e.g. in strb/ldrb. */
1777 size = aarch64_get_qualifier_esize (opnd->qualifier);
1778 /* It is either no shift or shift by the binary logarithm of SIZE. */
1779 if (opnd->shifter.amount != 0
1780 && opnd->shifter.amount != (int)get_logsz (size))
1781 {
1782 set_other_error (mismatch_detail, idx,
1783 _("invalid shift amount"));
1784 return 0;
1785 }
1786 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1787 operators. */
1788 switch (opnd->shifter.kind)
1789 {
1790 case AARCH64_MOD_UXTW:
1791 case AARCH64_MOD_LSL:
1792 case AARCH64_MOD_SXTW:
1793 case AARCH64_MOD_SXTX: break;
1794 default:
1795 set_other_error (mismatch_detail, idx,
1796 _("invalid extend/shift operator"));
1797 return 0;
1798 }
1799 break;
1800
1801 case AARCH64_OPND_ADDR_UIMM12:
1802 imm = opnd->addr.offset.imm;
1803 /* Get the size of the data element that is accessed, which may be
1804 different from that of the source register size,
1805 e.g. in strb/ldrb. */
1806 size = aarch64_get_qualifier_esize (qualifier);
1807 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1808 {
1809 set_offset_out_of_range_error (mismatch_detail, idx,
1810 0, 4095 * size);
1811 return 0;
1812 }
1813 if (!value_aligned_p (opnd->addr.offset.imm, size))
1814 {
1815 set_unaligned_error (mismatch_detail, idx, size);
1816 return 0;
1817 }
1818 break;
1819
1820 case AARCH64_OPND_ADDR_PCREL14:
1821 case AARCH64_OPND_ADDR_PCREL19:
1822 case AARCH64_OPND_ADDR_PCREL21:
1823 case AARCH64_OPND_ADDR_PCREL26:
1824 imm = opnd->imm.value;
1825 if (operand_need_shift_by_two (get_operand_from_code (type)))
1826 {
1827 /* The offset value in a PC-relative branch instruction is alway
1828 4-byte aligned and is encoded without the lowest 2 bits. */
1829 if (!value_aligned_p (imm, 4))
1830 {
1831 set_unaligned_error (mismatch_detail, idx, 4);
1832 return 0;
1833 }
1834 /* Right shift by 2 so that we can carry out the following check
1835 canonically. */
1836 imm >>= 2;
1837 }
1838 size = get_operand_fields_width (get_operand_from_code (type));
1839 if (!value_fit_signed_field_p (imm, size))
1840 {
1841 set_other_error (mismatch_detail, idx,
1842 _("immediate out of range"));
1843 return 0;
1844 }
1845 break;
1846
1847 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1848 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1849 {
1850 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1851 return 0;
1852 }
1853 break;
1854
1855 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1856 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1857 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1858 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1859 min_value = -8;
1860 max_value = 7;
1861 sve_imm_offset_vl:
1862 assert (!opnd->addr.offset.is_reg);
1863 assert (opnd->addr.preind);
1864 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1865 min_value *= num;
1866 max_value *= num;
1867 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1868 || (opnd->shifter.operator_present
1869 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1870 {
1871 set_other_error (mismatch_detail, idx,
1872 _("invalid addressing mode"));
1873 return 0;
1874 }
1875 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1876 {
1877 set_offset_out_of_range_error (mismatch_detail, idx,
1878 min_value, max_value);
1879 return 0;
1880 }
1881 if (!value_aligned_p (opnd->addr.offset.imm, num))
1882 {
1883 set_unaligned_error (mismatch_detail, idx, num);
1884 return 0;
1885 }
1886 break;
1887
1888 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1889 min_value = -32;
1890 max_value = 31;
1891 goto sve_imm_offset_vl;
1892
1893 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1894 min_value = -256;
1895 max_value = 255;
1896 goto sve_imm_offset_vl;
1897
1898 case AARCH64_OPND_SVE_ADDR_RI_U6:
1899 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1900 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1901 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1902 min_value = 0;
1903 max_value = 63;
1904 sve_imm_offset:
1905 assert (!opnd->addr.offset.is_reg);
1906 assert (opnd->addr.preind);
1907 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1908 min_value *= num;
1909 max_value *= num;
1910 if (opnd->shifter.operator_present
1911 || opnd->shifter.amount_present)
1912 {
1913 set_other_error (mismatch_detail, idx,
1914 _("invalid addressing mode"));
1915 return 0;
1916 }
1917 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1918 {
1919 set_offset_out_of_range_error (mismatch_detail, idx,
1920 min_value, max_value);
1921 return 0;
1922 }
1923 if (!value_aligned_p (opnd->addr.offset.imm, num))
1924 {
1925 set_unaligned_error (mismatch_detail, idx, num);
1926 return 0;
1927 }
1928 break;
1929
1930 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1931 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1932 min_value = -8;
1933 max_value = 7;
1934 goto sve_imm_offset;
1935
1936 case AARCH64_OPND_SVE_ADDR_ZX:
1937 /* Everything is already ensured by parse_operands or
1938 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1939 argument type). */
1940 assert (opnd->addr.offset.is_reg);
1941 assert (opnd->addr.preind);
1942 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1943 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1944 assert (opnd->shifter.operator_present == 0);
1945 break;
1946
1947 case AARCH64_OPND_SVE_ADDR_R:
1948 case AARCH64_OPND_SVE_ADDR_RR:
1949 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1950 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1951 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1952 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1953 case AARCH64_OPND_SVE_ADDR_RX:
1954 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1955 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1956 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1957 case AARCH64_OPND_SVE_ADDR_RZ:
1958 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1959 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1960 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1961 modifiers = 1 << AARCH64_MOD_LSL;
1962 sve_rr_operand:
1963 assert (opnd->addr.offset.is_reg);
1964 assert (opnd->addr.preind);
1965 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1966 && opnd->addr.offset.regno == 31)
1967 {
1968 set_other_error (mismatch_detail, idx,
1969 _("index register xzr is not allowed"));
1970 return 0;
1971 }
1972 if (((1 << opnd->shifter.kind) & modifiers) == 0
1973 || (opnd->shifter.amount
1974 != get_operand_specific_data (&aarch64_operands[type])))
1975 {
1976 set_other_error (mismatch_detail, idx,
1977 _("invalid addressing mode"));
1978 return 0;
1979 }
1980 break;
1981
1982 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1983 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1984 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1985 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1986 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1987 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1988 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1989 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1990 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1991 goto sve_rr_operand;
1992
1993 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1994 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1995 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1996 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1997 min_value = 0;
1998 max_value = 31;
1999 goto sve_imm_offset;
2000
2001 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2002 modifiers = 1 << AARCH64_MOD_LSL;
2003 sve_zz_operand:
2004 assert (opnd->addr.offset.is_reg);
2005 assert (opnd->addr.preind);
2006 if (((1 << opnd->shifter.kind) & modifiers) == 0
2007 || opnd->shifter.amount < 0
2008 || opnd->shifter.amount > 3)
2009 {
2010 set_other_error (mismatch_detail, idx,
2011 _("invalid addressing mode"));
2012 return 0;
2013 }
2014 break;
2015
2016 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2017 modifiers = (1 << AARCH64_MOD_SXTW);
2018 goto sve_zz_operand;
2019
2020 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2021 modifiers = 1 << AARCH64_MOD_UXTW;
2022 goto sve_zz_operand;
2023
2024 default:
2025 break;
2026 }
2027 break;
2028
2029 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2030 if (type == AARCH64_OPND_LEt)
2031 {
2032 /* Get the upper bound for the element index. */
2033 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2034 if (!value_in_range_p (opnd->reglist.index, 0, num))
2035 {
2036 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2037 return 0;
2038 }
2039 }
2040 /* The opcode dependent area stores the number of elements in
2041 each structure to be loaded/stored. */
2042 num = get_opcode_dependent_value (opcode);
2043 switch (type)
2044 {
2045 case AARCH64_OPND_LVt:
2046 assert (num >= 1 && num <= 4);
2047 /* Unless LD1/ST1, the number of registers should be equal to that
2048 of the structure elements. */
2049 if (num != 1 && opnd->reglist.num_regs != num)
2050 {
2051 set_reg_list_error (mismatch_detail, idx, num);
2052 return 0;
2053 }
2054 break;
2055 case AARCH64_OPND_LVt_AL:
2056 case AARCH64_OPND_LEt:
2057 assert (num >= 1 && num <= 4);
2058 /* The number of registers should be equal to that of the structure
2059 elements. */
2060 if (opnd->reglist.num_regs != num)
2061 {
2062 set_reg_list_error (mismatch_detail, idx, num);
2063 return 0;
2064 }
2065 break;
2066 default:
2067 break;
2068 }
2069 break;
2070
2071 case AARCH64_OPND_CLASS_IMMEDIATE:
2072 /* Constraint check on immediate operand. */
2073 imm = opnd->imm.value;
2074 /* E.g. imm_0_31 constrains value to be 0..31. */
2075 if (qualifier_value_in_range_constraint_p (qualifier)
2076 && !value_in_range_p (imm, get_lower_bound (qualifier),
2077 get_upper_bound (qualifier)))
2078 {
2079 set_imm_out_of_range_error (mismatch_detail, idx,
2080 get_lower_bound (qualifier),
2081 get_upper_bound (qualifier));
2082 return 0;
2083 }
2084
2085 switch (type)
2086 {
2087 case AARCH64_OPND_AIMM:
2088 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2089 {
2090 set_other_error (mismatch_detail, idx,
2091 _("invalid shift operator"));
2092 return 0;
2093 }
2094 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2095 {
2096 set_other_error (mismatch_detail, idx,
2097 _("shift amount must be 0 or 12"));
2098 return 0;
2099 }
2100 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2101 {
2102 set_other_error (mismatch_detail, idx,
2103 _("immediate out of range"));
2104 return 0;
2105 }
2106 break;
2107
2108 case AARCH64_OPND_HALF:
2109 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2110 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2111 {
2112 set_other_error (mismatch_detail, idx,
2113 _("invalid shift operator"));
2114 return 0;
2115 }
2116 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2117 if (!value_aligned_p (opnd->shifter.amount, 16))
2118 {
2119 set_other_error (mismatch_detail, idx,
2120 _("shift amount must be a multiple of 16"));
2121 return 0;
2122 }
2123 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2124 {
2125 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2126 0, size * 8 - 16);
2127 return 0;
2128 }
2129 if (opnd->imm.value < 0)
2130 {
2131 set_other_error (mismatch_detail, idx,
2132 _("negative immediate value not allowed"));
2133 return 0;
2134 }
2135 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2136 {
2137 set_other_error (mismatch_detail, idx,
2138 _("immediate out of range"));
2139 return 0;
2140 }
2141 break;
2142
2143 case AARCH64_OPND_IMM_MOV:
2144 {
2145 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2146 imm = opnd->imm.value;
2147 assert (idx == 1);
2148 switch (opcode->op)
2149 {
2150 case OP_MOV_IMM_WIDEN:
2151 imm = ~imm;
2152 /* Fall through. */
2153 case OP_MOV_IMM_WIDE:
2154 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2155 {
2156 set_other_error (mismatch_detail, idx,
2157 _("immediate out of range"));
2158 return 0;
2159 }
2160 break;
2161 case OP_MOV_IMM_LOG:
2162 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2163 {
2164 set_other_error (mismatch_detail, idx,
2165 _("immediate out of range"));
2166 return 0;
2167 }
2168 break;
2169 default:
2170 assert (0);
2171 return 0;
2172 }
2173 }
2174 break;
2175
2176 case AARCH64_OPND_NZCV:
2177 case AARCH64_OPND_CCMP_IMM:
2178 case AARCH64_OPND_EXCEPTION:
2179 case AARCH64_OPND_UNDEFINED:
2180 case AARCH64_OPND_TME_UIMM16:
2181 case AARCH64_OPND_UIMM4:
2182 case AARCH64_OPND_UIMM4_ADDG:
2183 case AARCH64_OPND_UIMM7:
2184 case AARCH64_OPND_UIMM3_OP1:
2185 case AARCH64_OPND_UIMM3_OP2:
2186 case AARCH64_OPND_SVE_UIMM3:
2187 case AARCH64_OPND_SVE_UIMM7:
2188 case AARCH64_OPND_SVE_UIMM8:
2189 case AARCH64_OPND_SVE_UIMM8_53:
2190 size = get_operand_fields_width (get_operand_from_code (type));
2191 assert (size < 32);
2192 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2193 {
2194 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2195 (1u << size) - 1);
2196 return 0;
2197 }
2198 break;
2199
2200 case AARCH64_OPND_UIMM10:
2201 /* Scaled unsigned 10 bits immediate offset. */
2202 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2203 {
2204 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2205 return 0;
2206 }
2207
2208 if (!value_aligned_p (opnd->imm.value, 16))
2209 {
2210 set_unaligned_error (mismatch_detail, idx, 16);
2211 return 0;
2212 }
2213 break;
2214
2215 case AARCH64_OPND_SIMM5:
2216 case AARCH64_OPND_SVE_SIMM5:
2217 case AARCH64_OPND_SVE_SIMM5B:
2218 case AARCH64_OPND_SVE_SIMM6:
2219 case AARCH64_OPND_SVE_SIMM8:
2220 size = get_operand_fields_width (get_operand_from_code (type));
2221 assert (size < 32);
2222 if (!value_fit_signed_field_p (opnd->imm.value, size))
2223 {
2224 set_imm_out_of_range_error (mismatch_detail, idx,
2225 -(1 << (size - 1)),
2226 (1 << (size - 1)) - 1);
2227 return 0;
2228 }
2229 break;
2230
2231 case AARCH64_OPND_WIDTH:
2232 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2233 && opnds[0].type == AARCH64_OPND_Rd);
2234 size = get_upper_bound (qualifier);
2235 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2236 /* lsb+width <= reg.size */
2237 {
2238 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2239 size - opnds[idx-1].imm.value);
2240 return 0;
2241 }
2242 break;
2243
2244 case AARCH64_OPND_LIMM:
2245 case AARCH64_OPND_SVE_LIMM:
2246 {
2247 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2248 uint64_t uimm = opnd->imm.value;
2249 if (opcode->op == OP_BIC)
2250 uimm = ~uimm;
2251 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2252 {
2253 set_other_error (mismatch_detail, idx,
2254 _("immediate out of range"));
2255 return 0;
2256 }
2257 }
2258 break;
2259
2260 case AARCH64_OPND_IMM0:
2261 case AARCH64_OPND_FPIMM0:
2262 if (opnd->imm.value != 0)
2263 {
2264 set_other_error (mismatch_detail, idx,
2265 _("immediate zero expected"));
2266 return 0;
2267 }
2268 break;
2269
2270 case AARCH64_OPND_IMM_ROT1:
2271 case AARCH64_OPND_IMM_ROT2:
2272 case AARCH64_OPND_SVE_IMM_ROT2:
2273 if (opnd->imm.value != 0
2274 && opnd->imm.value != 90
2275 && opnd->imm.value != 180
2276 && opnd->imm.value != 270)
2277 {
2278 set_other_error (mismatch_detail, idx,
2279 _("rotate expected to be 0, 90, 180 or 270"));
2280 return 0;
2281 }
2282 break;
2283
2284 case AARCH64_OPND_IMM_ROT3:
2285 case AARCH64_OPND_SVE_IMM_ROT1:
2286 case AARCH64_OPND_SVE_IMM_ROT3:
2287 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2288 {
2289 set_other_error (mismatch_detail, idx,
2290 _("rotate expected to be 90 or 270"));
2291 return 0;
2292 }
2293 break;
2294
2295 case AARCH64_OPND_SHLL_IMM:
2296 assert (idx == 2);
2297 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2298 if (opnd->imm.value != size)
2299 {
2300 set_other_error (mismatch_detail, idx,
2301 _("invalid shift amount"));
2302 return 0;
2303 }
2304 break;
2305
2306 case AARCH64_OPND_IMM_VLSL:
2307 size = aarch64_get_qualifier_esize (qualifier);
2308 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2309 {
2310 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2311 size * 8 - 1);
2312 return 0;
2313 }
2314 break;
2315
2316 case AARCH64_OPND_IMM_VLSR:
2317 size = aarch64_get_qualifier_esize (qualifier);
2318 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2319 {
2320 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2321 return 0;
2322 }
2323 break;
2324
2325 case AARCH64_OPND_SIMD_IMM:
2326 case AARCH64_OPND_SIMD_IMM_SFT:
2327 /* Qualifier check. */
2328 switch (qualifier)
2329 {
2330 case AARCH64_OPND_QLF_LSL:
2331 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2332 {
2333 set_other_error (mismatch_detail, idx,
2334 _("invalid shift operator"));
2335 return 0;
2336 }
2337 break;
2338 case AARCH64_OPND_QLF_MSL:
2339 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2340 {
2341 set_other_error (mismatch_detail, idx,
2342 _("invalid shift operator"));
2343 return 0;
2344 }
2345 break;
2346 case AARCH64_OPND_QLF_NIL:
2347 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2348 {
2349 set_other_error (mismatch_detail, idx,
2350 _("shift is not permitted"));
2351 return 0;
2352 }
2353 break;
2354 default:
2355 assert (0);
2356 return 0;
2357 }
2358 /* Is the immediate valid? */
2359 assert (idx == 1);
2360 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2361 {
2362 /* uimm8 or simm8 */
2363 if (!value_in_range_p (opnd->imm.value, -128, 255))
2364 {
2365 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2366 return 0;
2367 }
2368 }
2369 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2370 {
2371 /* uimm64 is not
2372 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2373 ffffffffgggggggghhhhhhhh'. */
2374 set_other_error (mismatch_detail, idx,
2375 _("invalid value for immediate"));
2376 return 0;
2377 }
2378 /* Is the shift amount valid? */
2379 switch (opnd->shifter.kind)
2380 {
2381 case AARCH64_MOD_LSL:
2382 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2383 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2384 {
2385 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2386 (size - 1) * 8);
2387 return 0;
2388 }
2389 if (!value_aligned_p (opnd->shifter.amount, 8))
2390 {
2391 set_unaligned_error (mismatch_detail, idx, 8);
2392 return 0;
2393 }
2394 break;
2395 case AARCH64_MOD_MSL:
2396 /* Only 8 and 16 are valid shift amount. */
2397 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2398 {
2399 set_other_error (mismatch_detail, idx,
2400 _("shift amount must be 0 or 16"));
2401 return 0;
2402 }
2403 break;
2404 default:
2405 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2406 {
2407 set_other_error (mismatch_detail, idx,
2408 _("invalid shift operator"));
2409 return 0;
2410 }
2411 break;
2412 }
2413 break;
2414
2415 case AARCH64_OPND_FPIMM:
2416 case AARCH64_OPND_SIMD_FPIMM:
2417 case AARCH64_OPND_SVE_FPIMM8:
2418 if (opnd->imm.is_fp == 0)
2419 {
2420 set_other_error (mismatch_detail, idx,
2421 _("floating-point immediate expected"));
2422 return 0;
2423 }
2424 /* The value is expected to be an 8-bit floating-point constant with
2425 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2426 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2427 instruction). */
2428 if (!value_in_range_p (opnd->imm.value, 0, 255))
2429 {
2430 set_other_error (mismatch_detail, idx,
2431 _("immediate out of range"));
2432 return 0;
2433 }
2434 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2435 {
2436 set_other_error (mismatch_detail, idx,
2437 _("invalid shift operator"));
2438 return 0;
2439 }
2440 break;
2441
2442 case AARCH64_OPND_SVE_AIMM:
2443 min_value = 0;
2444 sve_aimm:
2445 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2446 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2447 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2448 uvalue = opnd->imm.value;
2449 shift = opnd->shifter.amount;
2450 if (size == 1)
2451 {
2452 if (shift != 0)
2453 {
2454 set_other_error (mismatch_detail, idx,
2455 _("no shift amount allowed for"
2456 " 8-bit constants"));
2457 return 0;
2458 }
2459 }
2460 else
2461 {
2462 if (shift != 0 && shift != 8)
2463 {
2464 set_other_error (mismatch_detail, idx,
2465 _("shift amount must be 0 or 8"));
2466 return 0;
2467 }
2468 if (shift == 0 && (uvalue & 0xff) == 0)
2469 {
2470 shift = 8;
2471 uvalue = (int64_t) uvalue / 256;
2472 }
2473 }
2474 mask >>= shift;
2475 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2476 {
2477 set_other_error (mismatch_detail, idx,
2478 _("immediate too big for element size"));
2479 return 0;
2480 }
2481 uvalue = (uvalue - min_value) & mask;
2482 if (uvalue > 0xff)
2483 {
2484 set_other_error (mismatch_detail, idx,
2485 _("invalid arithmetic immediate"));
2486 return 0;
2487 }
2488 break;
2489
2490 case AARCH64_OPND_SVE_ASIMM:
2491 min_value = -128;
2492 goto sve_aimm;
2493
2494 case AARCH64_OPND_SVE_I1_HALF_ONE:
2495 assert (opnd->imm.is_fp);
2496 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2497 {
2498 set_other_error (mismatch_detail, idx,
2499 _("floating-point value must be 0.5 or 1.0"));
2500 return 0;
2501 }
2502 break;
2503
2504 case AARCH64_OPND_SVE_I1_HALF_TWO:
2505 assert (opnd->imm.is_fp);
2506 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2507 {
2508 set_other_error (mismatch_detail, idx,
2509 _("floating-point value must be 0.5 or 2.0"));
2510 return 0;
2511 }
2512 break;
2513
2514 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2515 assert (opnd->imm.is_fp);
2516 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2517 {
2518 set_other_error (mismatch_detail, idx,
2519 _("floating-point value must be 0.0 or 1.0"));
2520 return 0;
2521 }
2522 break;
2523
2524 case AARCH64_OPND_SVE_INV_LIMM:
2525 {
2526 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2527 uint64_t uimm = ~opnd->imm.value;
2528 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2529 {
2530 set_other_error (mismatch_detail, idx,
2531 _("immediate out of range"));
2532 return 0;
2533 }
2534 }
2535 break;
2536
2537 case AARCH64_OPND_SVE_LIMM_MOV:
2538 {
2539 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2540 uint64_t uimm = opnd->imm.value;
2541 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2542 {
2543 set_other_error (mismatch_detail, idx,
2544 _("immediate out of range"));
2545 return 0;
2546 }
2547 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2548 {
2549 set_other_error (mismatch_detail, idx,
2550 _("invalid replicated MOV immediate"));
2551 return 0;
2552 }
2553 }
2554 break;
2555
2556 case AARCH64_OPND_SVE_PATTERN_SCALED:
2557 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2558 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2559 {
2560 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2561 return 0;
2562 }
2563 break;
2564
2565 case AARCH64_OPND_SVE_SHLIMM_PRED:
2566 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2567 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2568 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2569 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2570 {
2571 set_imm_out_of_range_error (mismatch_detail, idx,
2572 0, 8 * size - 1);
2573 return 0;
2574 }
2575 break;
2576
2577 case AARCH64_OPND_SVE_SHRIMM_PRED:
2578 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2579 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2580 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2581 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2582 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2583 {
2584 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2585 return 0;
2586 }
2587 break;
2588
2589 default:
2590 break;
2591 }
2592 break;
2593
2594 case AARCH64_OPND_CLASS_SYSTEM:
2595 switch (type)
2596 {
2597 case AARCH64_OPND_PSTATEFIELD:
2598 for (i = 0; aarch64_pstatefields[i].name; ++i)
2599 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2600 break;
2601 assert (aarch64_pstatefields[i].name);
2602 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2603 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2604 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2605 {
2606 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2607 return 0;
2608 }
2609 break;
2610 default:
2611 break;
2612 }
2613 break;
2614
2615 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2616 /* Get the upper bound for the element index. */
2617 if (opcode->op == OP_FCMLA_ELEM)
2618 /* FCMLA index range depends on the vector size of other operands
2619 and is halfed because complex numbers take two elements. */
2620 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2621 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2622 else
2623 num = 16;
2624 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2625 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2626
2627 /* Index out-of-range. */
2628 if (!value_in_range_p (opnd->reglane.index, 0, num))
2629 {
2630 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2631 return 0;
2632 }
2633 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2634 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2635 number is encoded in "size:M:Rm":
2636 size <Vm>
2637 00 RESERVED
2638 01 0:Rm
2639 10 M:Rm
2640 11 RESERVED */
2641 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2642 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2643 {
2644 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2645 return 0;
2646 }
2647 break;
2648
2649 case AARCH64_OPND_CLASS_MODIFIED_REG:
2650 assert (idx == 1 || idx == 2);
2651 switch (type)
2652 {
2653 case AARCH64_OPND_Rm_EXT:
2654 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2655 && opnd->shifter.kind != AARCH64_MOD_LSL)
2656 {
2657 set_other_error (mismatch_detail, idx,
2658 _("extend operator expected"));
2659 return 0;
2660 }
2661 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2662 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2663 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2664 case. */
2665 if (!aarch64_stack_pointer_p (opnds + 0)
2666 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2667 {
2668 if (!opnd->shifter.operator_present)
2669 {
2670 set_other_error (mismatch_detail, idx,
2671 _("missing extend operator"));
2672 return 0;
2673 }
2674 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2675 {
2676 set_other_error (mismatch_detail, idx,
2677 _("'LSL' operator not allowed"));
2678 return 0;
2679 }
2680 }
2681 assert (opnd->shifter.operator_present /* Default to LSL. */
2682 || opnd->shifter.kind == AARCH64_MOD_LSL);
2683 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2684 {
2685 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2686 return 0;
2687 }
2688 /* In the 64-bit form, the final register operand is written as Wm
2689 for all but the (possibly omitted) UXTX/LSL and SXTX
2690 operators.
2691 N.B. GAS allows X register to be used with any operator as a
2692 programming convenience. */
2693 if (qualifier == AARCH64_OPND_QLF_X
2694 && opnd->shifter.kind != AARCH64_MOD_LSL
2695 && opnd->shifter.kind != AARCH64_MOD_UXTX
2696 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2697 {
2698 set_other_error (mismatch_detail, idx, _("W register expected"));
2699 return 0;
2700 }
2701 break;
2702
2703 case AARCH64_OPND_Rm_SFT:
2704 /* ROR is not available to the shifted register operand in
2705 arithmetic instructions. */
2706 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2707 {
2708 set_other_error (mismatch_detail, idx,
2709 _("shift operator expected"));
2710 return 0;
2711 }
2712 if (opnd->shifter.kind == AARCH64_MOD_ROR
2713 && opcode->iclass != log_shift)
2714 {
2715 set_other_error (mismatch_detail, idx,
2716 _("'ROR' operator not allowed"));
2717 return 0;
2718 }
2719 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2720 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2721 {
2722 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2723 return 0;
2724 }
2725 break;
2726
2727 default:
2728 break;
2729 }
2730 break;
2731
2732 default:
2733 break;
2734 }
2735
2736 return 1;
2737 }
2738
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   error kind when it is notified that an instruction does not pass the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Index of the operand that must be tied to operand 0; a value of 0
     means the opcode has no tied operand.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
	{
	/* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.
	*/
	case sme_ldr:
	case sme_str:
	  assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
	  assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
	  if (inst->operands[0].za_tile_vector.index.imm
	      != inst->operands[1].addr.offset.imm)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
		  mismatch_detail->index = i;
		}
	      return 0;
	    }
	  break;

	default:
	  /* Check for cases where a source register needs to be the same as the
	     destination register.  Do this before matching qualifiers since if
	     an instruction has both invalid tying and invalid qualifiers,
	     the error about qualifiers would suggest several alternative
	     instructions that also have invalid tying.  */
	  if (inst->operands[0].reg.regno
	      != inst->operands[i].reg.regno)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		  mismatch_detail->index = i;
		  mismatch_detail->error = NULL;
		}
	      return 0;
	    }
	  break;
	}
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by operand_general_constraint_met_p,
     which has to be called after this in order to get all of the operands'
     qualifiers established.  */
  if (match_operands_qualifier (inst, true /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' constraint.  Stop at the first NIL operand type;
     operands marked "skip" are incomplete and deliberately not checked.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
2853
2854 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2855 Also updates the TYPE of each INST->OPERANDS with the corresponding
2856 value of OPCODE->OPERANDS.
2857
2858 Note that some operand qualifiers may need to be manually cleared by
2859 the caller before it further calls the aarch64_opcode_encode; by
2860 doing this, it helps the qualifier matching facilities work
2861 properly. */
2862
2863 const aarch64_opcode*
2864 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2865 {
2866 int i;
2867 const aarch64_opcode *old = inst->opcode;
2868
2869 inst->opcode = opcode;
2870
2871 /* Update the operand types. */
2872 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2873 {
2874 inst->operands[i].type = opcode->operands[i];
2875 if (opcode->operands[i] == AARCH64_OPND_NIL)
2876 break;
2877 }
2878
2879 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2880
2881 return old;
2882 }
2883
2884 int
2885 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2886 {
2887 int i;
2888 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2889 if (operands[i] == operand)
2890 return i;
2891 else if (operands[i] == AARCH64_OPND_NIL)
2892 break;
2893 return -1;
2894 }
2895
/* R0...R30, followed by FOR31.  BANK expands to a 32-element name-table
   initializer: R (n) yields the name of register n for 0 <= n <= 30, and
   FOR31 supplies the name used for register number 31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),	\
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15),	\
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23),	\
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed as int_reg[has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2927
2928 /* Return the integer register name.
2929 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2930
2931 static inline const char *
2932 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2933 {
2934 const int has_zr = sp_reg_p ? 0 : 1;
2935 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2936 return int_reg[has_zr][is_64][regno];
2937 }
2938
2939 /* Like get_int_reg_name, but IS_64 is always 1. */
2940
2941 static inline const char *
2942 get_64bit_int_reg_name (int regno, int sp_reg_p)
2943 {
2944 const int has_zr = sp_reg_p ? 0 : 1;
2945 return int_reg[has_zr][1][regno];
2946 }
2947
2948 /* Get the name of the integer offset register in OPND, using the shift type
2949 to decide whether it's a word or doubleword. */
2950
2951 static inline const char *
2952 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2953 {
2954 switch (opnd->shifter.kind)
2955 {
2956 case AARCH64_MOD_UXTW:
2957 case AARCH64_MOD_SXTW:
2958 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2959
2960 case AARCH64_MOD_LSL:
2961 case AARCH64_MOD_SXTX:
2962 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2963
2964 default:
2965 abort ();
2966 }
2967 }
2968
2969 /* Get the name of the SVE vector offset register in OPND, using the operand
2970 qualifier to decide whether the suffix should be .S or .D. */
2971
2972 static inline const char *
2973 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2974 {
2975 assert (qualifier == AARCH64_OPND_QLF_S_S
2976 || qualifier == AARCH64_OPND_QLF_S_D);
2977 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2978 }
2979
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit image produced by expand_fp_imm to be
   reinterpreted as the corresponding floating-point type.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* NOTE: this deliberately holds a 32-bit float, presumably because
   half-precision immediates are expanded to single precision (see the
   SIZE == 2 case of expand_fp_imm) — confirm against the callers.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2999
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  Expand IMM8 to the bit
   image of a single-precision value (SIZE == 4) or a double-precision
   value (SIZE == 8) and return it.  A half-precision value (SIZE == 2)
   is expanded to single precision.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 1;	/* imm8<7>  */
  const uint32_t b6 = (imm8 >> 6) & 1;		/* imm8<6>  */
  const uint32_t frac = imm8 & 0x3f;		/* imm8<5:0>  */

  if (size == 8)
    /* sign : NOT(imm8<6>) : Replicate(imm8<6>,8) : imm8<5:0> : Zeros(48).  */
    return ((uint64_t) sign << 63)
	   | ((uint64_t) (b6 ^ 1) << 62)
	   | ((b6 ? (uint64_t) 0xff : 0) << 54)
	   | ((uint64_t) frac << 48);

  if (size == 4 || size == 2)
    /* sign : NOT(imm8<6>) : Replicate(imm8<6>,5) : imm8<5:0> : Zeros(19).  */
    return ((uint64_t) sign << 31)
	   | ((uint64_t) (b6 ^ 1) << 30)
	   | ((uint64_t) (b6 ? 0x1f : 0) << 25)
	   | ((uint64_t) frac << 19);

  /* An unsupported size.  */
  assert (0);
  return 0;
}
3043
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];  /* Temporary buffer for the optional "[index]" suffix.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Enumerated form; the list may wrap past register 31, hence the
	 modulo-32 arithmetic on each successor.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
3102
3103 /* Print the register+immediate address in OPND to BUF, which has SIZE
3104 characters. BASE is the name of the base register. */
3105
3106 static void
3107 print_immediate_offset_address (char *buf, size_t size,
3108 const aarch64_opnd_info *opnd,
3109 const char *base)
3110 {
3111 if (opnd->addr.writeback)
3112 {
3113 if (opnd->addr.preind)
3114 {
3115 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3116 snprintf (buf, size, "[%s]!", base);
3117 else
3118 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3119 }
3120 else
3121 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3122 }
3123 else
3124 {
3125 if (opnd->shifter.operator_present)
3126 {
3127 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3128 snprintf (buf, size, "[%s, #%d, mul vl]",
3129 base, opnd->addr.offset.imm);
3130 }
3131 else if (opnd->addr.offset.imm)
3132 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3133 else
3134 snprintf (buf, size, "[%s]", base);
3135 }
3136 }
3137
3138 /* Produce the string representation of the register offset address operand
3139 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3140 the names of the base and offset registers. */
3141 static void
3142 print_register_offset_address (char *buf, size_t size,
3143 const aarch64_opnd_info *opnd,
3144 const char *base, const char *offset)
3145 {
3146 char tb[16]; /* Temporary buffer. */
3147 bool print_extend_p = true;
3148 bool print_amount_p = true;
3149 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3150
3151 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3152 || !opnd->shifter.amount_present))
3153 {
3154 /* Not print the shift/extend amount when the amount is zero and
3155 when it is not the special case of 8-bit load/store instruction. */
3156 print_amount_p = false;
3157 /* Likewise, no need to print the shift operator LSL in such a
3158 situation. */
3159 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3160 print_extend_p = false;
3161 }
3162
3163 /* Prepare for the extend/shift. */
3164 if (print_extend_p)
3165 {
3166 if (print_amount_p)
3167 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3168 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3169 (opnd->shifter.amount % 100));
3170 else
3171 snprintf (tb, sizeof (tb), ", %s", shift_name);
3172 }
3173 else
3174 tb[0] = '\0';
3175
3176 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3177 }
3178
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of
   tile names that represents the encoded immediate mask.

   For example:
   * An all-ones immediate is disassembled as {za}.
   * An all-zeros immediate is disassembled as {}.
*/
static void
print_sme_za_list (char *buf, size_t size, int mask)
{
  /* Candidate tile-group names, widest coverage first, each paired with
     the immediate bits it stands for.  */
  static const struct { const char *name; int bits; } tiles[] = {
    { "za",    0xff }, { "za0.h", 0x55 }, { "za1.h", 0xaa }, { "za0.s", 0x11 },
    { "za1.s", 0x22 }, { "za2.s", 0x44 }, { "za3.s", 0x88 }, { "za0.d", 0x01 },
    { "za1.d", 0x02 }, { "za2.d", 0x04 }, { "za3.d", 0x08 }, { "za4.d", 0x10 },
    { "za5.d", 0x20 }, { "za6.d", 0x40 }, { "za7.d", 0x80 }, { " ",     0x00 }
  };
  const size_t count = sizeof (tiles) / sizeof (tiles[0]);
  size_t i;
  int out = snprintf (buf, size, "{");

  for (i = 0; i < count; i++)
    {
      if ((mask & tiles[i].bits) == tiles[i].bits)
	{
	  /* This group is fully covered; emit it and clear its bits.
	     A ", " separator is needed once something follows the "{".  */
	  mask &= ~tiles[i].bits;
	  out += snprintf (buf + out, size - out,
			   out > 1 ? ", %s" : "%s", tiles[i].name);
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + out, size - out, "}");
}
3218
3219 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3220 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3221 PC, PCREL_P and ADDRESS are used to pass in and return information about
3222 the PC-relative address calculation, where the PC value is passed in
3223 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3224 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3225 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3226
3227 The function serves both the disassembler and the assembler diagnostics
3228 issuer, which is the reason why it lives in this file. */
3229
3230 void
3231 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3232 const aarch64_opcode *opcode,
3233 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3234 bfd_vma *address, char** notes,
3235 char *comment, size_t comment_size,
3236 aarch64_feature_set features)
3237 {
3238 unsigned int i, num_conds;
3239 const char *name = NULL;
3240 const aarch64_opnd_info *opnd = opnds + idx;
3241 enum aarch64_modifier_kind kind;
3242 uint64_t addr, enum_value;
3243
3244 if (comment != NULL)
3245 {
3246 assert (comment_size > 0);
3247 comment[0] = '\0';
3248 }
3249 else
3250 assert (comment_size == 0);
3251
3252 buf[0] = '\0';
3253 if (pcrel_p)
3254 *pcrel_p = 0;
3255
3256 switch (opnd->type)
3257 {
3258 case AARCH64_OPND_Rd:
3259 case AARCH64_OPND_Rn:
3260 case AARCH64_OPND_Rm:
3261 case AARCH64_OPND_Rt:
3262 case AARCH64_OPND_Rt2:
3263 case AARCH64_OPND_Rs:
3264 case AARCH64_OPND_Ra:
3265 case AARCH64_OPND_Rt_LS64:
3266 case AARCH64_OPND_Rt_SYS:
3267 case AARCH64_OPND_PAIRREG:
3268 case AARCH64_OPND_SVE_Rm:
3269 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3270 the <ic_op>, therefore we use opnd->present to override the
3271 generic optional-ness information. */
3272 if (opnd->type == AARCH64_OPND_Rt_SYS)
3273 {
3274 if (!opnd->present)
3275 break;
3276 }
3277 /* Omit the operand, e.g. RET. */
3278 else if (optional_operand_p (opcode, idx)
3279 && (opnd->reg.regno
3280 == get_optional_operand_default_value (opcode)))
3281 break;
3282 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3283 || opnd->qualifier == AARCH64_OPND_QLF_X);
3284 snprintf (buf, size, "%s",
3285 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3286 break;
3287
3288 case AARCH64_OPND_Rd_SP:
3289 case AARCH64_OPND_Rn_SP:
3290 case AARCH64_OPND_Rt_SP:
3291 case AARCH64_OPND_SVE_Rn_SP:
3292 case AARCH64_OPND_Rm_SP:
3293 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3294 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3295 || opnd->qualifier == AARCH64_OPND_QLF_X
3296 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3297 snprintf (buf, size, "%s",
3298 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3299 break;
3300
3301 case AARCH64_OPND_Rm_EXT:
3302 kind = opnd->shifter.kind;
3303 assert (idx == 1 || idx == 2);
3304 if ((aarch64_stack_pointer_p (opnds)
3305 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3306 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3307 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3308 && kind == AARCH64_MOD_UXTW)
3309 || (opnd->qualifier == AARCH64_OPND_QLF_X
3310 && kind == AARCH64_MOD_UXTX)))
3311 {
3312 /* 'LSL' is the preferred form in this case. */
3313 kind = AARCH64_MOD_LSL;
3314 if (opnd->shifter.amount == 0)
3315 {
3316 /* Shifter omitted. */
3317 snprintf (buf, size, "%s",
3318 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3319 break;
3320 }
3321 }
3322 if (opnd->shifter.amount)
3323 snprintf (buf, size, "%s, %s #%" PRIi64,
3324 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3325 aarch64_operand_modifiers[kind].name,
3326 opnd->shifter.amount);
3327 else
3328 snprintf (buf, size, "%s, %s",
3329 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3330 aarch64_operand_modifiers[kind].name);
3331 break;
3332
3333 case AARCH64_OPND_Rm_SFT:
3334 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3335 || opnd->qualifier == AARCH64_OPND_QLF_X);
3336 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3337 snprintf (buf, size, "%s",
3338 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3339 else
3340 snprintf (buf, size, "%s, %s #%" PRIi64,
3341 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3342 aarch64_operand_modifiers[opnd->shifter.kind].name,
3343 opnd->shifter.amount);
3344 break;
3345
3346 case AARCH64_OPND_Fd:
3347 case AARCH64_OPND_Fn:
3348 case AARCH64_OPND_Fm:
3349 case AARCH64_OPND_Fa:
3350 case AARCH64_OPND_Ft:
3351 case AARCH64_OPND_Ft2:
3352 case AARCH64_OPND_Sd:
3353 case AARCH64_OPND_Sn:
3354 case AARCH64_OPND_Sm:
3355 case AARCH64_OPND_SVE_VZn:
3356 case AARCH64_OPND_SVE_Vd:
3357 case AARCH64_OPND_SVE_Vm:
3358 case AARCH64_OPND_SVE_Vn:
3359 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3360 opnd->reg.regno);
3361 break;
3362
3363 case AARCH64_OPND_Va:
3364 case AARCH64_OPND_Vd:
3365 case AARCH64_OPND_Vn:
3366 case AARCH64_OPND_Vm:
3367 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3368 aarch64_get_qualifier_name (opnd->qualifier));
3369 break;
3370
3371 case AARCH64_OPND_Ed:
3372 case AARCH64_OPND_En:
3373 case AARCH64_OPND_Em:
3374 case AARCH64_OPND_Em16:
3375 case AARCH64_OPND_SM3_IMM2:
3376 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3377 aarch64_get_qualifier_name (opnd->qualifier),
3378 opnd->reglane.index);
3379 break;
3380
3381 case AARCH64_OPND_VdD1:
3382 case AARCH64_OPND_VnD1:
3383 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3384 break;
3385
3386 case AARCH64_OPND_LVn:
3387 case AARCH64_OPND_LVt:
3388 case AARCH64_OPND_LVt_AL:
3389 case AARCH64_OPND_LEt:
3390 print_register_list (buf, size, opnd, "v");
3391 break;
3392
3393 case AARCH64_OPND_SVE_Pd:
3394 case AARCH64_OPND_SVE_Pg3:
3395 case AARCH64_OPND_SVE_Pg4_5:
3396 case AARCH64_OPND_SVE_Pg4_10:
3397 case AARCH64_OPND_SVE_Pg4_16:
3398 case AARCH64_OPND_SVE_Pm:
3399 case AARCH64_OPND_SVE_Pn:
3400 case AARCH64_OPND_SVE_Pt:
3401 case AARCH64_OPND_SME_Pm:
3402 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3403 snprintf (buf, size, "p%d", opnd->reg.regno);
3404 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3405 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3406 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3407 aarch64_get_qualifier_name (opnd->qualifier));
3408 else
3409 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3410 aarch64_get_qualifier_name (opnd->qualifier));
3411 break;
3412
3413 case AARCH64_OPND_SVE_Za_5:
3414 case AARCH64_OPND_SVE_Za_16:
3415 case AARCH64_OPND_SVE_Zd:
3416 case AARCH64_OPND_SVE_Zm_5:
3417 case AARCH64_OPND_SVE_Zm_16:
3418 case AARCH64_OPND_SVE_Zn:
3419 case AARCH64_OPND_SVE_Zt:
3420 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3421 snprintf (buf, size, "z%d", opnd->reg.regno);
3422 else
3423 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3424 aarch64_get_qualifier_name (opnd->qualifier));
3425 break;
3426
3427 case AARCH64_OPND_SVE_ZnxN:
3428 case AARCH64_OPND_SVE_ZtxN:
3429 print_register_list (buf, size, opnd, "z");
3430 break;
3431
3432 case AARCH64_OPND_SVE_Zm3_INDEX:
3433 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3434 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3435 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3436 case AARCH64_OPND_SVE_Zm4_INDEX:
3437 case AARCH64_OPND_SVE_Zn_INDEX:
3438 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3439 aarch64_get_qualifier_name (opnd->qualifier),
3440 opnd->reglane.index);
3441 break;
3442
3443 case AARCH64_OPND_SME_ZAda_2b:
3444 case AARCH64_OPND_SME_ZAda_3b:
3445 snprintf (buf, size, "za%d.%s", opnd->reg.regno,
3446 aarch64_get_qualifier_name (opnd->qualifier));
3447 break;
3448
3449 case AARCH64_OPND_SME_ZA_HV_idx_src:
3450 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3451 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3452 snprintf (buf, size, "%sza%d%c.%s[w%d, %d]%s",
3453 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3454 opnd->za_tile_vector.regno,
3455 opnd->za_tile_vector.v == 1 ? 'v' : 'h',
3456 aarch64_get_qualifier_name (opnd->qualifier),
3457 opnd->za_tile_vector.index.regno,
3458 opnd->za_tile_vector.index.imm,
3459 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3460 break;
3461
3462 case AARCH64_OPND_SME_list_of_64bit_tiles:
3463 print_sme_za_list (buf, size, opnd->reg.regno);
3464 break;
3465
3466 case AARCH64_OPND_SME_ZA_array:
3467 snprintf (buf, size, "za[w%d, %d]",
3468 opnd->za_tile_vector.index.regno,
3469 opnd->za_tile_vector.index.imm);
3470 break;
3471
3472 case AARCH64_OPND_SME_SM_ZA:
3473 snprintf (buf, size, "%s", opnd->reg.regno == 's' ? "sm" : "za");
3474 break;
3475
3476 case AARCH64_OPND_SME_PnT_Wm_imm:
3477 snprintf (buf, size, "p%d.%s[w%d, %d]",
3478 opnd->za_tile_vector.regno,
3479 aarch64_get_qualifier_name (opnd->qualifier),
3480 opnd->za_tile_vector.index.regno,
3481 opnd->za_tile_vector.index.imm);
3482 break;
3483
3484 case AARCH64_OPND_CRn:
3485 case AARCH64_OPND_CRm:
3486 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3487 break;
3488
3489 case AARCH64_OPND_IDX:
3490 case AARCH64_OPND_MASK:
3491 case AARCH64_OPND_IMM:
3492 case AARCH64_OPND_IMM_2:
3493 case AARCH64_OPND_WIDTH:
3494 case AARCH64_OPND_UIMM3_OP1:
3495 case AARCH64_OPND_UIMM3_OP2:
3496 case AARCH64_OPND_BIT_NUM:
3497 case AARCH64_OPND_IMM_VLSL:
3498 case AARCH64_OPND_IMM_VLSR:
3499 case AARCH64_OPND_SHLL_IMM:
3500 case AARCH64_OPND_IMM0:
3501 case AARCH64_OPND_IMMR:
3502 case AARCH64_OPND_IMMS:
3503 case AARCH64_OPND_UNDEFINED:
3504 case AARCH64_OPND_FBITS:
3505 case AARCH64_OPND_TME_UIMM16:
3506 case AARCH64_OPND_SIMM5:
3507 case AARCH64_OPND_SVE_SHLIMM_PRED:
3508 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3509 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3510 case AARCH64_OPND_SVE_SHRIMM_PRED:
3511 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3512 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3513 case AARCH64_OPND_SVE_SIMM5:
3514 case AARCH64_OPND_SVE_SIMM5B:
3515 case AARCH64_OPND_SVE_SIMM6:
3516 case AARCH64_OPND_SVE_SIMM8:
3517 case AARCH64_OPND_SVE_UIMM3:
3518 case AARCH64_OPND_SVE_UIMM7:
3519 case AARCH64_OPND_SVE_UIMM8:
3520 case AARCH64_OPND_SVE_UIMM8_53:
3521 case AARCH64_OPND_IMM_ROT1:
3522 case AARCH64_OPND_IMM_ROT2:
3523 case AARCH64_OPND_IMM_ROT3:
3524 case AARCH64_OPND_SVE_IMM_ROT1:
3525 case AARCH64_OPND_SVE_IMM_ROT2:
3526 case AARCH64_OPND_SVE_IMM_ROT3:
3527 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3528 break;
3529
3530 case AARCH64_OPND_SVE_I1_HALF_ONE:
3531 case AARCH64_OPND_SVE_I1_HALF_TWO:
3532 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3533 {
3534 single_conv_t c;
3535 c.i = opnd->imm.value;
3536 snprintf (buf, size, "#%.1f", c.f);
3537 break;
3538 }
3539
3540 case AARCH64_OPND_SVE_PATTERN:
3541 if (optional_operand_p (opcode, idx)
3542 && opnd->imm.value == get_optional_operand_default_value (opcode))
3543 break;
3544 enum_value = opnd->imm.value;
3545 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3546 if (aarch64_sve_pattern_array[enum_value])
3547 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3548 else
3549 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3550 break;
3551
3552 case AARCH64_OPND_SVE_PATTERN_SCALED:
3553 if (optional_operand_p (opcode, idx)
3554 && !opnd->shifter.operator_present
3555 && opnd->imm.value == get_optional_operand_default_value (opcode))
3556 break;
3557 enum_value = opnd->imm.value;
3558 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3559 if (aarch64_sve_pattern_array[opnd->imm.value])
3560 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3561 else
3562 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3563 if (opnd->shifter.operator_present)
3564 {
3565 size_t len = strlen (buf);
3566 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3567 aarch64_operand_modifiers[opnd->shifter.kind].name,
3568 opnd->shifter.amount);
3569 }
3570 break;
3571
3572 case AARCH64_OPND_SVE_PRFOP:
3573 enum_value = opnd->imm.value;
3574 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3575 if (aarch64_sve_prfop_array[enum_value])
3576 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3577 else
3578 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3579 break;
3580
3581 case AARCH64_OPND_IMM_MOV:
3582 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3583 {
3584 case 4: /* e.g. MOV Wd, #<imm32>. */
3585 {
3586 int imm32 = opnd->imm.value;
3587 snprintf (buf, size, "#0x%-20x", imm32);
3588 snprintf (comment, comment_size, "#%d", imm32);
3589 }
3590 break;
3591 case 8: /* e.g. MOV Xd, #<imm64>. */
3592 snprintf (buf, size, "#0x%-20" PRIx64, opnd->imm.value);
3593 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3594 break;
3595 default:
3596 snprintf (buf, size, "<invalid>");
3597 break;
3598 }
3599 break;
3600
3601 case AARCH64_OPND_FPIMM0:
3602 snprintf (buf, size, "#0.0");
3603 break;
3604
3605 case AARCH64_OPND_LIMM:
3606 case AARCH64_OPND_AIMM:
3607 case AARCH64_OPND_HALF:
3608 case AARCH64_OPND_SVE_INV_LIMM:
3609 case AARCH64_OPND_SVE_LIMM:
3610 case AARCH64_OPND_SVE_LIMM_MOV:
3611 if (opnd->shifter.amount)
3612 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3613 opnd->shifter.amount);
3614 else
3615 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3616 break;
3617
3618 case AARCH64_OPND_SIMD_IMM:
3619 case AARCH64_OPND_SIMD_IMM_SFT:
3620 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3621 || opnd->shifter.kind == AARCH64_MOD_NONE)
3622 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3623 else
3624 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3625 aarch64_operand_modifiers[opnd->shifter.kind].name,
3626 opnd->shifter.amount);
3627 break;
3628
3629 case AARCH64_OPND_SVE_AIMM:
3630 case AARCH64_OPND_SVE_ASIMM:
3631 if (opnd->shifter.amount)
3632 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3633 opnd->shifter.amount);
3634 else
3635 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3636 break;
3637
3638 case AARCH64_OPND_FPIMM:
3639 case AARCH64_OPND_SIMD_FPIMM:
3640 case AARCH64_OPND_SVE_FPIMM8:
3641 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3642 {
3643 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3644 {
3645 half_conv_t c;
3646 c.i = expand_fp_imm (2, opnd->imm.value);
3647 snprintf (buf, size, "#%.18e", c.f);
3648 }
3649 break;
3650 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3651 {
3652 single_conv_t c;
3653 c.i = expand_fp_imm (4, opnd->imm.value);
3654 snprintf (buf, size, "#%.18e", c.f);
3655 }
3656 break;
3657 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3658 {
3659 double_conv_t c;
3660 c.i = expand_fp_imm (8, opnd->imm.value);
3661 snprintf (buf, size, "#%.18e", c.d);
3662 }
3663 break;
3664 default:
3665 snprintf (buf, size, "<invalid>");
3666 break;
3667 }
3668 break;
3669
3670 case AARCH64_OPND_CCMP_IMM:
3671 case AARCH64_OPND_NZCV:
3672 case AARCH64_OPND_EXCEPTION:
3673 case AARCH64_OPND_UIMM4:
3674 case AARCH64_OPND_UIMM4_ADDG:
3675 case AARCH64_OPND_UIMM7:
3676 case AARCH64_OPND_UIMM10:
3677 if (optional_operand_p (opcode, idx)
3678 && (opnd->imm.value ==
3679 (int64_t) get_optional_operand_default_value (opcode)))
3680 /* Omit the operand, e.g. DCPS1. */
3681 break;
3682 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3683 break;
3684
3685 case AARCH64_OPND_COND:
3686 case AARCH64_OPND_COND1:
3687 snprintf (buf, size, "%s", opnd->cond->names[0]);
3688 num_conds = ARRAY_SIZE (opnd->cond->names);
3689 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3690 {
3691 size_t len = comment != NULL ? strlen (comment) : 0;
3692 if (i == 1)
3693 snprintf (comment + len, comment_size - len, "%s = %s",
3694 opnd->cond->names[0], opnd->cond->names[i]);
3695 else
3696 snprintf (comment + len, comment_size - len, ", %s",
3697 opnd->cond->names[i]);
3698 }
3699 break;
3700
3701 case AARCH64_OPND_ADDR_ADRP:
3702 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3703 + opnd->imm.value;
3704 if (pcrel_p)
3705 *pcrel_p = 1;
3706 if (address)
3707 *address = addr;
3708 /* This is not necessary during the disassembling, as print_address_func
3709 in the disassemble_info will take care of the printing. But some
3710 other callers may be still interested in getting the string in *STR,
3711 so here we do snprintf regardless. */
3712 snprintf (buf, size, "#0x%" PRIx64, addr);
3713 break;
3714
3715 case AARCH64_OPND_ADDR_PCREL14:
3716 case AARCH64_OPND_ADDR_PCREL19:
3717 case AARCH64_OPND_ADDR_PCREL21:
3718 case AARCH64_OPND_ADDR_PCREL26:
3719 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3720 if (pcrel_p)
3721 *pcrel_p = 1;
3722 if (address)
3723 *address = addr;
3724 /* This is not necessary during the disassembling, as print_address_func
3725 in the disassemble_info will take care of the printing. But some
3726 other callers may be still interested in getting the string in *STR,
3727 so here we do snprintf regardless. */
3728 snprintf (buf, size, "#0x%" PRIx64, addr);
3729 break;
3730
3731 case AARCH64_OPND_ADDR_SIMPLE:
3732 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3733 case AARCH64_OPND_SIMD_ADDR_POST:
3734 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3735 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3736 {
3737 if (opnd->addr.offset.is_reg)
3738 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3739 else
3740 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3741 }
3742 else
3743 snprintf (buf, size, "[%s]", name);
3744 break;
3745
3746 case AARCH64_OPND_ADDR_REGOFF:
3747 case AARCH64_OPND_SVE_ADDR_R:
3748 case AARCH64_OPND_SVE_ADDR_RR:
3749 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3750 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3751 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3752 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3753 case AARCH64_OPND_SVE_ADDR_RX:
3754 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3755 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3756 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3757 print_register_offset_address
3758 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3759 get_offset_int_reg_name (opnd));
3760 break;
3761
3762 case AARCH64_OPND_SVE_ADDR_ZX:
3763 print_register_offset_address
3764 (buf, size, opnd,
3765 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3766 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3767 break;
3768
3769 case AARCH64_OPND_SVE_ADDR_RZ:
3770 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3771 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3772 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3773 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3774 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3775 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3776 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3777 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3778 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3779 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3780 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3781 print_register_offset_address
3782 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3783 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3784 break;
3785
3786 case AARCH64_OPND_ADDR_SIMM7:
3787 case AARCH64_OPND_ADDR_SIMM9:
3788 case AARCH64_OPND_ADDR_SIMM9_2:
3789 case AARCH64_OPND_ADDR_SIMM10:
3790 case AARCH64_OPND_ADDR_SIMM11:
3791 case AARCH64_OPND_ADDR_SIMM13:
3792 case AARCH64_OPND_ADDR_OFFSET:
3793 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
3794 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3795 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3796 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3797 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3798 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3799 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3800 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3801 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3802 case AARCH64_OPND_SVE_ADDR_RI_U6:
3803 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3804 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3805 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3806 print_immediate_offset_address
3807 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3808 break;
3809
3810 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3811 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3812 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3813 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3814 print_immediate_offset_address
3815 (buf, size, opnd,
3816 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3817 break;
3818
3819 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3820 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3821 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3822 print_register_offset_address
3823 (buf, size, opnd,
3824 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3825 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3826 break;
3827
3828 case AARCH64_OPND_ADDR_UIMM12:
3829 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3830 if (opnd->addr.offset.imm)
3831 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3832 else
3833 snprintf (buf, size, "[%s]", name);
3834 break;
3835
3836 case AARCH64_OPND_SYSREG:
3837 for (i = 0; aarch64_sys_regs[i].name; ++i)
3838 {
3839 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3840
3841 bool exact_match
3842 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3843 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3844 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3845
3846 /* Try and find an exact match, But if that fails, return the first
3847 partial match that was found. */
3848 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3849 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3850 && (name == NULL || exact_match))
3851 {
3852 name = aarch64_sys_regs[i].name;
3853 if (exact_match)
3854 {
3855 if (notes)
3856 *notes = NULL;
3857 break;
3858 }
3859
3860 /* If we didn't match exactly, that means the presense of a flag
3861 indicates what we didn't want for this instruction. e.g. If
3862 F_REG_READ is there, that means we were looking for a write
3863 register. See aarch64_ext_sysreg. */
3864 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3865 *notes = _("reading from a write-only register");
3866 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3867 *notes = _("writing to a read-only register");
3868 }
3869 }
3870
3871 if (name)
3872 snprintf (buf, size, "%s", name);
3873 else
3874 {
3875 /* Implementation defined system register. */
3876 unsigned int value = opnd->sysreg.value;
3877 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3878 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3879 value & 0x7);
3880 }
3881 break;
3882
3883 case AARCH64_OPND_PSTATEFIELD:
3884 for (i = 0; aarch64_pstatefields[i].name; ++i)
3885 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3886 {
3887 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
3888 SVCRZA and SVCRSMZA. */
3889 uint32_t flags = aarch64_pstatefields[i].flags;
3890 if (flags & F_REG_IN_CRM
3891 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
3892 != PSTATE_DECODE_CRM (flags)))
3893 continue;
3894 break;
3895 }
3896 assert (aarch64_pstatefields[i].name);
3897 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3898 break;
3899
3900 case AARCH64_OPND_SYSREG_AT:
3901 case AARCH64_OPND_SYSREG_DC:
3902 case AARCH64_OPND_SYSREG_IC:
3903 case AARCH64_OPND_SYSREG_TLBI:
3904 case AARCH64_OPND_SYSREG_SR:
3905 snprintf (buf, size, "%s", opnd->sysins_op->name);
3906 break;
3907
3908 case AARCH64_OPND_BARRIER:
3909 case AARCH64_OPND_BARRIER_DSB_NXS:
3910 snprintf (buf, size, "%s", opnd->barrier->name);
3911 break;
3912
3913 case AARCH64_OPND_BARRIER_ISB:
3914 /* Operand can be omitted, e.g. in DCPS1. */
3915 if (! optional_operand_p (opcode, idx)
3916 || (opnd->barrier->value
3917 != get_optional_operand_default_value (opcode)))
3918 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3919 break;
3920
3921 case AARCH64_OPND_PRFOP:
3922 if (opnd->prfop->name != NULL)
3923 snprintf (buf, size, "%s", opnd->prfop->name);
3924 else
3925 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3926 break;
3927
3928 case AARCH64_OPND_BARRIER_PSB:
3929 snprintf (buf, size, "csync");
3930 break;
3931
3932 case AARCH64_OPND_BTI_TARGET:
3933 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3934 snprintf (buf, size, "%s", opnd->hint_option->name);
3935 break;
3936
3937 case AARCH64_OPND_MOPS_ADDR_Rd:
3938 case AARCH64_OPND_MOPS_ADDR_Rs:
3939 snprintf (buf, size, "[%s]!",
3940 get_int_reg_name (opnd->reg.regno, AARCH64_OPND_QLF_X, 0));
3941 break;
3942
3943 case AARCH64_OPND_MOPS_WB_Rn:
3944 snprintf (buf, size, "%s!",
3945 get_int_reg_name (opnd->reg.regno, AARCH64_OPND_QLF_X, 0));
3946 break;
3947
3948 default:
3949 snprintf (buf, size, "<invalid>");
3950 break;
3951 }
3952 }
3953
/* Pack a system-register encoding (op0, op1, CRn, CRm, op2) into the single
   integer value stored in the tables below.  The fields are packed into
   bits [19:5] and then shifted down so that op2 occupies the low bits.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable aliases for the CRn/CRm coprocessor register numbers used in
   the encodings below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Build one aarch64_sys_reg table entry.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

/* A register present in the base architecture (no feature gating).  */
#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

/* A register gated on a single architecture-extension feature bit.  */
#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

/* A register gated on two feature bits.  */
#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)

#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_V8_6(n,e,f)	  SR_FEAT (n,e,f,V8_6)
#define SR_V8_7(n,e,f)	  SR_FEAT (n,e,f,V8_7)
#define SR_V8_8(n,e,f)	  SR_FEAT (n,e,f,V8_8)
/* Has no separate libopcodes feature flag, but separated out for clarity.  */
#define SR_GIC(n,e,f)	  SR_CORE (n,e,f)
/* Has no separate libopcodes feature flag, but separated out for clarity.  */
#define SR_AMU(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_LOR(n,e,f)	  SR_FEAT (n,e,f,LOR)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_RNG(n,e,f)	  SR_FEAT (n,e,f,RNG)
#define SR_SME(n,e,f)	  SR_FEAT (n,e,f,SME)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)

/* Expand macro F once per exception level 1..15 for the given X.  */
#define SR_EXPAND_ELx(f,x) \
  f (x, 1),  \
  f (x, 2),  \
  f (x, 3),  \
  f (x, 4),  \
  f (x, 5),  \
  f (x, 6),  \
  f (x, 7),  \
  f (x, 8),  \
  f (x, 9),  \
  f (x, 10), \
  f (x, 11), \
  f (x, 12), \
  f (x, 13), \
  f (x, 14), \
  f (x, 15),

/* Expand macro F for both the EL1 and EL2 variants.  */
#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
4039
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these is set then the register is read-write.  */
4045 const aarch64_sys_reg aarch64_sys_regs [] =
4046 {
4047 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4048 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4049 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4050 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4051 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4052 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4053 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4054 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4055 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4056 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4057 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4058 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4059 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4060 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4061 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4062 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4063 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4064 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4065 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4066 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4067 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4068 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4069 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4070 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4071 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4072 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4073 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4074 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4075 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4076 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4077 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4078 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4079 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4080 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4081 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4082 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4083 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4084 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4085 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4086 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4087 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4088 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4089 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4090 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4091 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4092 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4093 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4094 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4095 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4096 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4097 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4098 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4099 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4100 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4101 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4102 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4103 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4104 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4105 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4106 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4107 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4108 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4109 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4110 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4111 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4112 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4113 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4114 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4115 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4116 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4117 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4118 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4119 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4120 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4121 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4122 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4123 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4124 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4125 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4126 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4127 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4128 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4129 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4130 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4131 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4132 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4133 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4134 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4135 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4136 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4137 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4138 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4139 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4140 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4141 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4142 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4143 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4144 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4145 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4146 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4147 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4148 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4149 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4150 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4151 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4152 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4153 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4154 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4155 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4156 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4157 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4158 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4159 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4160 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4161 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4162 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4163 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4164 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4165 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4166 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4167 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4168 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4169 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4170 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4171 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4172 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4173 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4174 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4175 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4176 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4177 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4178 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4179 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4180 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4181 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4182 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4183 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4184 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4185 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4186 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4187 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4188 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4189 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4190 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4191 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4192 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4193 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4194 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4195 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4196 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4197 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4198 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4199 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4200 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4201 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4202 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4203 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4204 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4205 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4206 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4207 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4208 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4209 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4210 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4211 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4212 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4213 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4214 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4215 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4216 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4217 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4218 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4219 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4220 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4221 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4222 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4223 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4224 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4225 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4226 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4227 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4228 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4229 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4230 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4231 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4232 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4233 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4234 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4235 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4236 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4237 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4238 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4239 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4240 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4241 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4242 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4243 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4244 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4245 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4246 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4247 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4248 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4249 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4250 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4251 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4252 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4253 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4254 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4255 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4256 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4257 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4258 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4259 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4260 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4261 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4262 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4263 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4264 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4265 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4266 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4267 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4268 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4269 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4270 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4271 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4272 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4273 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4274 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4275 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4276 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4277 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4278 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4279 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4280 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4281 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4282 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4283 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4284 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4285 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4286 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4287 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4288 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4289 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4290 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4291 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4292 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4293 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4294 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4295 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4296 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4297 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4298 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4299 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4300 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4301 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4302 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4303 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4304 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4305 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4306 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4307 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4308 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4309 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4310 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4311 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4312 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4313 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4314 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4315 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4316 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4317 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4318 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4319 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4320 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4321 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4322 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4323 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4324 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4325 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4326 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4327 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4328 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4329 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4330 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4331 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4332 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4333 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4334 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4335 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4336 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4337 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4338 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4339 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4340 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4341 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4342 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4343 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4344 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4345 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4346 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4347 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4348 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4349 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4350 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4351 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4352 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4353 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4354 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4355 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4356 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4357 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4358 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4359 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4360 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4361 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4362 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4363 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4364 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4365 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4366 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4367 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4368 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4369 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4370 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4371 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4372 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4373 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4374 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4375 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4376 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4377 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4378 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4379 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4380 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4381 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4382 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4383 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4384 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4385 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4386 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4387 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4388 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4389 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4390 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4391 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4392 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4393 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4394 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4395 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4396 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4397 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4398 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4399 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4400 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4401 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4402 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4403 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4404 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4405 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4406 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4407 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4408 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4409 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4410 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4411 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4412 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4413 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4414 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4415 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4416 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4417 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4418 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4419 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4420 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4421 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4422 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4423 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4424 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4425 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4426 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4427 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4428 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4429 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4430 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4431 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4432 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4433 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4434 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4435 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4436 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4437 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4438 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4439 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4440 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4441 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4442 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4443 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4444 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4445 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4446 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4447 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4448 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4449 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4450
4451 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4452 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4453 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4454 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4455 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4456 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4457 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4458 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4459 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4460 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4461 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4462 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4463 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4464 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4465 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4466
4467 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4468 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4469 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4470 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4471 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4472 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4473 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4474 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4475 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4476 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4477 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4478 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4479 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4480 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4481 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4482 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4483
4484 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4485 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4486 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4487 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4488
4489 #define ENC_BARLAR(x,n,lar) \
4490 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4491
4492 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4493 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4494
4495 SR_EXPAND_EL12 (PRBARn_ELx)
4496 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4497 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4498 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4499 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4500 SR_EXPAND_EL12 (PRLARn_ELx)
4501 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4502 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4503 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4504
4505 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4506 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4507 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4508 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4509 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4510 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4511 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4512
4513 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4514 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4515 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4516 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4517 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4518 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4519 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4520 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4521 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4522 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4523 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4524 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4525 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4526 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4527 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4528 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4529 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4530 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4531 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4532 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4533 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4534 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4535 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4536 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4537 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4538 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4539 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4540 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4541 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4542 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4543 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4544 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4545 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4546 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4547 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4548 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4549 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4550 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4551 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4552 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4553 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4554 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4555 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4556 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4557 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4558 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4559 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4560 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4561 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4562 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4563 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4564 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4565 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4566 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4567 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4568 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4569 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4570 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4571 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4572 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4573 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4574 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4575 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4576 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4577 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4578 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4579 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4580 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4581 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4582 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4583 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4584 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4585 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4586 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4587 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4588 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4589 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4590 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4591 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4592 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4593 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4594 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4595 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4596 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4597 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4598 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4599 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4600 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4601 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4602 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4603 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4604 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4605 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4606 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4607 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4608 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4609 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4610 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4611 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4612 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4613 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4614 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4615 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4616 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4617 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4618 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4619 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4620 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4621 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4622 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4623 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4624 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4625 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4626 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4627 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4628 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4629 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4630 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4631 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4632 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4633 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4634 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4635 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4636 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4637 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4638 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4639 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4640 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4641 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4642 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4643 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4644 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4645 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4646 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4647 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4648 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4649 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4650 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4651 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4652 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4653 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4654 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4655 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4656 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4657 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4658 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4659 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4660 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4661 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4662 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4663 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4664 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4665 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4666 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4667 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4668 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4669 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4670 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4671 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4672 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4673 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4674 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4675 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4676 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4677 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4678 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4679 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4680 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4681 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4682 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4683 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4684 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4685 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4686 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4687 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4688 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4689 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4690 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4691 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4692 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4693 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4694 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4695 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4696 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4697 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4698 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4699 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4700 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4701 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4702 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4703 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4704 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4705 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4706 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4707 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4708 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4709 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4710 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4711 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4712 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4713 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4714 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4715 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4716 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4717 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4718 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4719 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4720 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4721 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4722 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4723 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4724 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4725 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4726 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4727 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4728
4729 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4730 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4731 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4732 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4733 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4734 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4735 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4736 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4737 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4738 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4739 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4740 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4741
4742 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
4743 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4744 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4745 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4746 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4747
4748 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4749 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4750 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4751 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4752 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4753
4754 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4755 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4756 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4757 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4758 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4759 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4760 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4761 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4762 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4763 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4764 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4765 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4766 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4767 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4768 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4769 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4770 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4771 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4772 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4773 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4774 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4775 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4776 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4777 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4778 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4779 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4780 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4781 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4782 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4783 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4784 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4785 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4786 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4787 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4788 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4789 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4790 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4791 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4792 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4793 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4794 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4795 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4796 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4797 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4798 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4799 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4800 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4801 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4802 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4803 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4804 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4805 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4806 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4807 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4808 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4809 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4810 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4811 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4812 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4813 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4814 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4815 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4816 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4817 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4818 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4819 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4820 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4821 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4822 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4823 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4824 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4825 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4826 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4827 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4828 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4829 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4830 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4831 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4832 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4833 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4834 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4835 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4836 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4837 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4838 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4839 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4840 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4841 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4842 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4843 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4844 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4845 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4846 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4847 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4848 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
4849 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
4850 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
4851 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
4852 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
4853 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
4854 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
4855 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
4856 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
4857 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
4858 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
4859
4860 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
4861
4862 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
4863 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
4864 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
4865
4866 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
4867 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
4868 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
4869 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
4870 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
4871 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
4872 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
4873 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
4874 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
4875 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
4876 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
4877
4878 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
4879 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
4880 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
4881 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
4882 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
4883 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
4884 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
4885 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
4886 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
4887 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
4888 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
4889 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
4890 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
4891 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
4892 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
4893 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
4894 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
4895 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
4896 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
4897 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
4898 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
4899 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
4900 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
4901 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
4902 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
4903 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
4904 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
4905 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
4906 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
4907 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
4908 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
4909 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
4910 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
4911 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
4912 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
4913 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
4914 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
4915 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
4916 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
4917 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
4918 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
4919 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
4920 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
4921 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
4922 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
4923 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
4924 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
4925 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
4926
4927 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
4928 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
4929 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
4930 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
4931 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
4932 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
4933 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
4934 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
4935 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
4936 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
4937 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
4938 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
4939 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
4940 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
4941 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
4942 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
4943 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
4944 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
4945 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
4946 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
4947 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
4948 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
4949 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
4950 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
4951 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
4952 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
4953 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
4954 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
4955 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
4956 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
4957 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
4958 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
4959 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
4960 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
4961 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
4962 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
4963 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
4964 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
4965 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
4966 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
4967 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
4968 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
4969 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
4970 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
4971 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
4972 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
4973 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
4974 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
4975 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
4976 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
4977 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
4978 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
4979 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
4980 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
4981 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
4982
4983 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
4984 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
4985 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
4986 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
4987 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
4988 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
4989 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
4990 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
4991 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
4992 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
4993 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
4994 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
4995 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
4996 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
4997 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
4998 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
4999 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5000 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5001 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5002 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5003 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5004 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5005 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5006 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5007 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5008 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5009 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5010 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5011 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5012 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5013 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5014 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5015 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5016 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5017 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5018 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5019 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5020 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5021 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5022 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5023 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5024 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5025
5026 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5027 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5028
5029 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5030 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5031
5032 { 0, CPENC (0,0,0,0,0), 0, 0 }
5033 };
5034
5035 bool
5036 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5037 {
5038 return (reg_flags & F_DEPRECATED) != 0;
5039 }
5040
/* The CPENC macro below is fairly misleading: the fields here are not in
   CPENC form, but in op2op1 form.  The fields are encoded by
   ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them, only the first value will be set and
   the rest are masked out to 0.  As an example, take op2 = 3 and op1 = 2:
   CPENC would produce a value of 0b110000000001000000 (0x30040), while
   what you want is 0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  /* PSTATE fields accessible via MSR (immediate).  The numeric field is
     the packed op2op1 value described in the comment above, not a CPENC
     encoding.  F_REG_MAX_VALUE bounds the immediate operand.  */
  SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
  SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
  SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
  SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
  SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
  SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
  SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
  SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
  /* The three SME SVCR aliases share field value 0x1b and are
     distinguished by an extra CRm/immediate pair packed in via
     PSTATE_ENCODE_CRM_AND_IMM.  */
  SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
  { 0, CPENC (0,0,0,0,0), 0, 0 },  /* Sentinel marking the end of the table.  */
};
5067
5068 bool
5069 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5070 const aarch64_sys_reg *reg)
5071 {
5072 if (!(reg->flags & F_ARCHEXT))
5073 return true;
5074
5075 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5076 }
5077
/* Operand names for the IC (instruction cache maintenance) instruction.
   F_HASXT marks operations that take a general-purpose register operand
   (see aarch64_sys_ins_reg_has_xt below).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }  /* Sentinel.  */
};
5085
/* Operand names for the DC (data cache maintenance) instruction.
   F_HASXT marks operations that take a register operand; F_ARCHEXT marks
   operations gated on an architecture extension (checked by
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }  /* Sentinel.  */
};
5120
/* Operand names for the AT (address translation) instruction.  All take
   a register operand (F_HASXT); the s1e1rp/s1e1wp pair is gated on an
   architecture extension (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }  /* Sentinel.  */
};
5139
/* Operand names for the TLBI (TLB invalidate) instruction.  F_HASXT
   marks operations that take a register operand; F_ARCHEXT marks those
   gated on an architecture extension.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    /* Base-architecture TLBI operations.  */
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* "*os" (outer-shareable) variants — extension-gated (F_ARCHEXT);
       presumably the Armv8.4 TLBI outer-shareable feature — see the
       CPENS checks in aarch64_sys_ins_reg_supported_p.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* "r*" variants — extension-gated (F_ARCHEXT); presumably the
       range-based TLBI feature — confirm against the Arm ARM.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    /* Physical-address (pa*/rpa*) invalidation operations.  */
    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos",    CPENS (6, C8, C1, 4), 0},
    { "paall",      CPENS (6, C8, C7, 4), 0},

    { 0,       CPENS(0,0,0,0), 0 }  /* Sentinel.  */
};
5230
/* Operand names for the speculation-restriction system instructions
   (CFP/DVP/CPP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }  /* Sentinel.  */
};
5241
5242 bool
5243 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5244 {
5245 return (sys_ins_reg->flags & F_HASXT) != 0;
5246 }
5247
/* Return TRUE if the system register/instruction operand described by
   REG_NAME, REG_VALUE, REG_FLAGS and REG_FEATURES is available given
   the enabled CPU feature set FEATURES.  Entries without F_ARCHEXT are
   unconditionally available; for the rest, availability is decided by
   the entry's own feature set or by the hard-coded per-feature value
   lists below.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3: reject any register whose name ends in "_el3".  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Not an architecture-extension register: always supported.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  /* If the table entry carries an explicit feature set, that decides it.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  The values below are the CPENS encodings
     of the TLBI *OS and range (R*) operands from the TLBI table above.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr.  */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return true;

  return false;
}
5365
5366 #undef C0
5367 #undef C1
5368 #undef C2
5369 #undef C3
5370 #undef C4
5371 #undef C5
5372 #undef C6
5373 #undef C7
5374 #undef C8
5375 #undef C9
5376 #undef C10
5377 #undef C11
5378 #undef C12
5379 #undef C13
5380 #undef C14
5381 #undef C15
5382
/* Extract the single bit BT, or the inclusive bitfield [HI:LO], from
   the instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5385
5386 static enum err_type
5387 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5388 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5389 bool encoding ATTRIBUTE_UNUSED,
5390 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5391 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5392 {
5393 int t = BITS (insn, 4, 0);
5394 int n = BITS (insn, 9, 5);
5395 int t2 = BITS (insn, 14, 10);
5396
5397 if (BIT (insn, 23))
5398 {
5399 /* Write back enabled. */
5400 if ((t == n || t2 == n) && n != 31)
5401 return ERR_UND;
5402 }
5403
5404 if (BIT (insn, 22))
5405 {
5406 /* Load */
5407 if (t == t2)
5408 return ERR_UND;
5409 }
5410
5411 return ERR_OK;
5412 }
5413
5414 /* Verifier for vector by element 3 operands functions where the
5415 conditions `if sz:L == 11 then UNDEFINED` holds. */
5416
5417 static enum err_type
5418 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5419 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5420 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5421 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5422 {
5423 const aarch64_insn undef_pattern = 0x3;
5424 aarch64_insn value;
5425
5426 assert (inst->opcode);
5427 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5428 value = encoding ? inst->value : insn;
5429 assert (value);
5430
5431 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5432 return ERR_UND;
5433
5434 return ERR_OK;
5435 }
5436
5437 /* Check an instruction that takes three register operands and that
5438 requires the register numbers to be distinct from one another. */
5439
5440 static enum err_type
5441 verify_three_different_regs (const struct aarch64_inst *inst,
5442 const aarch64_insn insn ATTRIBUTE_UNUSED,
5443 bfd_vma pc ATTRIBUTE_UNUSED,
5444 bool encoding ATTRIBUTE_UNUSED,
5445 aarch64_operand_error *mismatch_detail
5446 ATTRIBUTE_UNUSED,
5447 aarch64_instr_sequence *insn_sequence
5448 ATTRIBUTE_UNUSED)
5449 {
5450 int rd, rs, rn;
5451
5452 rd = inst->operands[0].reg.regno;
5453 rs = inst->operands[1].reg.regno;
5454 rn = inst->operands[2].reg.regno;
5455 if (rd == rs || rd == rn || rs == rn)
5456 {
5457 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5458 mismatch_detail->error
5459 = _("the three register operands must be distinct from one another");
5460 mismatch_detail->index = -1;
5461 return ERR_UND;
5462 }
5463
5464 return ERR_OK;
5465 }
5466
5467 /* Add INST to the end of INSN_SEQUENCE. */
5468
5469 static void
5470 add_insn_to_sequence (const struct aarch64_inst *inst,
5471 aarch64_instr_sequence *insn_sequence)
5472 {
5473 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5474 }
5475
5476 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5477 If INST is NULL the given insn_sequence is cleared and the sequence is left
5478 uninitialized. */
5479
5480 void
5481 init_insn_sequence (const struct aarch64_inst *inst,
5482 aarch64_instr_sequence *insn_sequence)
5483 {
5484 int num_req_entries = 0;
5485
5486 if (insn_sequence->instr)
5487 {
5488 XDELETE (insn_sequence->instr);
5489 insn_sequence->instr = NULL;
5490 }
5491
5492 /* Handle all the cases here. May need to think of something smarter than
5493 a giant if/else chain if this grows. At that time, a lookup table may be
5494 best. */
5495 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5496 num_req_entries = 1;
5497 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5498 num_req_entries = 2;
5499
5500 insn_sequence->num_added_insns = 0;
5501 insn_sequence->num_allocated_insns = num_req_entries;
5502
5503 if (num_req_entries != 0)
5504 {
5505 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5506 add_insn_to_sequence (inst, insn_sequence);
5507 }
5508 }
5509
5510 /* Subroutine of verify_constraints. Check whether the instruction
5511 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5512 expectations are met. Return true if the check passes, otherwise
5513 describe the problem in MISMATCH_DETAIL.
5514
5515 IS_NEW_SECTION is true if INST is assumed to start a new section.
5516 The other arguments are as for verify_constraints. */
5517
static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* The last instruction added to the open sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The pointer arithmetic below (opcode - 1, opcode[1]) assumes the
     P/M/E entries of a MOPS group are consecutive in the opcode table,
     so "the expected next opcode" is simply the table neighbour.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      /* The previous instruction expected its table successor next,
	 and INST is not it.  */
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* INST itself is part of a MOPS group: it must directly follow
	 its table predecessor, within the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5586
5587 /* This function verifies that the instruction INST adheres to its specified
5588 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5589 returned and MISMATCH_DETAIL contains the reason why verification failed.
5590
5591 The function is called both during assembly and disassembly. If assembling
5592 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5593 and will contain the PC of the current instruction w.r.t to the section.
5594
5595 If ENCODING and PC=0 then you are at a start of a section. The constraints
5596 are verified against the given state insn_sequence which is updated as it
5597 transitions through the verification. */
5598
enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast path: no constraints on this opcode and no open sequence.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while one is still open is itself a
	 (non-fatal) violation; the old sequence is discarded.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* When disassembling, PC == 0 marks the start of a section (see the
     function comment above).  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      /* Reset the sequence unless INST carries the C_SCAN_MOPS_M
	 constraint.  */
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan INST's operands: count uses of the movprfx destination
	     register, remember the last operand index that used it, track
	     the widest element size seen, and record any governing
	     predicate operand.  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		case AARCH64_OPND_SVE_Zd:
		case AARCH64_OPND_SVE_Zm_5:
		case AARCH64_OPND_SVE_Zm_16:
		case AARCH64_OPND_SVE_Zn:
		case AARCH64_OPND_SVE_Zt:
		case AARCH64_OPND_SVE_Vm:
		case AARCH64_OPND_SVE_Vn:
		case AARCH64_OPND_Va:
		case AARCH64_OPND_Vn:
		case AARCH64_OPND_Vm:
		case AARCH64_OPND_Sn:
		case AARCH64_OPND_Sm:
		  if (inst_op.reg.regno == blk_dest.reg.regno)
		    {
		      num_op_used++;
		      last_op_usage = i;
		    }
		  current_elem_size
		    = aarch64_get_qualifier_esize (inst_op.qualifier);
		  if (current_elem_size > max_elem_size)
		    max_elem_size = current_elem_size;
		  break;
		case AARCH64_OPND_SVE_Pd:
		case AARCH64_OPND_SVE_Pg3:
		case AARCH64_OPND_SVE_Pg4_5:
		case AARCH64_OPND_SVE_Pg4_10:
		case AARCH64_OPND_SVE_Pg4_16:
		case AARCH64_OPND_SVE_Pm:
		case AARCH64_OPND_SVE_Pn:
		case AARCH64_OPND_SVE_Pt:
		case AARCH64_OPND_SME_Pm:
		  inst_pred = inst_op;
		  inst_pred_idx = i;
		  break;
		default:
		  break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
5877
5878
5879 /* Return true if VALUE cannot be moved into an SVE register using DUP
5880 (with any element size, not just ESIZE) and if using DUPM would
5881 therefore be OK. ESIZE is the number of bytes in the immediate. */
5882
/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Bits above the ESIZE-byte element must be either all-zero or
     all-one (a pure zero/sign extension); anything else is rejected
     outright.  The double shift avoids shifting by 64 when ESIZE is 8.  */
  uint64_t outside = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((uvalue & ~outside) != uvalue && (uvalue | outside) != uvalue)
    return false;

  /* Narrow the value while its upper half replicates its lower half;
     if it narrows all the way down to a single byte, DUP handles it.  */
  int64_t narrowed = (int64_t) uvalue;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      narrowed = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  narrowed = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* A zero low byte can be absorbed by DUP's shifted-immediate form.  */
  if ((narrowed & 0xff) == 0)
    narrowed /= 256;

  /* Outside the signed 8-bit immediate range means DUP cannot encode it.  */
  return !(narrowed >= -128 && narrowed < 128);
}
5905
5906 /* Include the opcode description table as well as the operand description
5907 table. */
5908 #define VERIFIER(x) verify_##x
5909 #include "aarch64-tbl.h"
5910