aarch64-opc.c revision 1.10 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* When non-zero, the dump_* helpers below print verbose qualifier-matching
   traces.  Only compiled in for DEBUG_AARCH64 builds.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* Names for the values of a 5-bit SVE predicate-pattern operand, indexed
   by encoding.  Entries not listed below are reserved and remain null.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0x00] = "pow2",
  [0x01] = "vl1",
  [0x02] = "vl2",
  [0x03] = "vl3",
  [0x04] = "vl4",
  [0x05] = "vl5",
  [0x06] = "vl6",
  [0x07] = "vl7",
  [0x08] = "vl8",
  [0x09] = "vl16",
  [0x0a] = "vl32",
  [0x0b] = "vl64",
  [0x0c] = "vl128",
  [0x0d] = "vl256",
  /* 0x0e - 0x1c are reserved (implicitly null).  */
  [0x1d] = "mul4",
  [0x1e] = "mul3",
  [0x1f] = "all"
};
78
/* Names for the values of a 4-bit SVE prefetch operand, indexed by
   encoding.  Entries not listed below are reserved and remain null.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0x0] = "pldl1keep",
  [0x1] = "pldl1strm",
  [0x2] = "pldl2keep",
  [0x3] = "pldl2strm",
  [0x4] = "pldl3keep",
  [0x5] = "pldl3strm",
  /* 0x6 and 0x7 are reserved.  */
  [0x8] = "pstl1keep",
  [0x9] = "pstl1strm",
  [0xa] = "pstl2keep",
  [0xb] = "pstl2strm",
  [0xc] = "pstl3keep",
  [0xd] = "pstl3strm"
  /* 0xe and 0xf are reserved.  */
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
/* Classification of an AdvSIMD qualifier sequence, used to pick the
   operand that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN = 0,		/* Unrecognised shape; default to operand 0.  */
  DP_VECTOR_3SAME = 1,		/* All vector operands share one size.  */
  DP_VECTOR_LONG = 2,		/* Destination is double the source width.  */
  DP_VECTOR_WIDE = 3,		/* Last source is half the others' width.  */
  DP_VECTOR_ACROSS_LANES = 4,	/* Scalar destination, vector source.  */
};
127
/* For each data pattern, the index of the operand whose qualifier decides
   the size:Q encoding.  Indexed by enum data_pattern, in order:
   DP_UNKNOWN (operand 0 by default), DP_VECTOR_3SAME, DP_VECTOR_LONG,
   DP_VECTOR_WIDE, DP_VECTOR_ACROSS_LANES.  */
static const char significant_operand_index [] = { 0, 0, 1, 2, 1 };
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that doesn't call
188 get_data_pattern each time when we need to select an operand. We can
189 either buffer the caculated the result or statically generate the data,
190 however, it is not obvious that the optimization will bring significant
191 benefit. */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199
/* Instruction bit-fields.
   Each entry is { lsb, width }: the position of the least significant bit
   and the width in bits of the field within the 32-bit instruction word.
   The entry order must be kept synced with 'enum aarch64_field_kind',
   which is used to index this table.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
    {  5,  4 },	/* imm4_5: in SME instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 16 },	/* imm16_2: in udf instruction.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 22,  1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 11,  1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20,  1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22,  1 },	/* SVE_sz: 1-bit element size select.  */
    { 17,  2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 30,  1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16,  4 },	/* SVE_tsz: triangular size select.  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    {  0,  2 },	/* SME ZAda tile ZA0-ZA3.  */
    {  0,  3 },	/* SME ZAda tile ZA0-ZA7.  */
    { 22,  2 },	/* SME_size_10: size<1>, size<0> class field, [23:22].  */
    { 16,  1 },	/* SME_Q: Q class bit, bit 16.  */
    { 15,  1 },	/* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 13,  2 },	/* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 13,  3 },	/* SME Pm second source scalable predicate register P0-P7.  */
    {  0,  8 },	/* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    { 16,  2 },	/* SME_Rm: index base register W12-W15 [17:16].  */
    { 23,  1 },	/* SME_i1: immediate field, bit 23.  */
    { 22,  1 },	/* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18,  3 },	/* SME_tshl: immediate and qualifier field, bits [20:18].  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 12,  2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22,  1 },	/* sz: 1-bit element size select.  */
    { 10,  2 },	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 10,  8 },	/* CSSC_imm8.  */
};
348
349 enum aarch64_operand_class
350 aarch64_get_operand_class (enum aarch64_opnd type)
351 {
352 return aarch64_operands[type].op_class;
353 }
354
355 const char *
356 aarch64_get_operand_name (enum aarch64_opnd type)
357 {
358 return aarch64_operands[type].name;
359 }
360
361 /* Get operand description string.
362 This is usually for the diagnosis purpose. */
363 const char *
364 aarch64_get_operand_desc (enum aarch64_opnd type)
365 {
366 return aarch64_operands[type].desc;
367 }
368
/* Table of all conditional affixes.  Indexed by (and the value field equal
   to) the 4-bit AArch64 condition code.  The first name of each entry is
   the base-architecture mnemonic; additional names are aliases (e.g. the
   SVE condition aliases such as "none"/"any").
   NOTE(review): get_inverted_cond below relies on inverse conditions
   differing only in bit 0 of the value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
389
390 const aarch64_cond *
391 get_cond_from_value (aarch64_insn value)
392 {
393 assert (value < 16);
394 return &aarch64_conds[(unsigned int) value];
395 }
396
397 const aarch64_cond *
398 get_inverted_cond (const aarch64_cond *cond)
399 {
400 return &aarch64_conds[cond->value ^ 0x1];
401 }
402
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   NOTE(review): the entry order must stay synced with the enumeration:
   aarch64_get_operand_modifier_from_value computes kinds as
   AARCH64_MOD_UXTB + value (extends, ascending) and
   AARCH64_MOD_LSL - value (shifts, descending).  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
428
429 enum aarch64_modifier_kind
430 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
431 {
432 return desc - aarch64_operand_modifiers;
433 }
434
435 aarch64_insn
436 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
437 {
438 return aarch64_operand_modifiers[kind].value;
439 }
440
441 enum aarch64_modifier_kind
442 aarch64_get_operand_modifier_from_value (aarch64_insn value,
443 bool extend_p)
444 {
445 if (extend_p)
446 return AARCH64_MOD_UXTB + value;
447 else
448 return AARCH64_MOD_LSL - value;
449 }
450
451 bool
452 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
453 {
454 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
455 }
456
457 static inline bool
458 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
459 {
460 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
461 }
462
/* Memory-barrier (DMB/DSB) option names, indexed by (and with value equal
   to) the 4-bit CRm encoding.  Reserved encodings print as "#0xNN".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
482
/* Options for the DSB nXS barrier.  The value is the full immediate;
   only bits CRm<3:2> are encoded in the instruction (see the CRm_dsb_nxs
   field above).  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16  */
    { "nshnxs", 20 },    /*    01       20  */
    { "ishnxs", 24 },    /*    10       24  */
    { "synxs",  28 },    /*    11       28  */
};
490
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     The name is a single space marked no-print, so plain "BTI" (hint #32)
     disassembles with no operand.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
508
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   A NULL name marks a reserved encoding (printed as an immediate).  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
549
550 /* Utilities on value constraint. */
552
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
558
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
565
/* Return 1 if VALUE is representable as a WIDTH-bit two's-complement
   field, i.e. lies in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << (width - 1);
    return (-limit <= value && value < limit) ? 1 : 0;
  }
}
579
/* Return 1 if VALUE is representable as an unsigned WIDTH-bit field,
   i.e. lies in [0, 2^WIDTH - 1].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << width;
    return (0 <= value && value < limit) ? 1 : 0;
  }
}
593
594 /* Return 1 if OPERAND is SP or WSP. */
595 int
596 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
597 {
598 return ((aarch64_get_operand_class (operand->type)
599 == AARCH64_OPND_CLASS_INT_REG)
600 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
601 && operand->reg.regno == 31);
602 }
603
604 /* Return 1 if OPERAND is XZR or WZP. */
605 int
606 aarch64_zero_register_p (const aarch64_opnd_info *operand)
607 {
608 return ((aarch64_get_operand_class (operand->type)
609 == AARCH64_OPND_CLASS_INT_REG)
610 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
611 && operand->reg.regno == 31);
612 }
613
614 /* Return true if the operand *OPERAND that has the operand code
615 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
616 qualified by the qualifier TARGET. */
617
618 static inline int
619 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
620 aarch64_opnd_qualifier_t target)
621 {
622 switch (operand->qualifier)
623 {
624 case AARCH64_OPND_QLF_W:
625 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
626 return 1;
627 break;
628 case AARCH64_OPND_QLF_X:
629 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
630 return 1;
631 break;
632 case AARCH64_OPND_QLF_WSP:
633 if (target == AARCH64_OPND_QLF_W
634 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
635 return 1;
636 break;
637 case AARCH64_OPND_QLF_SP:
638 if (target == AARCH64_OPND_QLF_X
639 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
640 return 1;
641 break;
642 default:
643 break;
644 }
645
646 return 0;
647 }
648
649 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
650 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
651
652 Return NIL if more than one expected qualifiers are found. */
653
654 aarch64_opnd_qualifier_t
655 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
656 int idx,
657 const aarch64_opnd_qualifier_t known_qlf,
658 int known_idx)
659 {
660 int i, saved_i;
661
662 /* Special case.
663
664 When the known qualifier is NIL, we have to assume that there is only
665 one qualifier sequence in the *QSEQ_LIST and return the corresponding
666 qualifier directly. One scenario is that for instruction
667 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
668 which has only one possible valid qualifier sequence
669 NIL, S_D
670 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
671 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
672
673 Because the qualifier NIL has dual roles in the qualifier sequence:
674 it can mean no qualifier for the operand, or the qualifer sequence is
675 not in use (when all qualifiers in the sequence are NILs), we have to
676 handle this special case here. */
677 if (known_qlf == AARCH64_OPND_NIL)
678 {
679 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
680 return qseq_list[0][idx];
681 }
682
683 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
684 {
685 if (qseq_list[i][known_idx] == known_qlf)
686 {
687 if (saved_i != -1)
688 /* More than one sequences are found to have KNOWN_QLF at
689 KNOWN_IDX. */
690 return AARCH64_OPND_NIL;
691 saved_i = i;
692 }
693 }
694
695 return qseq_list[saved_i][idx];
696 }
697
/* Kinds of operand qualifier, deciding how the data0..data2 fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL = 0,			/* No qualifier.  */
  OQK_OPD_VARIANT = 1,		/* Register/vector variant.  */
  OQK_VALUE_IN_RANGE = 2,	/* Immediate value-range constraint.  */
  OQK_MISC = 3,			/* Miscellaneous.  */
};
705
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     - OQK_OPD_VARIANT: element size, number of elements, encoding value;
     - OQK_VALUE_IN_RANGE: lower bound, upper bound, unused;
     - OQK_NIL and others: all three unused.  */
  int data0;
  int data1;
  int data2;
  /* Description, e.g. "4s" or "imm_0_31"; used when printing qualifiers.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
718
/* Indexed by the operand qualifier enumerators: the entry order must be
   kept synced with enum aarch64_opnd_qualifier.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* These two "4b"/"2h" entries are the scalar sub-register forms (one
     element); the vector forms with the same names follow below.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind field of these entries is 0 (OQK_NIL) rather
     than OQK_MISC; harmless as only the OPD_VARIANT and VALUE_IN_RANGE
     kinds are ever tested, but worth confirming upstream.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
780
781 static inline bool
782 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
783 {
784 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
785 }
786
787 static inline bool
788 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
789 {
790 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
791 }
792
793 const char*
794 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
795 {
796 return aarch64_opnd_qualifiers[qualifier].desc;
797 }
798
799 /* Given an operand qualifier, return the expected data element size
800 of a qualified operand. */
801 unsigned char
802 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
803 {
804 assert (operand_variant_qualifier_p (qualifier));
805 return aarch64_opnd_qualifiers[qualifier].data0;
806 }
807
808 unsigned char
809 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
810 {
811 assert (operand_variant_qualifier_p (qualifier));
812 return aarch64_opnd_qualifiers[qualifier].data1;
813 }
814
815 aarch64_insn
816 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
817 {
818 assert (operand_variant_qualifier_p (qualifier));
819 return aarch64_opnd_qualifiers[qualifier].data2;
820 }
821
822 static int
823 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
824 {
825 assert (qualifier_value_in_range_constraint_p (qualifier));
826 return aarch64_opnd_qualifiers[qualifier].data0;
827 }
828
829 static int
830 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
831 {
832 assert (qualifier_value_in_range_constraint_p (qualifier));
833 return aarch64_opnd_qualifiers[qualifier].data1;
834 }
835
836 #ifdef DEBUG_AARCH64
/* printf-style debug tracing helper: prints "#### " then the formatted
   message STR followed by a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
847
848 static inline void
849 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
850 {
851 int i;
852 printf ("#### \t");
853 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
854 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
855 printf ("\n");
856 }
857
858 static void
859 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
860 const aarch64_opnd_qualifier_t *qualifier)
861 {
862 int i;
863 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
864
865 aarch64_verbose ("dump_match_qualifiers:");
866 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
867 curr[i] = opnd[i].qualifier;
868 dump_qualifier_sequence (curr);
869 aarch64_verbose ("against");
870 dump_qualifier_sequence (qualifier);
871 }
872 #endif /* DEBUG_AARCH64 */
873
874 /* This function checks if the given instruction INSN is a destructive
875 instruction based on the usage of the registers. It does not recognize
876 unary destructive instructions. */
877 bool
878 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
879 {
880 int i = 0;
881 const enum aarch64_opnd *opnds = opcode->operands;
882
883 if (opnds[0] == AARCH64_OPND_NIL)
884 return false;
885
886 while (opnds[++i] != AARCH64_OPND_NIL)
887 if (opnds[i] == opnds[0])
888 return true;
889
890 return false;
891 }
892
893 /* TODO improve this, we can have an extra field at the runtime to
894 store the number of operands rather than calculating it every time. */
895
896 int
897 aarch64_num_of_operands (const aarch64_opcode *opcode)
898 {
899 int i = 0;
900 const enum aarch64_opnd *opnds = opcode->operands;
901 while (opnds[i++] != AARCH64_OPND_NIL)
902 ;
903 --i;
904 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
905 return i;
906 }
907
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT so that the loops below always stay within the
     operand count.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.
     Note: when the break below fires, QUALIFIERS_LIST still points at the
     matched sequence (the pointer is advanced only on loop continuation),
     which the fill-in code after the loop relies on.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	{
	  found = 0;
	  break;
	}

      /* Compare each candidate qualifier against the qualifier already
	 established on the corresponding operand (if any).  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1032
1033 /* Operand qualifier matching and resolving.
1034
1035 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1036 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1037
1038 if UPDATE_P, update the qualifier(s) in *INST after the matching
1039 succeeds. */
1040
1041 static int
1042 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1043 {
1044 int i;
1045 aarch64_opnd_qualifier_seq_t qualifiers;
1046
1047 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1048 qualifiers))
1049 {
1050 DEBUG_TRACE ("matching FAIL");
1051 return 0;
1052 }
1053
1054 /* Update the qualifiers. */
1055 if (update_p)
1056 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1057 {
1058 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1059 break;
1060 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1061 "update %s with %s for operand %d",
1062 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1063 aarch64_get_qualifier_name (qualifiers[i]), i);
1064 inst->operands[i].qualifier = qualifiers[i];
1065 }
1066
1067 DEBUG_TRACE ("matching SUCCESS");
1068 return 1;
1069 }
1070
1071 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1072 register by MOVZ.
1073
1074 IS32 indicates whether value is a 32-bit immediate or not.
1075 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1076 amount will be returned in *SHIFT_AMOUNT. */
1077
1078 bool
1079 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1080 {
1081 int amount;
1082
1083 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1084
1085 if (is32)
1086 {
1087 /* Allow all zeros or all ones in top 32-bits, so that
1088 32-bit constant expressions like ~0x80000000 are
1089 permitted. */
1090 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1091 /* Immediate out of range. */
1092 return false;
1093 value &= 0xffffffff;
1094 }
1095
1096 /* first, try movz then movn */
1097 amount = -1;
1098 if ((value & ((uint64_t) 0xffff << 0)) == value)
1099 amount = 0;
1100 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1101 amount = 16;
1102 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1103 amount = 32;
1104 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1105 amount = 48;
1106
1107 if (amount == -1)
1108 {
1109 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1110 return false;
1111 }
1112
1113 if (shift_amount != NULL)
1114 *shift_amount = amount;
1115
1116 DEBUG_TRACE ("exit true with amount %d", amount);
1117
1118 return true;
1119 }
1120
1121 /* Build the accepted values for immediate logical SIMD instructions.
1122
1123 The standard encodings of the immediate value are:
1124 N imms immr SIMD size R S
1125 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1126 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1127 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1128 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1129 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1130 0 11110s 00000r 2 UInt(r) UInt(s)
1131 where all-ones value of S is reserved.
1132
1133 Let's call E the SIMD size.
1134
1135 The immediate value is: S+1 bits '1' rotated to the right by R.
1136
1137 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1138 (remember S != E - 1). */
1139
1140 #define TOTAL_IMM_NB 5334
1141
/* One entry of the logical-immediate lookup table: the replicated 64-bit
   immediate value and its 13-bit standard encoding (see the table in the
   comment above).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid logical immediates; filled in and sorted
   by IMM (for bsearch) by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1149
1150 static int
1151 simd_imm_encoding_cmp(const void *i1, const void *i2)
1152 {
1153 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1154 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1155
1156 if (imm1->imm < imm2->imm)
1157 return -1;
1158 if (imm1->imm > imm2->imm)
1159 return +1;
1160 return 0;
1161 }
1162
1163 /* immediate bitfield standard encoding
1164 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1165 1 ssssss rrrrrr 64 rrrrrr ssssss
1166 0 0sssss 0rrrrr 32 rrrrr sssss
1167 0 10ssss 00rrrr 16 rrrr ssss
1168 0 110sss 000rrr 8 rrr sss
1169 0 1110ss 0000rr 4 rr ss
1170 0 11110s 00000r 2 r s */
/* Pack the N (IS64), immr (R) and imms (S) fields into the 13-bit
   standard bitfield encoding laid out in the table above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imms in bits <5:0>.  */
  encoding |= r << 6;		/* immr in bits <11:6>.  */
  encoding |= is64 << 12;	/* N in bit <12>.  */
  return encoding;
}
1176
/* Populate simd_immediates[] with every valid logical immediate (one entry
   per (size, S, R) combination in the table above) and sort it by value so
   aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size" bits of the imms field:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S == E - 1 (all ones within the element) is reserved.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the element to fill 64 bits; each case doubles the
	       pattern width, so the fallthrough chain is intentional.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value for bsearch.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1242
1243 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1244 be accepted by logical (immediate) instructions
1245 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1246
1247 ESIZE is the number of bytes in the decoded immediate value.
1248 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1249 VALUE will be returned in *ENCODING. */
1250
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* The lookup table is built lazily on first use.  */
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     The shift is split in two so that for esize == 8 the total shift of
     64 does not invoke undefined behaviour; it yields UPPER == 0.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Look the replicated value up in the sorted table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1294
1295 /* If 64-bit immediate IMM is in the format of
1296 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1297 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1298 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int byte_idx;

  /* Inspect each of the eight bytes; every byte must be 0x00 or 0xff.  */
  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      uint32_t byte = (imm >> (8 * byte_idx)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  result |= 1 << byte_idx;
	  break;
	case 0x00:
	  break;
	default:
	  /* A mixed byte means IMM is not an expanded imm8.  */
	  return -1;
	}
    }
  return result;
}
1316
1317 /* Utility inline functions for operand_general_constraint_met_p. */
1318
1319 static inline void
1320 set_error (aarch64_operand_error *mismatch_detail,
1321 enum aarch64_operand_error_kind kind, int idx,
1322 const char* error)
1323 {
1324 if (mismatch_detail == NULL)
1325 return;
1326 mismatch_detail->kind = kind;
1327 mismatch_detail->index = idx;
1328 mismatch_detail->error = error;
1329 }
1330
1331 static inline void
1332 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1333 const char* error)
1334 {
1335 if (mismatch_detail == NULL)
1336 return;
1337 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1338 }
1339
1340 static inline void
1341 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1342 int idx, int lower_bound, int upper_bound,
1343 const char* error)
1344 {
1345 if (mismatch_detail == NULL)
1346 return;
1347 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1348 mismatch_detail->data[0].i = lower_bound;
1349 mismatch_detail->data[1].i = upper_bound;
1350 }
1351
1352 static inline void
1353 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1354 int idx, int lower_bound, int upper_bound)
1355 {
1356 if (mismatch_detail == NULL)
1357 return;
1358 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1359 _("immediate value"));
1360 }
1361
1362 static inline void
1363 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1364 int idx, int lower_bound, int upper_bound)
1365 {
1366 if (mismatch_detail == NULL)
1367 return;
1368 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1369 _("immediate offset"));
1370 }
1371
1372 static inline void
1373 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1374 int idx, int lower_bound, int upper_bound)
1375 {
1376 if (mismatch_detail == NULL)
1377 return;
1378 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1379 _("register number"));
1380 }
1381
1382 static inline void
1383 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1384 int idx, int lower_bound, int upper_bound)
1385 {
1386 if (mismatch_detail == NULL)
1387 return;
1388 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1389 _("register element index"));
1390 }
1391
1392 static inline void
1393 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1394 int idx, int lower_bound, int upper_bound)
1395 {
1396 if (mismatch_detail == NULL)
1397 return;
1398 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1399 _("shift amount"));
1400 }
1401
1402 /* Report that the MUL modifier in operand IDX should be in the range
1403 [LOWER_BOUND, UPPER_BOUND]. */
1404 static inline void
1405 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1406 int idx, int lower_bound, int upper_bound)
1407 {
1408 if (mismatch_detail == NULL)
1409 return;
1410 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1411 _("multiplier"));
1412 }
1413
1414 static inline void
1415 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1416 int alignment)
1417 {
1418 if (mismatch_detail == NULL)
1419 return;
1420 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1421 mismatch_detail->data[0].i = alignment;
1422 }
1423
1424 static inline void
1425 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1426 int expected_num)
1427 {
1428 if (mismatch_detail == NULL)
1429 return;
1430 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1431 mismatch_detail->data[0].i = expected_num;
1432 }
1433
1434 static inline void
1435 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1436 const char* error)
1437 {
1438 if (mismatch_detail == NULL)
1439 return;
1440 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1441 }
1442
1443 /* General constraint checking based on operand code.
1444
1445 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1446 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1447
1448 This function has to be called after the qualifiers for all operands
1449 have been resolved.
1450
1451 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1452 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1453 of error message during the disassembling where error message is not
1454 wanted. We avoid the dynamic construction of strings of error messages
1455 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1456 use a combination of error code, static string and some integer data to
1457 represent an error. */
1458
1459 static int
1460 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1461 enum aarch64_opnd type,
1462 const aarch64_opcode *opcode,
1463 aarch64_operand_error *mismatch_detail)
1464 {
1465 unsigned num, modifiers, shift;
1466 unsigned char size;
1467 int64_t imm, min_value, max_value;
1468 uint64_t uvalue, mask;
1469 const aarch64_opnd_info *opnd = opnds + idx;
1470 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1471 int i;
1472
1473 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1474
1475 switch (aarch64_operands[type].op_class)
1476 {
1477 case AARCH64_OPND_CLASS_INT_REG:
1478 /* Check pair reg constraints for cas* instructions. */
1479 if (type == AARCH64_OPND_PAIRREG)
1480 {
1481 assert (idx == 1 || idx == 3);
1482 if (opnds[idx - 1].reg.regno % 2 != 0)
1483 {
1484 set_syntax_error (mismatch_detail, idx - 1,
1485 _("reg pair must start from even reg"));
1486 return 0;
1487 }
1488 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1489 {
1490 set_syntax_error (mismatch_detail, idx,
1491 _("reg pair must be contiguous"));
1492 return 0;
1493 }
1494 break;
1495 }
1496
1497 /* <Xt> may be optional in some IC and TLBI instructions. */
1498 if (type == AARCH64_OPND_Rt_SYS)
1499 {
1500 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1501 == AARCH64_OPND_CLASS_SYSTEM));
1502 if (opnds[1].present
1503 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1504 {
1505 set_other_error (mismatch_detail, idx, _("extraneous register"));
1506 return 0;
1507 }
1508 if (!opnds[1].present
1509 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1510 {
1511 set_other_error (mismatch_detail, idx, _("missing register"));
1512 return 0;
1513 }
1514 }
1515 switch (qualifier)
1516 {
1517 case AARCH64_OPND_QLF_WSP:
1518 case AARCH64_OPND_QLF_SP:
1519 if (!aarch64_stack_pointer_p (opnd))
1520 {
1521 set_other_error (mismatch_detail, idx,
1522 _("stack pointer register expected"));
1523 return 0;
1524 }
1525 break;
1526 default:
1527 break;
1528 }
1529 break;
1530
1531 case AARCH64_OPND_CLASS_SVE_REG:
1532 switch (type)
1533 {
1534 case AARCH64_OPND_SVE_Zm3_INDEX:
1535 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1536 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1537 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1538 case AARCH64_OPND_SVE_Zm4_INDEX:
1539 size = get_operand_fields_width (get_operand_from_code (type));
1540 shift = get_operand_specific_data (&aarch64_operands[type]);
1541 mask = (1 << shift) - 1;
1542 if (opnd->reg.regno > mask)
1543 {
1544 assert (mask == 7 || mask == 15);
1545 set_other_error (mismatch_detail, idx,
1546 mask == 15
1547 ? _("z0-z15 expected")
1548 : _("z0-z7 expected"));
1549 return 0;
1550 }
1551 mask = (1u << (size - shift)) - 1;
1552 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1553 {
1554 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1555 return 0;
1556 }
1557 break;
1558
1559 case AARCH64_OPND_SVE_Zn_INDEX:
1560 size = aarch64_get_qualifier_esize (opnd->qualifier);
1561 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1562 {
1563 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1564 0, 64 / size - 1);
1565 return 0;
1566 }
1567 break;
1568
1569 case AARCH64_OPND_SVE_ZnxN:
1570 case AARCH64_OPND_SVE_ZtxN:
1571 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1572 {
1573 set_other_error (mismatch_detail, idx,
1574 _("invalid register list"));
1575 return 0;
1576 }
1577 break;
1578
1579 default:
1580 break;
1581 }
1582 break;
1583
1584 case AARCH64_OPND_CLASS_PRED_REG:
1585 if (opnd->reg.regno >= 8
1586 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1587 {
1588 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1589 return 0;
1590 }
1591 break;
1592
1593 case AARCH64_OPND_CLASS_COND:
1594 if (type == AARCH64_OPND_COND1
1595 && (opnds[idx].cond->value & 0xe) == 0xe)
1596 {
1597 /* Not allow AL or NV. */
1598 set_syntax_error (mismatch_detail, idx, NULL);
1599 }
1600 break;
1601
1602 case AARCH64_OPND_CLASS_ADDRESS:
1603 /* Check writeback. */
1604 switch (opcode->iclass)
1605 {
1606 case ldst_pos:
1607 case ldst_unscaled:
1608 case ldstnapair_offs:
1609 case ldstpair_off:
1610 case ldst_unpriv:
1611 if (opnd->addr.writeback == 1)
1612 {
1613 set_syntax_error (mismatch_detail, idx,
1614 _("unexpected address writeback"));
1615 return 0;
1616 }
1617 break;
1618 case ldst_imm10:
1619 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1620 {
1621 set_syntax_error (mismatch_detail, idx,
1622 _("unexpected address writeback"));
1623 return 0;
1624 }
1625 break;
1626 case ldst_imm9:
1627 case ldstpair_indexed:
1628 case asisdlsep:
1629 case asisdlsop:
1630 if (opnd->addr.writeback == 0)
1631 {
1632 set_syntax_error (mismatch_detail, idx,
1633 _("address writeback expected"));
1634 return 0;
1635 }
1636 break;
1637 default:
1638 assert (opnd->addr.writeback == 0);
1639 break;
1640 }
1641 switch (type)
1642 {
1643 case AARCH64_OPND_ADDR_SIMM7:
1644 /* Scaled signed 7 bits immediate offset. */
1645 /* Get the size of the data element that is accessed, which may be
1646 different from that of the source register size,
1647 e.g. in strb/ldrb. */
1648 size = aarch64_get_qualifier_esize (opnd->qualifier);
1649 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1650 {
1651 set_offset_out_of_range_error (mismatch_detail, idx,
1652 -64 * size, 63 * size);
1653 return 0;
1654 }
1655 if (!value_aligned_p (opnd->addr.offset.imm, size))
1656 {
1657 set_unaligned_error (mismatch_detail, idx, size);
1658 return 0;
1659 }
1660 break;
1661 case AARCH64_OPND_ADDR_OFFSET:
1662 case AARCH64_OPND_ADDR_SIMM9:
1663 /* Unscaled signed 9 bits immediate offset. */
1664 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1665 {
1666 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1667 return 0;
1668 }
1669 break;
1670
1671 case AARCH64_OPND_ADDR_SIMM9_2:
1672 /* Unscaled signed 9 bits immediate offset, which has to be negative
1673 or unaligned. */
1674 size = aarch64_get_qualifier_esize (qualifier);
1675 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1676 && !value_aligned_p (opnd->addr.offset.imm, size))
1677 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1678 return 1;
1679 set_other_error (mismatch_detail, idx,
1680 _("negative or unaligned offset expected"));
1681 return 0;
1682
1683 case AARCH64_OPND_ADDR_SIMM10:
1684 /* Scaled signed 10 bits immediate offset. */
1685 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1686 {
1687 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1688 return 0;
1689 }
1690 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1691 {
1692 set_unaligned_error (mismatch_detail, idx, 8);
1693 return 0;
1694 }
1695 break;
1696
1697 case AARCH64_OPND_ADDR_SIMM11:
1698 /* Signed 11 bits immediate offset (multiple of 16). */
1699 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1700 {
1701 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1702 return 0;
1703 }
1704
1705 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1706 {
1707 set_unaligned_error (mismatch_detail, idx, 16);
1708 return 0;
1709 }
1710 break;
1711
1712 case AARCH64_OPND_ADDR_SIMM13:
1713 /* Signed 13 bits immediate offset (multiple of 16). */
1714 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1715 {
1716 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1717 return 0;
1718 }
1719
1720 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1721 {
1722 set_unaligned_error (mismatch_detail, idx, 16);
1723 return 0;
1724 }
1725 break;
1726
1727 case AARCH64_OPND_SIMD_ADDR_POST:
1728 /* AdvSIMD load/store multiple structures, post-index. */
1729 assert (idx == 1);
1730 if (opnd->addr.offset.is_reg)
1731 {
1732 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1733 return 1;
1734 else
1735 {
1736 set_other_error (mismatch_detail, idx,
1737 _("invalid register offset"));
1738 return 0;
1739 }
1740 }
1741 else
1742 {
1743 const aarch64_opnd_info *prev = &opnds[idx-1];
1744 unsigned num_bytes; /* total number of bytes transferred. */
1745 /* The opcode dependent area stores the number of elements in
1746 each structure to be loaded/stored. */
1747 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1748 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1749 /* Special handling of loading single structure to all lane. */
1750 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1751 * aarch64_get_qualifier_esize (prev->qualifier);
1752 else
1753 num_bytes = prev->reglist.num_regs
1754 * aarch64_get_qualifier_esize (prev->qualifier)
1755 * aarch64_get_qualifier_nelem (prev->qualifier);
1756 if ((int) num_bytes != opnd->addr.offset.imm)
1757 {
1758 set_other_error (mismatch_detail, idx,
1759 _("invalid post-increment amount"));
1760 return 0;
1761 }
1762 }
1763 break;
1764
1765 case AARCH64_OPND_ADDR_REGOFF:
1766 /* Get the size of the data element that is accessed, which may be
1767 different from that of the source register size,
1768 e.g. in strb/ldrb. */
1769 size = aarch64_get_qualifier_esize (opnd->qualifier);
1770 /* It is either no shift or shift by the binary logarithm of SIZE. */
1771 if (opnd->shifter.amount != 0
1772 && opnd->shifter.amount != (int)get_logsz (size))
1773 {
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid shift amount"));
1776 return 0;
1777 }
1778 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1779 operators. */
1780 switch (opnd->shifter.kind)
1781 {
1782 case AARCH64_MOD_UXTW:
1783 case AARCH64_MOD_LSL:
1784 case AARCH64_MOD_SXTW:
1785 case AARCH64_MOD_SXTX: break;
1786 default:
1787 set_other_error (mismatch_detail, idx,
1788 _("invalid extend/shift operator"));
1789 return 0;
1790 }
1791 break;
1792
1793 case AARCH64_OPND_ADDR_UIMM12:
1794 imm = opnd->addr.offset.imm;
1795 /* Get the size of the data element that is accessed, which may be
1796 different from that of the source register size,
1797 e.g. in strb/ldrb. */
1798 size = aarch64_get_qualifier_esize (qualifier);
1799 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1800 {
1801 set_offset_out_of_range_error (mismatch_detail, idx,
1802 0, 4095 * size);
1803 return 0;
1804 }
1805 if (!value_aligned_p (opnd->addr.offset.imm, size))
1806 {
1807 set_unaligned_error (mismatch_detail, idx, size);
1808 return 0;
1809 }
1810 break;
1811
1812 case AARCH64_OPND_ADDR_PCREL14:
1813 case AARCH64_OPND_ADDR_PCREL19:
1814 case AARCH64_OPND_ADDR_PCREL21:
1815 case AARCH64_OPND_ADDR_PCREL26:
1816 imm = opnd->imm.value;
1817 if (operand_need_shift_by_two (get_operand_from_code (type)))
1818 {
1819 /* The offset value in a PC-relative branch instruction is alway
1820 4-byte aligned and is encoded without the lowest 2 bits. */
1821 if (!value_aligned_p (imm, 4))
1822 {
1823 set_unaligned_error (mismatch_detail, idx, 4);
1824 return 0;
1825 }
1826 /* Right shift by 2 so that we can carry out the following check
1827 canonically. */
1828 imm >>= 2;
1829 }
1830 size = get_operand_fields_width (get_operand_from_code (type));
1831 if (!value_fit_signed_field_p (imm, size))
1832 {
1833 set_other_error (mismatch_detail, idx,
1834 _("immediate out of range"));
1835 return 0;
1836 }
1837 break;
1838
1839 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1840 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1841 {
1842 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1843 return 0;
1844 }
1845 break;
1846
1847 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1848 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1849 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1850 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1851 min_value = -8;
1852 max_value = 7;
1853 sve_imm_offset_vl:
1854 assert (!opnd->addr.offset.is_reg);
1855 assert (opnd->addr.preind);
1856 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1857 min_value *= num;
1858 max_value *= num;
1859 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1860 || (opnd->shifter.operator_present
1861 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1862 {
1863 set_other_error (mismatch_detail, idx,
1864 _("invalid addressing mode"));
1865 return 0;
1866 }
1867 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1868 {
1869 set_offset_out_of_range_error (mismatch_detail, idx,
1870 min_value, max_value);
1871 return 0;
1872 }
1873 if (!value_aligned_p (opnd->addr.offset.imm, num))
1874 {
1875 set_unaligned_error (mismatch_detail, idx, num);
1876 return 0;
1877 }
1878 break;
1879
1880 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1881 min_value = -32;
1882 max_value = 31;
1883 goto sve_imm_offset_vl;
1884
1885 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1886 min_value = -256;
1887 max_value = 255;
1888 goto sve_imm_offset_vl;
1889
1890 case AARCH64_OPND_SVE_ADDR_RI_U6:
1891 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1892 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1893 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1894 min_value = 0;
1895 max_value = 63;
1896 sve_imm_offset:
1897 assert (!opnd->addr.offset.is_reg);
1898 assert (opnd->addr.preind);
1899 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1900 min_value *= num;
1901 max_value *= num;
1902 if (opnd->shifter.operator_present
1903 || opnd->shifter.amount_present)
1904 {
1905 set_other_error (mismatch_detail, idx,
1906 _("invalid addressing mode"));
1907 return 0;
1908 }
1909 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1910 {
1911 set_offset_out_of_range_error (mismatch_detail, idx,
1912 min_value, max_value);
1913 return 0;
1914 }
1915 if (!value_aligned_p (opnd->addr.offset.imm, num))
1916 {
1917 set_unaligned_error (mismatch_detail, idx, num);
1918 return 0;
1919 }
1920 break;
1921
1922 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1923 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1924 min_value = -8;
1925 max_value = 7;
1926 goto sve_imm_offset;
1927
1928 case AARCH64_OPND_SVE_ADDR_ZX:
1929 /* Everything is already ensured by parse_operands or
1930 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1931 argument type). */
1932 assert (opnd->addr.offset.is_reg);
1933 assert (opnd->addr.preind);
1934 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1935 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1936 assert (opnd->shifter.operator_present == 0);
1937 break;
1938
1939 case AARCH64_OPND_SVE_ADDR_R:
1940 case AARCH64_OPND_SVE_ADDR_RR:
1941 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1942 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1943 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1944 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1945 case AARCH64_OPND_SVE_ADDR_RX:
1946 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1947 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1948 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1949 case AARCH64_OPND_SVE_ADDR_RZ:
1950 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1951 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1952 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1953 modifiers = 1 << AARCH64_MOD_LSL;
1954 sve_rr_operand:
1955 assert (opnd->addr.offset.is_reg);
1956 assert (opnd->addr.preind);
1957 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1958 && opnd->addr.offset.regno == 31)
1959 {
1960 set_other_error (mismatch_detail, idx,
1961 _("index register xzr is not allowed"));
1962 return 0;
1963 }
1964 if (((1 << opnd->shifter.kind) & modifiers) == 0
1965 || (opnd->shifter.amount
1966 != get_operand_specific_data (&aarch64_operands[type])))
1967 {
1968 set_other_error (mismatch_detail, idx,
1969 _("invalid addressing mode"));
1970 return 0;
1971 }
1972 break;
1973
1974 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1975 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1976 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1977 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1978 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1979 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1980 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1981 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1982 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1983 goto sve_rr_operand;
1984
1985 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1986 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1987 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1988 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1989 min_value = 0;
1990 max_value = 31;
1991 goto sve_imm_offset;
1992
1993 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1994 modifiers = 1 << AARCH64_MOD_LSL;
1995 sve_zz_operand:
1996 assert (opnd->addr.offset.is_reg);
1997 assert (opnd->addr.preind);
1998 if (((1 << opnd->shifter.kind) & modifiers) == 0
1999 || opnd->shifter.amount < 0
2000 || opnd->shifter.amount > 3)
2001 {
2002 set_other_error (mismatch_detail, idx,
2003 _("invalid addressing mode"));
2004 return 0;
2005 }
2006 break;
2007
2008 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2009 modifiers = (1 << AARCH64_MOD_SXTW);
2010 goto sve_zz_operand;
2011
2012 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2013 modifiers = 1 << AARCH64_MOD_UXTW;
2014 goto sve_zz_operand;
2015
2016 default:
2017 break;
2018 }
2019 break;
2020
2021 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2022 if (type == AARCH64_OPND_LEt)
2023 {
2024 /* Get the upper bound for the element index. */
2025 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2026 if (!value_in_range_p (opnd->reglist.index, 0, num))
2027 {
2028 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2029 return 0;
2030 }
2031 }
2032 /* The opcode dependent area stores the number of elements in
2033 each structure to be loaded/stored. */
2034 num = get_opcode_dependent_value (opcode);
2035 switch (type)
2036 {
2037 case AARCH64_OPND_LVt:
2038 assert (num >= 1 && num <= 4);
2039 /* Unless LD1/ST1, the number of registers should be equal to that
2040 of the structure elements. */
2041 if (num != 1 && opnd->reglist.num_regs != num)
2042 {
2043 set_reg_list_error (mismatch_detail, idx, num);
2044 return 0;
2045 }
2046 break;
2047 case AARCH64_OPND_LVt_AL:
2048 case AARCH64_OPND_LEt:
2049 assert (num >= 1 && num <= 4);
2050 /* The number of registers should be equal to that of the structure
2051 elements. */
2052 if (opnd->reglist.num_regs != num)
2053 {
2054 set_reg_list_error (mismatch_detail, idx, num);
2055 return 0;
2056 }
2057 break;
2058 default:
2059 break;
2060 }
2061 break;
2062
2063 case AARCH64_OPND_CLASS_IMMEDIATE:
2064 /* Constraint check on immediate operand. */
2065 imm = opnd->imm.value;
2066 /* E.g. imm_0_31 constrains value to be 0..31. */
2067 if (qualifier_value_in_range_constraint_p (qualifier)
2068 && !value_in_range_p (imm, get_lower_bound (qualifier),
2069 get_upper_bound (qualifier)))
2070 {
2071 set_imm_out_of_range_error (mismatch_detail, idx,
2072 get_lower_bound (qualifier),
2073 get_upper_bound (qualifier));
2074 return 0;
2075 }
2076
2077 switch (type)
2078 {
2079 case AARCH64_OPND_AIMM:
2080 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2081 {
2082 set_other_error (mismatch_detail, idx,
2083 _("invalid shift operator"));
2084 return 0;
2085 }
2086 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2087 {
2088 set_other_error (mismatch_detail, idx,
2089 _("shift amount must be 0 or 12"));
2090 return 0;
2091 }
2092 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2093 {
2094 set_other_error (mismatch_detail, idx,
2095 _("immediate out of range"));
2096 return 0;
2097 }
2098 break;
2099
2100 case AARCH64_OPND_HALF:
2101 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2102 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2103 {
2104 set_other_error (mismatch_detail, idx,
2105 _("invalid shift operator"));
2106 return 0;
2107 }
2108 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2109 if (!value_aligned_p (opnd->shifter.amount, 16))
2110 {
2111 set_other_error (mismatch_detail, idx,
2112 _("shift amount must be a multiple of 16"));
2113 return 0;
2114 }
2115 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2116 {
2117 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2118 0, size * 8 - 16);
2119 return 0;
2120 }
2121 if (opnd->imm.value < 0)
2122 {
2123 set_other_error (mismatch_detail, idx,
2124 _("negative immediate value not allowed"));
2125 return 0;
2126 }
2127 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2128 {
2129 set_other_error (mismatch_detail, idx,
2130 _("immediate out of range"));
2131 return 0;
2132 }
2133 break;
2134
2135 case AARCH64_OPND_IMM_MOV:
2136 {
2137 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2138 imm = opnd->imm.value;
2139 assert (idx == 1);
2140 switch (opcode->op)
2141 {
2142 case OP_MOV_IMM_WIDEN:
2143 imm = ~imm;
2144 /* Fall through. */
2145 case OP_MOV_IMM_WIDE:
2146 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2147 {
2148 set_other_error (mismatch_detail, idx,
2149 _("immediate out of range"));
2150 return 0;
2151 }
2152 break;
2153 case OP_MOV_IMM_LOG:
2154 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2155 {
2156 set_other_error (mismatch_detail, idx,
2157 _("immediate out of range"));
2158 return 0;
2159 }
2160 break;
2161 default:
2162 assert (0);
2163 return 0;
2164 }
2165 }
2166 break;
2167
2168 case AARCH64_OPND_NZCV:
2169 case AARCH64_OPND_CCMP_IMM:
2170 case AARCH64_OPND_EXCEPTION:
2171 case AARCH64_OPND_UNDEFINED:
2172 case AARCH64_OPND_TME_UIMM16:
2173 case AARCH64_OPND_UIMM4:
2174 case AARCH64_OPND_UIMM4_ADDG:
2175 case AARCH64_OPND_UIMM7:
2176 case AARCH64_OPND_UIMM3_OP1:
2177 case AARCH64_OPND_UIMM3_OP2:
2178 case AARCH64_OPND_SVE_UIMM3:
2179 case AARCH64_OPND_SVE_UIMM7:
2180 case AARCH64_OPND_SVE_UIMM8:
2181 case AARCH64_OPND_SVE_UIMM8_53:
2182 case AARCH64_OPND_CSSC_UIMM8:
2183 size = get_operand_fields_width (get_operand_from_code (type));
2184 assert (size < 32);
2185 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2186 {
2187 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2188 (1u << size) - 1);
2189 return 0;
2190 }
2191 break;
2192
2193 case AARCH64_OPND_UIMM10:
2194 /* Scaled unsigned 10 bits immediate offset. */
2195 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2196 {
2197 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2198 return 0;
2199 }
2200
2201 if (!value_aligned_p (opnd->imm.value, 16))
2202 {
2203 set_unaligned_error (mismatch_detail, idx, 16);
2204 return 0;
2205 }
2206 break;
2207
2208 case AARCH64_OPND_SIMM5:
2209 case AARCH64_OPND_SVE_SIMM5:
2210 case AARCH64_OPND_SVE_SIMM5B:
2211 case AARCH64_OPND_SVE_SIMM6:
2212 case AARCH64_OPND_SVE_SIMM8:
2213 case AARCH64_OPND_CSSC_SIMM8:
2214 size = get_operand_fields_width (get_operand_from_code (type));
2215 assert (size < 32);
2216 if (!value_fit_signed_field_p (opnd->imm.value, size))
2217 {
2218 set_imm_out_of_range_error (mismatch_detail, idx,
2219 -(1 << (size - 1)),
2220 (1 << (size - 1)) - 1);
2221 return 0;
2222 }
2223 break;
2224
2225 case AARCH64_OPND_WIDTH:
2226 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2227 && opnds[0].type == AARCH64_OPND_Rd);
2228 size = get_upper_bound (qualifier);
2229 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2230 /* lsb+width <= reg.size */
2231 {
2232 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2233 size - opnds[idx-1].imm.value);
2234 return 0;
2235 }
2236 break;
2237
2238 case AARCH64_OPND_LIMM:
2239 case AARCH64_OPND_SVE_LIMM:
2240 {
2241 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2242 uint64_t uimm = opnd->imm.value;
2243 if (opcode->op == OP_BIC)
2244 uimm = ~uimm;
2245 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2246 {
2247 set_other_error (mismatch_detail, idx,
2248 _("immediate out of range"));
2249 return 0;
2250 }
2251 }
2252 break;
2253
2254 case AARCH64_OPND_IMM0:
2255 case AARCH64_OPND_FPIMM0:
2256 if (opnd->imm.value != 0)
2257 {
2258 set_other_error (mismatch_detail, idx,
2259 _("immediate zero expected"));
2260 return 0;
2261 }
2262 break;
2263
2264 case AARCH64_OPND_IMM_ROT1:
2265 case AARCH64_OPND_IMM_ROT2:
2266 case AARCH64_OPND_SVE_IMM_ROT2:
2267 if (opnd->imm.value != 0
2268 && opnd->imm.value != 90
2269 && opnd->imm.value != 180
2270 && opnd->imm.value != 270)
2271 {
2272 set_other_error (mismatch_detail, idx,
2273 _("rotate expected to be 0, 90, 180 or 270"));
2274 return 0;
2275 }
2276 break;
2277
2278 case AARCH64_OPND_IMM_ROT3:
2279 case AARCH64_OPND_SVE_IMM_ROT1:
2280 case AARCH64_OPND_SVE_IMM_ROT3:
2281 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2282 {
2283 set_other_error (mismatch_detail, idx,
2284 _("rotate expected to be 90 or 270"));
2285 return 0;
2286 }
2287 break;
2288
2289 case AARCH64_OPND_SHLL_IMM:
2290 assert (idx == 2);
2291 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2292 if (opnd->imm.value != size)
2293 {
2294 set_other_error (mismatch_detail, idx,
2295 _("invalid shift amount"));
2296 return 0;
2297 }
2298 break;
2299
2300 case AARCH64_OPND_IMM_VLSL:
2301 size = aarch64_get_qualifier_esize (qualifier);
2302 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2303 {
2304 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2305 size * 8 - 1);
2306 return 0;
2307 }
2308 break;
2309
2310 case AARCH64_OPND_IMM_VLSR:
2311 size = aarch64_get_qualifier_esize (qualifier);
2312 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2313 {
2314 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2315 return 0;
2316 }
2317 break;
2318
2319 case AARCH64_OPND_SIMD_IMM:
2320 case AARCH64_OPND_SIMD_IMM_SFT:
2321 /* Qualifier check. */
2322 switch (qualifier)
2323 {
2324 case AARCH64_OPND_QLF_LSL:
2325 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2326 {
2327 set_other_error (mismatch_detail, idx,
2328 _("invalid shift operator"));
2329 return 0;
2330 }
2331 break;
2332 case AARCH64_OPND_QLF_MSL:
2333 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2334 {
2335 set_other_error (mismatch_detail, idx,
2336 _("invalid shift operator"));
2337 return 0;
2338 }
2339 break;
2340 case AARCH64_OPND_QLF_NIL:
2341 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2342 {
2343 set_other_error (mismatch_detail, idx,
2344 _("shift is not permitted"));
2345 return 0;
2346 }
2347 break;
2348 default:
2349 assert (0);
2350 return 0;
2351 }
2352 /* Is the immediate valid? */
2353 assert (idx == 1);
2354 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2355 {
2356 /* uimm8 or simm8 */
2357 if (!value_in_range_p (opnd->imm.value, -128, 255))
2358 {
2359 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2360 return 0;
2361 }
2362 }
2363 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2364 {
2365 /* uimm64 is not
2366 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2367 ffffffffgggggggghhhhhhhh'. */
2368 set_other_error (mismatch_detail, idx,
2369 _("invalid value for immediate"));
2370 return 0;
2371 }
2372 /* Is the shift amount valid? */
2373 switch (opnd->shifter.kind)
2374 {
2375 case AARCH64_MOD_LSL:
2376 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2377 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2378 {
2379 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2380 (size - 1) * 8);
2381 return 0;
2382 }
2383 if (!value_aligned_p (opnd->shifter.amount, 8))
2384 {
2385 set_unaligned_error (mismatch_detail, idx, 8);
2386 return 0;
2387 }
2388 break;
2389 case AARCH64_MOD_MSL:
2390 /* Only 8 and 16 are valid shift amount. */
2391 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2392 {
2393 set_other_error (mismatch_detail, idx,
2394 _("shift amount must be 0 or 16"));
2395 return 0;
2396 }
2397 break;
2398 default:
2399 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2400 {
2401 set_other_error (mismatch_detail, idx,
2402 _("invalid shift operator"));
2403 return 0;
2404 }
2405 break;
2406 }
2407 break;
2408
2409 case AARCH64_OPND_FPIMM:
2410 case AARCH64_OPND_SIMD_FPIMM:
2411 case AARCH64_OPND_SVE_FPIMM8:
2412 if (opnd->imm.is_fp == 0)
2413 {
2414 set_other_error (mismatch_detail, idx,
2415 _("floating-point immediate expected"));
2416 return 0;
2417 }
2418 /* The value is expected to be an 8-bit floating-point constant with
2419 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2420 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2421 instruction). */
2422 if (!value_in_range_p (opnd->imm.value, 0, 255))
2423 {
2424 set_other_error (mismatch_detail, idx,
2425 _("immediate out of range"));
2426 return 0;
2427 }
2428 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2429 {
2430 set_other_error (mismatch_detail, idx,
2431 _("invalid shift operator"));
2432 return 0;
2433 }
2434 break;
2435
2436 case AARCH64_OPND_SVE_AIMM:
2437 min_value = 0;
2438 sve_aimm:
2439 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2440 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2441 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2442 uvalue = opnd->imm.value;
2443 shift = opnd->shifter.amount;
2444 if (size == 1)
2445 {
2446 if (shift != 0)
2447 {
2448 set_other_error (mismatch_detail, idx,
2449 _("no shift amount allowed for"
2450 " 8-bit constants"));
2451 return 0;
2452 }
2453 }
2454 else
2455 {
2456 if (shift != 0 && shift != 8)
2457 {
2458 set_other_error (mismatch_detail, idx,
2459 _("shift amount must be 0 or 8"));
2460 return 0;
2461 }
2462 if (shift == 0 && (uvalue & 0xff) == 0)
2463 {
2464 shift = 8;
2465 uvalue = (int64_t) uvalue / 256;
2466 }
2467 }
2468 mask >>= shift;
2469 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2470 {
2471 set_other_error (mismatch_detail, idx,
2472 _("immediate too big for element size"));
2473 return 0;
2474 }
2475 uvalue = (uvalue - min_value) & mask;
2476 if (uvalue > 0xff)
2477 {
2478 set_other_error (mismatch_detail, idx,
2479 _("invalid arithmetic immediate"));
2480 return 0;
2481 }
2482 break;
2483
2484 case AARCH64_OPND_SVE_ASIMM:
2485 min_value = -128;
2486 goto sve_aimm;
2487
2488 case AARCH64_OPND_SVE_I1_HALF_ONE:
2489 assert (opnd->imm.is_fp);
2490 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2491 {
2492 set_other_error (mismatch_detail, idx,
2493 _("floating-point value must be 0.5 or 1.0"));
2494 return 0;
2495 }
2496 break;
2497
2498 case AARCH64_OPND_SVE_I1_HALF_TWO:
2499 assert (opnd->imm.is_fp);
2500 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2501 {
2502 set_other_error (mismatch_detail, idx,
2503 _("floating-point value must be 0.5 or 2.0"));
2504 return 0;
2505 }
2506 break;
2507
2508 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2509 assert (opnd->imm.is_fp);
2510 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2511 {
2512 set_other_error (mismatch_detail, idx,
2513 _("floating-point value must be 0.0 or 1.0"));
2514 return 0;
2515 }
2516 break;
2517
2518 case AARCH64_OPND_SVE_INV_LIMM:
2519 {
2520 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2521 uint64_t uimm = ~opnd->imm.value;
2522 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2523 {
2524 set_other_error (mismatch_detail, idx,
2525 _("immediate out of range"));
2526 return 0;
2527 }
2528 }
2529 break;
2530
2531 case AARCH64_OPND_SVE_LIMM_MOV:
2532 {
2533 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2534 uint64_t uimm = opnd->imm.value;
2535 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2536 {
2537 set_other_error (mismatch_detail, idx,
2538 _("immediate out of range"));
2539 return 0;
2540 }
2541 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2542 {
2543 set_other_error (mismatch_detail, idx,
2544 _("invalid replicated MOV immediate"));
2545 return 0;
2546 }
2547 }
2548 break;
2549
2550 case AARCH64_OPND_SVE_PATTERN_SCALED:
2551 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2552 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2553 {
2554 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2555 return 0;
2556 }
2557 break;
2558
2559 case AARCH64_OPND_SVE_SHLIMM_PRED:
2560 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2561 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2562 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2563 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2564 {
2565 set_imm_out_of_range_error (mismatch_detail, idx,
2566 0, 8 * size - 1);
2567 return 0;
2568 }
2569 break;
2570
2571 case AARCH64_OPND_SVE_SHRIMM_PRED:
2572 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2573 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2574 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2575 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2576 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2577 {
2578 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2579 return 0;
2580 }
2581 break;
2582
2583 default:
2584 break;
2585 }
2586 break;
2587
2588 case AARCH64_OPND_CLASS_SYSTEM:
2589 switch (type)
2590 {
2591 case AARCH64_OPND_PSTATEFIELD:
2592 for (i = 0; aarch64_pstatefields[i].name; ++i)
2593 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2594 break;
2595 assert (aarch64_pstatefields[i].name);
2596 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2597 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2598 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2599 {
2600 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2601 return 0;
2602 }
2603 break;
2604 default:
2605 break;
2606 }
2607 break;
2608
2609 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2610 /* Get the upper bound for the element index. */
2611 if (opcode->op == OP_FCMLA_ELEM)
2612 /* FCMLA index range depends on the vector size of other operands
2613 and is halfed because complex numbers take two elements. */
2614 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2615 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2616 else
2617 num = 16;
2618 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2619 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2620
2621 /* Index out-of-range. */
2622 if (!value_in_range_p (opnd->reglane.index, 0, num))
2623 {
2624 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2625 return 0;
2626 }
2627 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2628 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2629 number is encoded in "size:M:Rm":
2630 size <Vm>
2631 00 RESERVED
2632 01 0:Rm
2633 10 M:Rm
2634 11 RESERVED */
2635 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2636 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2637 {
2638 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2639 return 0;
2640 }
2641 break;
2642
2643 case AARCH64_OPND_CLASS_MODIFIED_REG:
2644 assert (idx == 1 || idx == 2);
2645 switch (type)
2646 {
2647 case AARCH64_OPND_Rm_EXT:
2648 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2649 && opnd->shifter.kind != AARCH64_MOD_LSL)
2650 {
2651 set_other_error (mismatch_detail, idx,
2652 _("extend operator expected"));
2653 return 0;
2654 }
2655 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2656 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2657 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2658 case. */
2659 if (!aarch64_stack_pointer_p (opnds + 0)
2660 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2661 {
2662 if (!opnd->shifter.operator_present)
2663 {
2664 set_other_error (mismatch_detail, idx,
2665 _("missing extend operator"));
2666 return 0;
2667 }
2668 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2669 {
2670 set_other_error (mismatch_detail, idx,
2671 _("'LSL' operator not allowed"));
2672 return 0;
2673 }
2674 }
2675 assert (opnd->shifter.operator_present /* Default to LSL. */
2676 || opnd->shifter.kind == AARCH64_MOD_LSL);
2677 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2678 {
2679 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2680 return 0;
2681 }
2682 /* In the 64-bit form, the final register operand is written as Wm
2683 for all but the (possibly omitted) UXTX/LSL and SXTX
2684 operators.
2685 N.B. GAS allows X register to be used with any operator as a
2686 programming convenience. */
2687 if (qualifier == AARCH64_OPND_QLF_X
2688 && opnd->shifter.kind != AARCH64_MOD_LSL
2689 && opnd->shifter.kind != AARCH64_MOD_UXTX
2690 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2691 {
2692 set_other_error (mismatch_detail, idx, _("W register expected"));
2693 return 0;
2694 }
2695 break;
2696
2697 case AARCH64_OPND_Rm_SFT:
2698 /* ROR is not available to the shifted register operand in
2699 arithmetic instructions. */
2700 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2701 {
2702 set_other_error (mismatch_detail, idx,
2703 _("shift operator expected"));
2704 return 0;
2705 }
2706 if (opnd->shifter.kind == AARCH64_MOD_ROR
2707 && opcode->iclass != log_shift)
2708 {
2709 set_other_error (mismatch_detail, idx,
2710 _("'ROR' operator not allowed"));
2711 return 0;
2712 }
2713 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2714 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2715 {
2716 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2717 return 0;
2718 }
2719 break;
2720
2721 default:
2722 break;
2723 }
2724 break;
2725
2726 default:
2727 break;
2728 }
2729
2730 return 1;
2731 }
2732
2733 /* Main entrypoint for the operand constraint checking.
2734
2735 Return 1 if operands of *INST meet the constraint applied by the operand
2736 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2737 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2738 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2739 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2740 error kind when it is notified that an instruction does not pass the check).
2741
2742 Un-determined operand qualifiers may get established during the process. */
2743
2744 int
2745 aarch64_match_operands_constraint (aarch64_inst *inst,
2746 aarch64_operand_error *mismatch_detail)
2747 {
2748 int i;
2749
2750 DEBUG_TRACE ("enter");
2751
2752 i = inst->opcode->tied_operand;
2753
2754 if (i > 0)
2755 {
2756 /* Check for tied_operands with specific opcode iclass. */
2757 switch (inst->opcode->iclass)
2758 {
2759 /* For SME LDR and STR instructions #imm must have the same numerical
2760 value for both operands.
2761 */
2762 case sme_ldr:
2763 case sme_str:
2764 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2765 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2766 if (inst->operands[0].za_tile_vector.index.imm
2767 != inst->operands[1].addr.offset.imm)
2768 {
2769 if (mismatch_detail)
2770 {
2771 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2772 mismatch_detail->index = i;
2773 }
2774 return 0;
2775 }
2776 break;
2777
2778 default:
2779 /* Check for cases where a source register needs to be the same as the
2780 destination register. Do this before matching qualifiers since if
2781 an instruction has both invalid tying and invalid qualifiers,
2782 the error about qualifiers would suggest several alternative
2783 instructions that also have invalid tying. */
2784 if (inst->operands[0].reg.regno
2785 != inst->operands[i].reg.regno)
2786 {
2787 if (mismatch_detail)
2788 {
2789 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2790 mismatch_detail->index = i;
2791 mismatch_detail->error = NULL;
2792 }
2793 return 0;
2794 }
2795 break;
2796 }
2797 }
2798
2799 /* Match operands' qualifier.
2800 *INST has already had qualifier establish for some, if not all, of
2801 its operands; we need to find out whether these established
2802 qualifiers match one of the qualifier sequence in
2803 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2804 with the corresponding qualifier in such a sequence.
2805 Only basic operand constraint checking is done here; the more thorough
2806 constraint checking will carried out by operand_general_constraint_met_p,
2807 which has be to called after this in order to get all of the operands'
2808 qualifiers established. */
2809 if (match_operands_qualifier (inst, true /* update_p */) == 0)
2810 {
2811 DEBUG_TRACE ("FAIL on operand qualifier matching");
2812 if (mismatch_detail)
2813 {
2814 /* Return an error type to indicate that it is the qualifier
2815 matching failure; we don't care about which operand as there
2816 are enough information in the opcode table to reproduce it. */
2817 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2818 mismatch_detail->index = -1;
2819 mismatch_detail->error = NULL;
2820 }
2821 return 0;
2822 }
2823
2824 /* Match operands' constraint. */
2825 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2826 {
2827 enum aarch64_opnd type = inst->opcode->operands[i];
2828 if (type == AARCH64_OPND_NIL)
2829 break;
2830 if (inst->operands[i].skip)
2831 {
2832 DEBUG_TRACE ("skip the incomplete operand %d", i);
2833 continue;
2834 }
2835 if (operand_general_constraint_met_p (inst->operands, i, type,
2836 inst->opcode, mismatch_detail) == 0)
2837 {
2838 DEBUG_TRACE ("FAIL on operand %d", i);
2839 return 0;
2840 }
2841 }
2842
2843 DEBUG_TRACE ("PASS");
2844
2845 return 1;
2846 }
2847
2848 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2849 Also updates the TYPE of each INST->OPERANDS with the corresponding
2850 value of OPCODE->OPERANDS.
2851
2852 Note that some operand qualifiers may need to be manually cleared by
2853 the caller before it further calls the aarch64_opcode_encode; by
2854 doing this, it helps the qualifier matching facilities work
2855 properly. */
2856
2857 const aarch64_opcode*
2858 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2859 {
2860 int i;
2861 const aarch64_opcode *old = inst->opcode;
2862
2863 inst->opcode = opcode;
2864
2865 /* Update the operand types. */
2866 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2867 {
2868 inst->operands[i].type = opcode->operands[i];
2869 if (opcode->operands[i] == AARCH64_OPND_NIL)
2870 break;
2871 }
2872
2873 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2874
2875 return old;
2876 }
2877
2878 int
2879 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2880 {
2881 int i;
2882 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2883 if (operands[i] == operand)
2884 return i;
2885 else if (operands[i] == AARCH64_OPND_NIL)
2886 break;
2887 return -1;
2888 }
2889
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names, indexed as [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  /* Bank 0: register 31 names the stack pointer.  */
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  /* Bank 1: register 31 names the zero register.  */
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2921
2922 /* Return the integer register name.
2923 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2924
2925 static inline const char *
2926 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2927 {
2928 const int has_zr = sp_reg_p ? 0 : 1;
2929 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2930 return int_reg[has_zr][is_64][regno];
2931 }
2932
2933 /* Like get_int_reg_name, but IS_64 is always 1. */
2934
2935 static inline const char *
2936 get_64bit_int_reg_name (int regno, int sp_reg_p)
2937 {
2938 const int has_zr = sp_reg_p ? 0 : 1;
2939 return int_reg[has_zr][1][regno];
2940 }
2941
2942 /* Get the name of the integer offset register in OPND, using the shift type
2943 to decide whether it's a word or doubleword. */
2944
2945 static inline const char *
2946 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2947 {
2948 switch (opnd->shifter.kind)
2949 {
2950 case AARCH64_MOD_UXTW:
2951 case AARCH64_MOD_SXTW:
2952 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2953
2954 case AARCH64_MOD_LSL:
2955 case AARCH64_MOD_SXTX:
2956 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2957
2958 default:
2959 abort ();
2960 }
2961 }
2962
2963 /* Get the name of the SVE vector offset register in OPND, using the operand
2964 qualifier to decide whether the suffix should be .S or .D. */
2965
2966 static inline const char *
2967 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2968 {
2969 assert (qualifier == AARCH64_OPND_QLF_S_S
2970 || qualifier == AARCH64_OPND_QLF_S_D);
2971 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2972 }
2973
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union pairs an unsigned integer with a same-sized float so the
   bit pattern can be reinterpreted without pointer casts.  */

typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* Half-precision values are expanded to single precision first (see
   expand_fp_imm below), hence this uses the 32-bit layout as well.  */
typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;
2993
2994 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2995 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2996 (depending on the type of the instruction). IMM8 will be expanded to a
2997 single-precision floating-point value (SIZE == 4) or a double-precision
2998 floating-point value (SIZE == 8). A half-precision floating-point value
2999 (SIZE == 2) is expanded to a single-precision floating-point value. The
3000 expanded value is returned. */
3001
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 0x01;		/* imm8<7>  */
  uint32_t frac = imm8 & 0x7f;			/* imm8<6:0>  */
  uint32_t b6 = (frac >> 6) & 0x01;		/* imm8<6>  */
  uint32_t b6_rep4 = b6 ? 0xf : 0;		/* Replicate(imm8<6>,4)  */
  uint64_t result;

  if (size == 8)
    {
      /* Assemble the top 32 bits, then shift into the high half.  */
      uint64_t hi = ((uint64_t) sign << 31)		/* imm8<7>  */
	| ((uint64_t) (b6 ^ 1) << 30)			/* NOT(imm8<6>)  */
	| ((uint64_t) b6_rep4 << 26)
	| ((uint64_t) b6 << 25) | ((uint64_t) b6 << 24)
	| ((uint64_t) b6 << 23)				/* Replicate(imm8<6>,7)  */
	| ((uint64_t) frac << 16);			/* imm8<6>:imm8<5:0>  */
      result = hi << 32;
    }
  else if (size == 4 || size == 2)
    {
      result = ((uint64_t) sign << 31)			/* imm8<7>  */
	| ((uint64_t) (b6 ^ 1) << 30)			/* NOT(imm8<6>)  */
	| ((uint64_t) b6_rep4 << 26)			/* Replicate(imm8<6>,4)  */
	| ((uint64_t) frac << 19);			/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
3037
3038 /* Return a string based on FMT with the register style applied. */
3039
3040 static const char *
3041 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3042 {
3043 const char *txt;
3044 va_list ap;
3045
3046 va_start (ap, fmt);
3047 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3048 va_end (ap);
3049
3050 return txt;
3051 }
3052
3053 /* Return a string based on FMT with the immediate style applied. */
3054
3055 static const char *
3056 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3057 {
3058 const char *txt;
3059 va_list ap;
3060
3061 va_start (ap, fmt);
3062 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3063 va_end (ap);
3064
3065 return txt;
3066 }
3067
3068 /* Return a string based on FMT with the sub-mnemonic style applied. */
3069
3070 static const char *
3071 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3072 {
3073 const char *txt;
3074 va_list ap;
3075
3076 va_start (ap, fmt);
3077 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3078 va_end (ap);
3079
3080 return txt;
3081 }
3082
3083 /* Return a string based on FMT with the address style applied. */
3084
3085 static const char *
3086 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3087 {
3088 const char *txt;
3089 va_list ap;
3090
3091 va_start (ap, fmt);
3092 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3093 va_end (ap);
3094
3095 return txt;
3096 }
3097
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".
   The list is printed either in hyphenated range form ("{v0.4s-v3.4s}")
   or as an explicit comma-separated enumeration, optionally followed by
   an element index ("[<n>]") for operands that carry one.  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32, e.g. a 4-register list
     starting at v30 ends at v1.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Enumerated form; used for 1- and 2-register lists and for lists
	 that wrap past register 31 (last_reg <= first_reg).  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3167
3168 /* Print the register+immediate address in OPND to BUF, which has SIZE
3169 characters. BASE is the name of the base register. */
3170
3171 static void
3172 print_immediate_offset_address (char *buf, size_t size,
3173 const aarch64_opnd_info *opnd,
3174 const char *base,
3175 struct aarch64_styler *styler)
3176 {
3177 if (opnd->addr.writeback)
3178 {
3179 if (opnd->addr.preind)
3180 {
3181 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3182 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3183 else
3184 snprintf (buf, size, "[%s, %s]!",
3185 style_reg (styler, base),
3186 style_imm (styler, "#%d", opnd->addr.offset.imm));
3187 }
3188 else
3189 snprintf (buf, size, "[%s], %s",
3190 style_reg (styler, base),
3191 style_imm (styler, "#%d", opnd->addr.offset.imm));
3192 }
3193 else
3194 {
3195 if (opnd->shifter.operator_present)
3196 {
3197 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3198 snprintf (buf, size, "[%s, %s, %s]",
3199 style_reg (styler, base),
3200 style_imm (styler, "#%d", opnd->addr.offset.imm),
3201 style_sub_mnem (styler, "mul vl"));
3202 }
3203 else if (opnd->addr.offset.imm)
3204 snprintf (buf, size, "[%s, %s]",
3205 style_reg (styler, base),
3206 style_imm (styler, "#%d", opnd->addr.offset.imm));
3207 else
3208 snprintf (buf, size, "[%s]", style_reg (styler, base));
3209 }
3210 }
3211
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  The offset register may be
   followed by an extend/shift operator and amount, e.g.
   "[x0, w1, sxtw #2]".  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset,
			       struct aarch64_styler *styler)
{
  char tb[32];	/* Temporary buffer.  */
  bool print_extend_p = true;
  bool print_amount_p = true;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  /* A zero amount is normally suppressed; the exception is 8-bit
     loads/stores (qualifier S_B) with an explicitly present amount,
     where "lsl #0" must still be printed.  */
  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Not print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = false;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = false;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s %s",
		  style_sub_mnem (styler, shift_name),
		  style_imm (styler, "#%" PRIi64,
  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
			     (opnd->shifter.amount % 100)));
      else
	snprintf (tb, sizeof (tb), ", %s",
		  style_sub_mnem (styler, shift_name));
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
	    style_reg (styler, offset), tb);
}
3257
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.

   This is a greedy decomposition: the name table is ordered from the
   widest coverage ("za" = all 8 bytes of the mask) down to single .d
   tiles (one bit each), and each entry whose mask bits are all present
   is emitted and its bits cleared, so wider names are always preferred
   over equivalent sets of narrower ones.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names, parallel to the bit masks in ZAN_V below.  */
  const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
                        "za1.s", "za2.s", "za3.s", "za0.d",
                        "za1.d", "za2.d", "za3.d", "za4.d",
                        "za5.d", "za6.d", "za7.d", " " };
  /* Immediate-mask pattern each tile name covers.  */
  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
                        0x22, 0x44, 0x88, 0x01,
                        0x02, 0x04, 0x08, 0x10,
                        0x20, 0x40, 0x80, 0x00 };
  int i, k;
  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);

  /* K tracks the number of characters written so far.
     NOTE(review): assumes SIZE is large enough that no snprintf call
     truncates; there is no truncation recovery here.  */
  k = snprintf (buf, size, "{");
  for (i = 0; i < ZAN_SIZE; i++)
    {
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  mask &= ~zan_v[i];
	  /* K > 1 means at least one name was already printed, so a
	     separator is needed.  */
	  if (k > 1)
	    k += snprintf (buf + k, size - k, ", ");

	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + k, size - k, "}");
}
3298
3299 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3300 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3301 PC, PCREL_P and ADDRESS are used to pass in and return information about
3302 the PC-relative address calculation, where the PC value is passed in
3303 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3304 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3305 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3306
3307 The function serves both the disassembler and the assembler diagnostics
3308 issuer, which is the reason why it lives in this file. */
3309
3310 void
3311 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3312 const aarch64_opcode *opcode,
3313 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3314 bfd_vma *address, char** notes,
3315 char *comment, size_t comment_size,
3316 aarch64_feature_set features,
3317 struct aarch64_styler *styler)
3318 {
3319 unsigned int i, num_conds;
3320 const char *name = NULL;
3321 const aarch64_opnd_info *opnd = opnds + idx;
3322 enum aarch64_modifier_kind kind;
3323 uint64_t addr, enum_value;
3324
3325 if (comment != NULL)
3326 {
3327 assert (comment_size > 0);
3328 comment[0] = '\0';
3329 }
3330 else
3331 assert (comment_size == 0);
3332
3333 buf[0] = '\0';
3334 if (pcrel_p)
3335 *pcrel_p = 0;
3336
3337 switch (opnd->type)
3338 {
3339 case AARCH64_OPND_Rd:
3340 case AARCH64_OPND_Rn:
3341 case AARCH64_OPND_Rm:
3342 case AARCH64_OPND_Rt:
3343 case AARCH64_OPND_Rt2:
3344 case AARCH64_OPND_Rs:
3345 case AARCH64_OPND_Ra:
3346 case AARCH64_OPND_Rt_LS64:
3347 case AARCH64_OPND_Rt_SYS:
3348 case AARCH64_OPND_PAIRREG:
3349 case AARCH64_OPND_SVE_Rm:
3350 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3351 the <ic_op>, therefore we use opnd->present to override the
3352 generic optional-ness information. */
3353 if (opnd->type == AARCH64_OPND_Rt_SYS)
3354 {
3355 if (!opnd->present)
3356 break;
3357 }
3358 /* Omit the operand, e.g. RET. */
3359 else if (optional_operand_p (opcode, idx)
3360 && (opnd->reg.regno
3361 == get_optional_operand_default_value (opcode)))
3362 break;
3363 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3364 || opnd->qualifier == AARCH64_OPND_QLF_X);
3365 snprintf (buf, size, "%s",
3366 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3367 opnd->qualifier, 0)));
3368 break;
3369
3370 case AARCH64_OPND_Rd_SP:
3371 case AARCH64_OPND_Rn_SP:
3372 case AARCH64_OPND_Rt_SP:
3373 case AARCH64_OPND_SVE_Rn_SP:
3374 case AARCH64_OPND_Rm_SP:
3375 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3376 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3377 || opnd->qualifier == AARCH64_OPND_QLF_X
3378 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3379 snprintf (buf, size, "%s",
3380 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3381 opnd->qualifier, 1)));
3382 break;
3383
3384 case AARCH64_OPND_Rm_EXT:
3385 kind = opnd->shifter.kind;
3386 assert (idx == 1 || idx == 2);
3387 if ((aarch64_stack_pointer_p (opnds)
3388 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3389 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3390 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3391 && kind == AARCH64_MOD_UXTW)
3392 || (opnd->qualifier == AARCH64_OPND_QLF_X
3393 && kind == AARCH64_MOD_UXTX)))
3394 {
3395 /* 'LSL' is the preferred form in this case. */
3396 kind = AARCH64_MOD_LSL;
3397 if (opnd->shifter.amount == 0)
3398 {
3399 /* Shifter omitted. */
3400 snprintf (buf, size, "%s",
3401 style_reg (styler,
3402 get_int_reg_name (opnd->reg.regno,
3403 opnd->qualifier, 0)));
3404 break;
3405 }
3406 }
3407 if (opnd->shifter.amount)
3408 snprintf (buf, size, "%s, %s %s",
3409 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3410 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3411 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3412 else
3413 snprintf (buf, size, "%s, %s",
3414 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3415 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3416 break;
3417
3418 case AARCH64_OPND_Rm_SFT:
3419 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3420 || opnd->qualifier == AARCH64_OPND_QLF_X);
3421 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3422 snprintf (buf, size, "%s",
3423 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3424 opnd->qualifier, 0)));
3425 else
3426 snprintf (buf, size, "%s, %s %s",
3427 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3428 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3429 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3430 break;
3431
3432 case AARCH64_OPND_Fd:
3433 case AARCH64_OPND_Fn:
3434 case AARCH64_OPND_Fm:
3435 case AARCH64_OPND_Fa:
3436 case AARCH64_OPND_Ft:
3437 case AARCH64_OPND_Ft2:
3438 case AARCH64_OPND_Sd:
3439 case AARCH64_OPND_Sn:
3440 case AARCH64_OPND_Sm:
3441 case AARCH64_OPND_SVE_VZn:
3442 case AARCH64_OPND_SVE_Vd:
3443 case AARCH64_OPND_SVE_Vm:
3444 case AARCH64_OPND_SVE_Vn:
3445 snprintf (buf, size, "%s",
3446 style_reg (styler, "%s%d",
3447 aarch64_get_qualifier_name (opnd->qualifier),
3448 opnd->reg.regno));
3449 break;
3450
3451 case AARCH64_OPND_Va:
3452 case AARCH64_OPND_Vd:
3453 case AARCH64_OPND_Vn:
3454 case AARCH64_OPND_Vm:
3455 snprintf (buf, size, "%s",
3456 style_reg (styler, "v%d.%s", opnd->reg.regno,
3457 aarch64_get_qualifier_name (opnd->qualifier)));
3458 break;
3459
3460 case AARCH64_OPND_Ed:
3461 case AARCH64_OPND_En:
3462 case AARCH64_OPND_Em:
3463 case AARCH64_OPND_Em16:
3464 case AARCH64_OPND_SM3_IMM2:
3465 snprintf (buf, size, "%s[%s]",
3466 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3467 aarch64_get_qualifier_name (opnd->qualifier)),
3468 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3469 break;
3470
3471 case AARCH64_OPND_VdD1:
3472 case AARCH64_OPND_VnD1:
3473 snprintf (buf, size, "%s[%s]",
3474 style_reg (styler, "v%d.d", opnd->reg.regno),
3475 style_imm (styler, "1"));
3476 break;
3477
3478 case AARCH64_OPND_LVn:
3479 case AARCH64_OPND_LVt:
3480 case AARCH64_OPND_LVt_AL:
3481 case AARCH64_OPND_LEt:
3482 print_register_list (buf, size, opnd, "v", styler);
3483 break;
3484
3485 case AARCH64_OPND_SVE_Pd:
3486 case AARCH64_OPND_SVE_Pg3:
3487 case AARCH64_OPND_SVE_Pg4_5:
3488 case AARCH64_OPND_SVE_Pg4_10:
3489 case AARCH64_OPND_SVE_Pg4_16:
3490 case AARCH64_OPND_SVE_Pm:
3491 case AARCH64_OPND_SVE_Pn:
3492 case AARCH64_OPND_SVE_Pt:
3493 case AARCH64_OPND_SME_Pm:
3494 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3495 snprintf (buf, size, "%s",
3496 style_reg (styler, "p%d", opnd->reg.regno));
3497 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3498 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3499 snprintf (buf, size, "%s",
3500 style_reg (styler, "p%d/%s", opnd->reg.regno,
3501 aarch64_get_qualifier_name (opnd->qualifier)));
3502 else
3503 snprintf (buf, size, "%s",
3504 style_reg (styler, "p%d.%s", opnd->reg.regno,
3505 aarch64_get_qualifier_name (opnd->qualifier)));
3506 break;
3507
3508 case AARCH64_OPND_SVE_Za_5:
3509 case AARCH64_OPND_SVE_Za_16:
3510 case AARCH64_OPND_SVE_Zd:
3511 case AARCH64_OPND_SVE_Zm_5:
3512 case AARCH64_OPND_SVE_Zm_16:
3513 case AARCH64_OPND_SVE_Zn:
3514 case AARCH64_OPND_SVE_Zt:
3515 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3516 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3517 else
3518 snprintf (buf, size, "%s",
3519 style_reg (styler, "z%d.%s", opnd->reg.regno,
3520 aarch64_get_qualifier_name (opnd->qualifier)));
3521 break;
3522
3523 case AARCH64_OPND_SVE_ZnxN:
3524 case AARCH64_OPND_SVE_ZtxN:
3525 print_register_list (buf, size, opnd, "z", styler);
3526 break;
3527
3528 case AARCH64_OPND_SVE_Zm3_INDEX:
3529 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3530 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3531 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3532 case AARCH64_OPND_SVE_Zm4_INDEX:
3533 case AARCH64_OPND_SVE_Zn_INDEX:
3534 snprintf (buf, size, "%s[%s]",
3535 style_reg (styler, "z%d.%s", opnd->reglane.regno,
3536 aarch64_get_qualifier_name (opnd->qualifier)),
3537 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3538 break;
3539
3540 case AARCH64_OPND_SME_ZAda_2b:
3541 case AARCH64_OPND_SME_ZAda_3b:
3542 snprintf (buf, size, "%s",
3543 style_reg (styler, "za%d.%s", opnd->reg.regno,
3544 aarch64_get_qualifier_name (opnd->qualifier)));
3545 break;
3546
3547 case AARCH64_OPND_SME_ZA_HV_idx_src:
3548 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3549 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3550 snprintf (buf, size, "%s%s[%s, %s]%s",
3551 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3552 style_reg (styler, "za%d%c.%s",
3553 opnd->za_tile_vector.regno,
3554 opnd->za_tile_vector.v == 1 ? 'v' : 'h',
3555 aarch64_get_qualifier_name (opnd->qualifier)),
3556 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3557 style_imm (styler, "%d", opnd->za_tile_vector.index.imm),
3558 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3559 break;
3560
3561 case AARCH64_OPND_SME_list_of_64bit_tiles:
3562 print_sme_za_list (buf, size, opnd->reg.regno, styler);
3563 break;
3564
3565 case AARCH64_OPND_SME_ZA_array:
3566 snprintf (buf, size, "%s[%s, %s]",
3567 style_reg (styler, "za"),
3568 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3569 style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3570 break;
3571
3572 case AARCH64_OPND_SME_SM_ZA:
3573 snprintf (buf, size, "%s",
3574 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
3575 break;
3576
3577 case AARCH64_OPND_SME_PnT_Wm_imm:
3578 snprintf (buf, size, "%s[%s, %s]",
3579 style_reg (styler, "p%d.%s", opnd->za_tile_vector.regno,
3580 aarch64_get_qualifier_name (opnd->qualifier)),
3581 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3582 style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3583 break;
3584
3585 case AARCH64_OPND_CRn:
3586 case AARCH64_OPND_CRm:
3587 snprintf (buf, size, "%s",
3588 style_reg (styler, "C%" PRIi64, opnd->imm.value));
3589 break;
3590
3591 case AARCH64_OPND_IDX:
3592 case AARCH64_OPND_MASK:
3593 case AARCH64_OPND_IMM:
3594 case AARCH64_OPND_IMM_2:
3595 case AARCH64_OPND_WIDTH:
3596 case AARCH64_OPND_UIMM3_OP1:
3597 case AARCH64_OPND_UIMM3_OP2:
3598 case AARCH64_OPND_BIT_NUM:
3599 case AARCH64_OPND_IMM_VLSL:
3600 case AARCH64_OPND_IMM_VLSR:
3601 case AARCH64_OPND_SHLL_IMM:
3602 case AARCH64_OPND_IMM0:
3603 case AARCH64_OPND_IMMR:
3604 case AARCH64_OPND_IMMS:
3605 case AARCH64_OPND_UNDEFINED:
3606 case AARCH64_OPND_FBITS:
3607 case AARCH64_OPND_TME_UIMM16:
3608 case AARCH64_OPND_SIMM5:
3609 case AARCH64_OPND_SVE_SHLIMM_PRED:
3610 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3611 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3612 case AARCH64_OPND_SVE_SHRIMM_PRED:
3613 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3614 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3615 case AARCH64_OPND_SVE_SIMM5:
3616 case AARCH64_OPND_SVE_SIMM5B:
3617 case AARCH64_OPND_SVE_SIMM6:
3618 case AARCH64_OPND_SVE_SIMM8:
3619 case AARCH64_OPND_SVE_UIMM3:
3620 case AARCH64_OPND_SVE_UIMM7:
3621 case AARCH64_OPND_SVE_UIMM8:
3622 case AARCH64_OPND_SVE_UIMM8_53:
3623 case AARCH64_OPND_IMM_ROT1:
3624 case AARCH64_OPND_IMM_ROT2:
3625 case AARCH64_OPND_IMM_ROT3:
3626 case AARCH64_OPND_SVE_IMM_ROT1:
3627 case AARCH64_OPND_SVE_IMM_ROT2:
3628 case AARCH64_OPND_SVE_IMM_ROT3:
3629 case AARCH64_OPND_CSSC_SIMM8:
3630 case AARCH64_OPND_CSSC_UIMM8:
3631 snprintf (buf, size, "%s",
3632 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3633 break;
3634
3635 case AARCH64_OPND_SVE_I1_HALF_ONE:
3636 case AARCH64_OPND_SVE_I1_HALF_TWO:
3637 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3638 {
3639 single_conv_t c;
3640 c.i = opnd->imm.value;
3641 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
3642 break;
3643 }
3644
3645 case AARCH64_OPND_SVE_PATTERN:
3646 if (optional_operand_p (opcode, idx)
3647 && opnd->imm.value == get_optional_operand_default_value (opcode))
3648 break;
3649 enum_value = opnd->imm.value;
3650 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3651 if (aarch64_sve_pattern_array[enum_value])
3652 snprintf (buf, size, "%s",
3653 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
3654 else
3655 snprintf (buf, size, "%s",
3656 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3657 break;
3658
3659 case AARCH64_OPND_SVE_PATTERN_SCALED:
3660 if (optional_operand_p (opcode, idx)
3661 && !opnd->shifter.operator_present
3662 && opnd->imm.value == get_optional_operand_default_value (opcode))
3663 break;
3664 enum_value = opnd->imm.value;
3665 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3666 if (aarch64_sve_pattern_array[opnd->imm.value])
3667 snprintf (buf, size, "%s",
3668 style_reg (styler,
3669 aarch64_sve_pattern_array[opnd->imm.value]));
3670 else
3671 snprintf (buf, size, "%s",
3672 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3673 if (opnd->shifter.operator_present)
3674 {
3675 size_t len = strlen (buf);
3676 const char *shift_name
3677 = aarch64_operand_modifiers[opnd->shifter.kind].name;
3678 snprintf (buf + len, size - len, ", %s %s",
3679 style_sub_mnem (styler, shift_name),
3680 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3681 }
3682 break;
3683
3684 case AARCH64_OPND_SVE_PRFOP:
3685 enum_value = opnd->imm.value;
3686 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3687 if (aarch64_sve_prfop_array[enum_value])
3688 snprintf (buf, size, "%s",
3689 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
3690 else
3691 snprintf (buf, size, "%s",
3692 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3693 break;
3694
3695 case AARCH64_OPND_IMM_MOV:
3696 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3697 {
3698 case 4: /* e.g. MOV Wd, #<imm32>. */
3699 {
3700 int imm32 = opnd->imm.value;
3701 snprintf (buf, size, "%s",
3702 style_imm (styler, "#0x%-20x", imm32));
3703 snprintf (comment, comment_size, "#%d", imm32);
3704 }
3705 break;
3706 case 8: /* e.g. MOV Xd, #<imm64>. */
3707 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
3708 opnd->imm.value));
3709 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3710 break;
3711 default:
3712 snprintf (buf, size, "<invalid>");
3713 break;
3714 }
3715 break;
3716
3717 case AARCH64_OPND_FPIMM0:
3718 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
3719 break;
3720
3721 case AARCH64_OPND_LIMM:
3722 case AARCH64_OPND_AIMM:
3723 case AARCH64_OPND_HALF:
3724 case AARCH64_OPND_SVE_INV_LIMM:
3725 case AARCH64_OPND_SVE_LIMM:
3726 case AARCH64_OPND_SVE_LIMM_MOV:
3727 if (opnd->shifter.amount)
3728 snprintf (buf, size, "%s, %s %s",
3729 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3730 style_sub_mnem (styler, "lsl"),
3731 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3732 else
3733 snprintf (buf, size, "%s",
3734 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3735 break;
3736
3737 case AARCH64_OPND_SIMD_IMM:
3738 case AARCH64_OPND_SIMD_IMM_SFT:
3739 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3740 || opnd->shifter.kind == AARCH64_MOD_NONE)
3741 snprintf (buf, size, "%s",
3742 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3743 else
3744 snprintf (buf, size, "%s, %s %s",
3745 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3746 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3747 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3748 break;
3749
3750 case AARCH64_OPND_SVE_AIMM:
3751 case AARCH64_OPND_SVE_ASIMM:
3752 if (opnd->shifter.amount)
3753 snprintf (buf, size, "%s, %s %s",
3754 style_imm (styler, "#%" PRIi64, opnd->imm.value),
3755 style_sub_mnem (styler, "lsl"),
3756 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3757 else
3758 snprintf (buf, size, "%s",
3759 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3760 break;
3761
3762 case AARCH64_OPND_FPIMM:
3763 case AARCH64_OPND_SIMD_FPIMM:
3764 case AARCH64_OPND_SVE_FPIMM8:
3765 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3766 {
3767 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3768 {
3769 half_conv_t c;
3770 c.i = expand_fp_imm (2, opnd->imm.value);
3771 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3772 }
3773 break;
3774 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3775 {
3776 single_conv_t c;
3777 c.i = expand_fp_imm (4, opnd->imm.value);
3778 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3779 }
3780 break;
3781 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3782 {
3783 double_conv_t c;
3784 c.i = expand_fp_imm (8, opnd->imm.value);
3785 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
3786 }
3787 break;
3788 default:
3789 snprintf (buf, size, "<invalid>");
3790 break;
3791 }
3792 break;
3793
3794 case AARCH64_OPND_CCMP_IMM:
3795 case AARCH64_OPND_NZCV:
3796 case AARCH64_OPND_EXCEPTION:
3797 case AARCH64_OPND_UIMM4:
3798 case AARCH64_OPND_UIMM4_ADDG:
3799 case AARCH64_OPND_UIMM7:
3800 case AARCH64_OPND_UIMM10:
3801 if (optional_operand_p (opcode, idx)
3802 && (opnd->imm.value ==
3803 (int64_t) get_optional_operand_default_value (opcode)))
3804 /* Omit the operand, e.g. DCPS1. */
3805 break;
3806 snprintf (buf, size, "%s",
3807 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
3808 break;
3809
3810 case AARCH64_OPND_COND:
3811 case AARCH64_OPND_COND1:
3812 snprintf (buf, size, "%s",
3813 style_sub_mnem (styler, opnd->cond->names[0]));
3814 num_conds = ARRAY_SIZE (opnd->cond->names);
3815 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3816 {
3817 size_t len = comment != NULL ? strlen (comment) : 0;
3818 if (i == 1)
3819 snprintf (comment + len, comment_size - len, "%s = %s",
3820 opnd->cond->names[0], opnd->cond->names[i]);
3821 else
3822 snprintf (comment + len, comment_size - len, ", %s",
3823 opnd->cond->names[i]);
3824 }
3825 break;
3826
3827 case AARCH64_OPND_ADDR_ADRP:
3828 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3829 + opnd->imm.value;
3830 if (pcrel_p)
3831 *pcrel_p = 1;
3832 if (address)
3833 *address = addr;
3834 /* This is not necessary during the disassembling, as print_address_func
3835 in the disassemble_info will take care of the printing. But some
3836 other callers may be still interested in getting the string in *STR,
3837 so here we do snprintf regardless. */
3838 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
3839 break;
3840
3841 case AARCH64_OPND_ADDR_PCREL14:
3842 case AARCH64_OPND_ADDR_PCREL19:
3843 case AARCH64_OPND_ADDR_PCREL21:
3844 case AARCH64_OPND_ADDR_PCREL26:
3845 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3846 if (pcrel_p)
3847 *pcrel_p = 1;
3848 if (address)
3849 *address = addr;
3850 /* This is not necessary during the disassembling, as print_address_func
3851 in the disassemble_info will take care of the printing. But some
3852 other callers may be still interested in getting the string in *STR,
3853 so here we do snprintf regardless. */
3854 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
3855 break;
3856
3857 case AARCH64_OPND_ADDR_SIMPLE:
3858 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3859 case AARCH64_OPND_SIMD_ADDR_POST:
3860 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3861 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3862 {
3863 if (opnd->addr.offset.is_reg)
3864 snprintf (buf, size, "[%s], %s",
3865 style_reg (styler, name),
3866 style_reg (styler, "x%d", opnd->addr.offset.regno));
3867 else
3868 snprintf (buf, size, "[%s], %s",
3869 style_reg (styler, name),
3870 style_imm (styler, "#%d", opnd->addr.offset.imm));
3871 }
3872 else
3873 snprintf (buf, size, "[%s]", style_reg (styler, name));
3874 break;
3875
3876 case AARCH64_OPND_ADDR_REGOFF:
3877 case AARCH64_OPND_SVE_ADDR_R:
3878 case AARCH64_OPND_SVE_ADDR_RR:
3879 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3880 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3881 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3882 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3883 case AARCH64_OPND_SVE_ADDR_RX:
3884 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3885 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3886 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3887 print_register_offset_address
3888 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3889 get_offset_int_reg_name (opnd), styler);
3890 break;
3891
3892 case AARCH64_OPND_SVE_ADDR_ZX:
3893 print_register_offset_address
3894 (buf, size, opnd,
3895 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3896 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
3897 break;
3898
3899 case AARCH64_OPND_SVE_ADDR_RZ:
3900 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3901 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3902 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3903 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3904 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3905 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3906 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3907 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3908 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3909 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3910 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3911 print_register_offset_address
3912 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3913 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3914 styler);
3915 break;
3916
3917 case AARCH64_OPND_ADDR_SIMM7:
3918 case AARCH64_OPND_ADDR_SIMM9:
3919 case AARCH64_OPND_ADDR_SIMM9_2:
3920 case AARCH64_OPND_ADDR_SIMM10:
3921 case AARCH64_OPND_ADDR_SIMM11:
3922 case AARCH64_OPND_ADDR_SIMM13:
3923 case AARCH64_OPND_ADDR_OFFSET:
3924 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
3925 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3926 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3927 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3928 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3929 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3930 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3931 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3932 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3933 case AARCH64_OPND_SVE_ADDR_RI_U6:
3934 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3935 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3936 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3937 print_immediate_offset_address
3938 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3939 styler);
3940 break;
3941
3942 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3943 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3944 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3945 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3946 print_immediate_offset_address
3947 (buf, size, opnd,
3948 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3949 styler);
3950 break;
3951
3952 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3953 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3954 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3955 print_register_offset_address
3956 (buf, size, opnd,
3957 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3958 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3959 styler);
3960 break;
3961
3962 case AARCH64_OPND_ADDR_UIMM12:
3963 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3964 if (opnd->addr.offset.imm)
3965 snprintf (buf, size, "[%s, %s]",
3966 style_reg (styler, name),
3967 style_imm (styler, "#%d", opnd->addr.offset.imm));
3968 else
3969 snprintf (buf, size, "[%s]", style_reg (styler, name));
3970 break;
3971
3972 case AARCH64_OPND_SYSREG:
3973 for (i = 0; aarch64_sys_regs[i].name; ++i)
3974 {
3975 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3976
3977 bool exact_match
3978 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3979 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3980 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3981
3982 /* Try and find an exact match, But if that fails, return the first
3983 partial match that was found. */
3984 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3985 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3986 && (name == NULL || exact_match))
3987 {
3988 name = aarch64_sys_regs[i].name;
3989 if (exact_match)
3990 {
3991 if (notes)
3992 *notes = NULL;
3993 break;
3994 }
3995
3996 /* If we didn't match exactly, that means the presense of a flag
3997 indicates what we didn't want for this instruction. e.g. If
3998 F_REG_READ is there, that means we were looking for a write
3999 register. See aarch64_ext_sysreg. */
4000 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4001 *notes = _("reading from a write-only register");
4002 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4003 *notes = _("writing to a read-only register");
4004 }
4005 }
4006
4007 if (name)
4008 snprintf (buf, size, "%s", style_reg (styler, name));
4009 else
4010 {
4011 /* Implementation defined system register. */
4012 unsigned int value = opnd->sysreg.value;
4013 snprintf (buf, size, "%s",
4014 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4015 (value >> 14) & 0x3, (value >> 11) & 0x7,
4016 (value >> 7) & 0xf, (value >> 3) & 0xf,
4017 value & 0x7));
4018 }
4019 break;
4020
4021 case AARCH64_OPND_PSTATEFIELD:
4022 for (i = 0; aarch64_pstatefields[i].name; ++i)
4023 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4024 {
4025 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4026 SVCRZA and SVCRSMZA. */
4027 uint32_t flags = aarch64_pstatefields[i].flags;
4028 if (flags & F_REG_IN_CRM
4029 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4030 != PSTATE_DECODE_CRM (flags)))
4031 continue;
4032 break;
4033 }
4034 assert (aarch64_pstatefields[i].name);
4035 snprintf (buf, size, "%s",
4036 style_reg (styler, aarch64_pstatefields[i].name));
4037 break;
4038
4039 case AARCH64_OPND_SYSREG_AT:
4040 case AARCH64_OPND_SYSREG_DC:
4041 case AARCH64_OPND_SYSREG_IC:
4042 case AARCH64_OPND_SYSREG_TLBI:
4043 case AARCH64_OPND_SYSREG_SR:
4044 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4045 break;
4046
4047 case AARCH64_OPND_BARRIER:
4048 case AARCH64_OPND_BARRIER_DSB_NXS:
4049 {
4050 if (opnd->barrier->name[0] == '#')
4051 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4052 else
4053 snprintf (buf, size, "%s",
4054 style_sub_mnem (styler, opnd->barrier->name));
4055 }
4056 break;
4057
4058 case AARCH64_OPND_BARRIER_ISB:
4059 /* Operand can be omitted, e.g. in DCPS1. */
4060 if (! optional_operand_p (opcode, idx)
4061 || (opnd->barrier->value
4062 != get_optional_operand_default_value (opcode)))
4063 snprintf (buf, size, "%s",
4064 style_imm (styler, "#0x%x", opnd->barrier->value));
4065 break;
4066
4067 case AARCH64_OPND_PRFOP:
4068 if (opnd->prfop->name != NULL)
4069 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4070 else
4071 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4072 opnd->prfop->value));
4073 break;
4074
4075 case AARCH64_OPND_BARRIER_PSB:
4076 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4077 break;
4078
4079 case AARCH64_OPND_BTI_TARGET:
4080 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4081 snprintf (buf, size, "%s",
4082 style_sub_mnem (styler, opnd->hint_option->name));
4083 break;
4084
4085 case AARCH64_OPND_MOPS_ADDR_Rd:
4086 case AARCH64_OPND_MOPS_ADDR_Rs:
4087 snprintf (buf, size, "[%s]!",
4088 style_reg (styler,
4089 get_int_reg_name (opnd->reg.regno,
4090 AARCH64_OPND_QLF_X, 0)));
4091 break;
4092
4093 case AARCH64_OPND_MOPS_WB_Rn:
4094 snprintf (buf, size, "%s!",
4095 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4096 AARCH64_OPND_QLF_X, 0)));
4097 break;
4098
4099 default:
4100 snprintf (buf, size, "<invalid>");
4101 break;
4102 }
4103 }
4104
/* Pack the (op0, op1, CRn, CRm, op2) fields of a system register name
   into a single integer encoding.  After the final right shift by 5 the
   fields sit at: op0 in bits [15:14], op1 in bits [13:11], CRn in bits
   [10:7], CRm in bits [6:3] and op2 in bits [2:0] — the same layout the
   printer above unpacks for implementation-defined registers
   ((value >> 14) & 0x3, (value >> 11) & 0x7, ...).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers:
   op0 is fixed at 3 and CRn at 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For 3.9.10 System Instructions: op0 is fixed at 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4112
/* Shorthand names for the 16 possible CRn/CRm field values, used to
   keep the system register table below readable.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
4129
/* Construct one aarch64_sys_reg table entry from its name string,
   CPENC/CPEN_ encoding, F_* flags and feature-bit mask.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

/* A register present in the base architecture: no feature gating.  */
#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

/* A register gated on the single architecture extension FEAT; the
   F_ARCHEXT flag marks the entry as extension-dependent.  */
#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

/* As SR_FEAT, but the feature mask combines the bits for FE1 and FE2.
   NOTE(review): whether the consumer treats the OR'd mask as "both
   required" or "either suffices" is decided elsewhere — confirm against
   the feature-check code before relying on one reading.  */
#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_V8_1_A(n,e,f)  SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f)  SR_FEAT2(n,e,f,V8_A,V8_4)

/* One-feature wrappers naming the specific extension each register
   belongs to.  */
#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_V8_6(n,e,f)	  SR_FEAT (n,e,f,V8_6)
#define SR_V8_7(n,e,f)	  SR_FEAT (n,e,f,V8_7)
#define SR_V8_8(n,e,f)	  SR_FEAT (n,e,f,V8_8)
/* Has no separate libopcodes feature flag, but separated out for clarity.  */
#define SR_GIC(n,e,f)	  SR_CORE (n,e,f)
/* Has no separate libopcodes feature flag, but separated out for clarity.  */
#define SR_AMU(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_LOR(n,e,f)	  SR_FEAT (n,e,f,LOR)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_RNG(n,e,f)	  SR_FEAT (n,e,f,RNG)
#define SR_SME(n,e,f)	  SR_FEAT (n,e,f,SME)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4169
/* Invoke F (X, N) for every N from 1 to 15, producing fifteen
   comma-terminated expansions — used below to generate the numbered
   prbar<n>/prlar<n> register entries.  */
#define SR_EXPAND_ELx(f,x) \
  f (x, 1),  \
  f (x, 2),  \
  f (x, 3),  \
  f (x, 4),  \
  f (x, 5),  \
  f (x, 6),  \
  f (x, 7),  \
  f (x, 8),  \
  f (x, 9),  \
  f (x, 10), \
  f (x, 11), \
  f (x, 12), \
  f (x, 13), \
  f (x, 14), \
  f (x, 15),

/* Expand F over N = 1..15 for both EL1 and EL2.  */
#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
4190
/* TODO: one more issue remains to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
4196 const aarch64_sys_reg aarch64_sys_regs [] =
4197 {
4198 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4199 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4200 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4201 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4202 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4203 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4204 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4205 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4206 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4207 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4208 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4209 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4210 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4211 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4212 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4213 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4214 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4215 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4216 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4217 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4218 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4219 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4220 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4221 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4222 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4223 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4224 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4225 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4226 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4227 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4228 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4229 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4230 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4231 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4232 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4233 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4234 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4235 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4236 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4237 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4238 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4239 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4240 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4241 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4242 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4243 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4244 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4245 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4246 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4247 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4248 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4249 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4250 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4251 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4252 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4253 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4254 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4255 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4256 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4257 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4258 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4259 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4260 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4261 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4262 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4263 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4264 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4265 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4266 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4267 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4268 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4269 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4270 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4271 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4272 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4273 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4274 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4275 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4276 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4277 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4278 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4279 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4280 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4281 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4282 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4283 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4284 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4285 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4286 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4287 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4288 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4289 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4290 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4291 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4292 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4293 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4294 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4295 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4296 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4297 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4298 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4299 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4300 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4301 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4302 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4303 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4304 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4305 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4306 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4307 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4308 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4309 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4310 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4311 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4312 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4313 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4314 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4315 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4316 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4317 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4318 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4319 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4320 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4321 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4322 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4323 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4324 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4325 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4326 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4327 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4328 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4329 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4330 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4331 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4332 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4333 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4334 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4335 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4336 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4337 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4338 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4339 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4340 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4341 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4342 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4343 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4344 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4345 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4346 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4347 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4348 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4349 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4350 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4351 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4352 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4353 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4354 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4355 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4356 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4357 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4358 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4359 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4360 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4361 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4362 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4363 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4364 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4365 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4366 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4367 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4368 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4369 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4370 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4371 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4372 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4373 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4374 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4375 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4376 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4377 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4378 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4379 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4380 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4381 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4382 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4383 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4384 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4385 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4386 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4387 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4388 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4389 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4390 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4391 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4392 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4393 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4394 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4395 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4396 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4397 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4398 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4399 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4400 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4401 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4402 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4403 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4404 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4405 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4406 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4407 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4408 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4409 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4410 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4411 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4412 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4413 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4414 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4415 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4416 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4417 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4418 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4419 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4420 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4421 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4422 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4423 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4424 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4425 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4426 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4427 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4428 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4429 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4430 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4431 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4432 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4433 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4434 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4435 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4436 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4437 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4438 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4439 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4440 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4441 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4442 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4443 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4444 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4445 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4446 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4447 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4448 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4449 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4450 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4451 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4452 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4453 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4454 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4455 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4456 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4457 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4458 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4459 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4460 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4461 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4462 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4463 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4464 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4465 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4466 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4467 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4468 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4469 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4470 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4471 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4472 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4473 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4474 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4475 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4476 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4477 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4478 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4479 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4480 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4481 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4482 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4483 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4484 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4485 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4486 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4487 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4488 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4489 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4490 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4491 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4492 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4493 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4494 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4495 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4496 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4497 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4498 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4499 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4500 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4501 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4502 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4503 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4504 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4505 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4506 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4507 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4508 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4509 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4510 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4511 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4512 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4513 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4514 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4515 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4516 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4517 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4518 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4519 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4520 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4521 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4522 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4523 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4524 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4525 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4526 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4527 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4528 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4529 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4530 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4531 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4532 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4533 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4534 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4535 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4536 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4537 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4538 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4539 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4540 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4541 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4542 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4543 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4544 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4545 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4546 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4547 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4548 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4549 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4550 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4551 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4552 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4553 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4554 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4555 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4556 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4557 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4558 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4559 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4560 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4561 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4562 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4563 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4564 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4565 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4566 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4567 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4568 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4569 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4570 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4571 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4572 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4573 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4574 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4575 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4576 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4577 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4578 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4579 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4580 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4581 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4582 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4583 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4584 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4585 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4586 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4587 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4588 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4589 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4590 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4591 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4592 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4593 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4594 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4595 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4596 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4597 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4598 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4599 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4600 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4601
4602 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4603 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4604 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4605 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4606 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4607 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4608 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4609 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4610 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4611 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4612 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4613 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4614 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4615 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4616 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4617
4618 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4619 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4620 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4621 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4622 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4623 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4624 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4625 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4626 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4627 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4628 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4629 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4630 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4631 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4632 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4633 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4634
4635 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4636 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4637 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4638 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4639
4640 #define ENC_BARLAR(x,n,lar) \
4641 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4642
4643 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4644 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4645
4646 SR_EXPAND_EL12 (PRBARn_ELx)
4647 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4648 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4649 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4650 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4651 SR_EXPAND_EL12 (PRLARn_ELx)
4652 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4653 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4654 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4655
4656 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4657 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4658 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4659 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4660 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4661 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4662 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4663
4664 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4665 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4666 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4667 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4668 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4669 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4670 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4671 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4672 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4673 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4674 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4675 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4676 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4677 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4678 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4679 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4680 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4681 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4682 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4683 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4684 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4685 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4686 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4687 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4688 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4689 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4690 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4691 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4692 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4693 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4694 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4695 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4696 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4697 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4698 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4699 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4700 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4701 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4702 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4703 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4704 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4705 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4706 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4707 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4708 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4709 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4710 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4711 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4712 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4713 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4714 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4715 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4716 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4717 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4718 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4719 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4720 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4721 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4722 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4723 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4724 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4725 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4726 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4727 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4728 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4729 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4730 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4731 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4732 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4733 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4734 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4735 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4736 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4737 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4738 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4739 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4740 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4741 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4742 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4743 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4744 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4745 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4746 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4747 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4748 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4749 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4750 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4751 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4752 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4753 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4754 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4755 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4756 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4757 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4758 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4759 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4760 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4761 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4762 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4763 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4764 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4765 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4766 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4767 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4768 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4769 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4770 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4771 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4772 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4773 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4774 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4775 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4776 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4777 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4778 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4779 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4780 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4781 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4782 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4783 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4784 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4785 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4786 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4787 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4788 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4789 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4790 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4791 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4792 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4793 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4794 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4795 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4796 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4797 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4798 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4799 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4800 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4801 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4802 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4803 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4804 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4805 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4806 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4807 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4808 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4809 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4810 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4811 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4812 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4813 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4814 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4815 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4816 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4817 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4818 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4819 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4820 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4821 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4822 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4823 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4824 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4825 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4826 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4827 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4828 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4829 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4830 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4831 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4832 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4833 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4834 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4835 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4836 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4837 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4838 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4839 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4840 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4841 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4842 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4843 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4844 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4845 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4846 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4847 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4848 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4849 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4850 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4851 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4852 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4853 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4854 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4855 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4856 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4857 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4858 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4859 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4860 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4861 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4862 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4863 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4864 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4865 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4866 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4867 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4868 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4869 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4870 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4871 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4872 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4873 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4874 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4875 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4876 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4877 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4878 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4879
4880 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4881 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4882 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4883 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4884 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4885 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4886 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4887 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4888 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4889 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4890 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4891 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4892
4893 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
4894 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4895 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4896 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4897 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4898
4899 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4900 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4901 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4902 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4903 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4904
4905 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4906 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4907 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4908 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4909 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4910 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4911 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4912 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4913 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4914 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4915 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4916 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4917 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4918 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4919 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4920 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4921 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4922 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4923 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4924 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4925 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4926 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4927 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4928 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4929 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4930 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4931 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4932 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4933 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4934 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4935 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4936 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4937 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4938 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4939 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4940 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4941 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4942 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4943 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4944 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4945 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4946 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4947 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4948 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4949 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4950 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4951 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4952 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4953 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4954 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4955 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4956 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4957 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4958 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4959 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4960 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4961 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4962 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4963 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4964 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4965 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4966 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4967 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4968 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4969 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4970 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4971 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4972 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4973 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4974 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4975 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4976 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4977 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4978 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4979 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4980 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4981 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4982 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4983 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4984 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4985 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4986 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4987 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4988 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4989 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4990 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4991 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4992 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4993 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4994 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4995 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4996 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4997 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4998 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4999 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
5000 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
5001 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
5002 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
5003 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
5004 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
5005 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
5006 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
5007 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
5008 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
5009 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
5010
5011 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
5012
5013 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
5014 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
5015 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
5016
5017 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
5018 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
5019 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
5020 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
5021 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
5022 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
5023 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
5024 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
5025 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
5026 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
5027 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
5028
5029 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
5030 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
5031 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
5032 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
5033 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
5034 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
5035 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
5036 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
5037 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
5038 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
5039 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
5040 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
5041 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
5042 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
5043 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
5044 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
5045 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
5046 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
5047 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
5048 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
5049 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
5050 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
5051 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
5052 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
5053 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
5054 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
5055 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
5056 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
5057 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
5058 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
5059 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
5060 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
5061 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
5062 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
5063 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
5064 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
5065 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
5066 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
5067 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
5068 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
5069 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
5070 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
5071 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
5072 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
5073 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
5074 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
5075 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
5076 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
5077
5078 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
5079 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
5080 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
5081 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
5082 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
5083 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
5084 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
5085 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
5086 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
5087 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
5088 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
5089 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
5090 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
5091 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
5092 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
5093 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
5094 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
5095 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
5096 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
5097 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
5098 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
5099 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
5100 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
5101 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
5102 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
5103 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
5104 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
5105 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
5106 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
5107 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
5108 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
5109 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
5110 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
5111 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
5112 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
5113 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
5114 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
5115 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
5116 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
5117 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
5118 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
5119 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
5120 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
5121 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
5122 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
5123 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
5124 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
5125 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
5126 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
5127 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
5128 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
5129 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
5130 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
5131 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
5132 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
5133
5134 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
5135 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
5136 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
5137 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
5138 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
5139 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
5140 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
5141 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
5142 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
5143 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
5144 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
5145 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
5146 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
5147 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
5148 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
5149 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
5150 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5151 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5152 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5153 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5154 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5155 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5156 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5157 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5158 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5159 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5160 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5161 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5162 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5163 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5164 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5165 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5166 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5167 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5168 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5169 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5170 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5171 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5172 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5173 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5174 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5175 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5176
5177 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5178 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5179
5180 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5181 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5182
5183 { 0, CPENC (0,0,0,0,0), 0, 0 }
5184 };
5185
5186 bool
5187 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5188 {
5189 return (reg_flags & F_DEPRECATED) != 0;
5190 }
5191
/* The CPENC below is somewhat misleading: the fields here are not in
   CPENC form; they are in op2:op1 form.  The fields are encoded by
   ins_pstatefield, which simply shifts the value by the width of the
   fields in a loop.  So if you CPENC them, only the first value will be
   set and the rest are masked out to 0.  As an example, with op2 = 3 and
   op1 = 2, CPENC would produce a value of 0b110000000001000000 (0x30040),
   while what you want is 0b011010 (0x1a).  */
/* PSTATE field operands.  Each entry's encoding is in op2:op1 form (see
   the comment above, and ins_pstatefield which consumes these values) —
   NOT full CPENC form.  F_REG_MAX_VALUE records the largest immediate
   accepted for the field.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel",	  0x05,	F_REG_MAX_VALUE (1)),
  SR_CORE ("daifset",	  0x1e,	F_REG_MAX_VALUE (15)),
  SR_CORE ("daifclr",	  0x1f,	F_REG_MAX_VALUE (15)),
  SR_PAN  ("pan",	  0x04, F_REG_MAX_VALUE (1)),
  SR_V8_2 ("uao",	  0x03, F_REG_MAX_VALUE (1)),
  SR_SSBS ("ssbs",	  0x19, F_REG_MAX_VALUE (1)),
  SR_V8_4 ("dit",	  0x1a,	F_REG_MAX_VALUE (1)),
  SR_MEMTAG ("tco",	  0x1c,	F_REG_MAX_VALUE (1)),
  /* The three SME SVCR aliases share encoding 0x1b; they are
     distinguished by the CRm pattern and immediate packed in by
     PSTATE_ENCODE_CRM_AND_IMM.  */
  SR_SME  ("svcrsm",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
				| F_REG_MAX_VALUE (1)),
  SR_SME  ("svcrza",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
				| F_REG_MAX_VALUE (1)),
  SR_SME  ("svcrsmza",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
				| F_REG_MAX_VALUE (1)),
  SR_V8_8 ("allint",	  0x08,	F_REG_MAX_VALUE (1)),
  { 0,	  CPENC (0,0,0,0,0), 0, 0 },	/* Table terminator.  */
};
5218
5219 bool
5220 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5221 const aarch64_sys_reg *reg)
5222 {
5223 if (!(reg->flags & F_ARCHEXT))
5224 return true;
5225
5226 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5227 }
5228
/* Operand table for the IC (instruction cache maintenance) system
   instruction.  Entries flagged F_HASXT take an <Xt> register operand;
   a null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
5236
/* Operand table for the DC (data cache maintenance) system instruction.
   F_HASXT: the operation takes an <Xt> register operand.
   F_ARCHEXT: the operation is gated on an architecture extension
   (checked when the operand is assembled).  A null name terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
5271
/* Operand table for the AT (address translate) system instruction.
   F_HASXT: the operation takes an <Xt> register operand.
   F_ARCHEXT: the operation is gated on an architecture extension.
   A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
5290
/* TLBI (TLB invalidate) system instructions, keyed by operand name.
   The encoding is built with CPENS (op1, CRn, CRm, op2).  F_HASXT
   marks operations that take an <Xt> register; F_ARCHEXT marks those
   that require an architecture extension and are filtered by
   aarch64_sys_ins_reg_supported_p.  A null name terminates the list.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable (*os) forms; all F_ARCHEXT-gated.  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range-based (r*) forms; all F_ARCHEXT-gated.  */
    { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos", CPENS (6, C8, C1, 4), 0},
    { "paall", CPENS (6, C8, C7, 4), 0},

    { 0, CPENS(0,0,0,0), 0 }
};
5381
/* Speculation-restriction (CFP/DVP/CPP) system instruction operands.
   A null name terminates the list.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
5392
5393 bool
5394 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5395 {
5396 return (sys_ins_reg->flags & F_HASXT) != 0;
5397 }
5398
/* Return true if the system instruction operand described by REG_NAME /
   REG_VALUE / REG_FLAGS / REG_FEATURES is available given the enabled
   feature set FEATURES.  REG_VALUE is a CPENS encoding; entries without
   F_ARCHEXT are available unconditionally.  For F_ARCHEXT entries the
   check falls back to hard-coded per-encoding feature tests below.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      /* Reject any operand whose name ends in "_el3".  */
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operands without F_ARCHEXT are part of the base architecture.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  /* If the table entry carries an explicit feature set, honour it.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return true;

  /* Required extension not enabled.  */
  return false;
}
5516
5517 #undef C0
5518 #undef C1
5519 #undef C2
5520 #undef C3
5521 #undef C4
5522 #undef C5
5523 #undef C6
5524 #undef C7
5525 #undef C8
5526 #undef C9
5527 #undef C10
5528 #undef C11
5529 #undef C12
5530 #undef C13
5531 #undef C14
5532 #undef C15
5533
5534 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5535 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5536
5537 static enum err_type
5538 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5539 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5540 bool encoding ATTRIBUTE_UNUSED,
5541 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5542 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5543 {
5544 int t = BITS (insn, 4, 0);
5545 int n = BITS (insn, 9, 5);
5546 int t2 = BITS (insn, 14, 10);
5547
5548 if (BIT (insn, 23))
5549 {
5550 /* Write back enabled. */
5551 if ((t == n || t2 == n) && n != 31)
5552 return ERR_UND;
5553 }
5554
5555 if (BIT (insn, 22))
5556 {
5557 /* Load */
5558 if (t == t2)
5559 return ERR_UND;
5560 }
5561
5562 return ERR_OK;
5563 }
5564
5565 /* Verifier for vector by element 3 operands functions where the
5566 conditions `if sz:L == 11 then UNDEFINED` holds. */
5567
5568 static enum err_type
5569 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5570 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5571 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5572 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5573 {
5574 const aarch64_insn undef_pattern = 0x3;
5575 aarch64_insn value;
5576
5577 assert (inst->opcode);
5578 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5579 value = encoding ? inst->value : insn;
5580 assert (value);
5581
5582 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5583 return ERR_UND;
5584
5585 return ERR_OK;
5586 }
5587
5588 /* Check an instruction that takes three register operands and that
5589 requires the register numbers to be distinct from one another. */
5590
5591 static enum err_type
5592 verify_three_different_regs (const struct aarch64_inst *inst,
5593 const aarch64_insn insn ATTRIBUTE_UNUSED,
5594 bfd_vma pc ATTRIBUTE_UNUSED,
5595 bool encoding ATTRIBUTE_UNUSED,
5596 aarch64_operand_error *mismatch_detail
5597 ATTRIBUTE_UNUSED,
5598 aarch64_instr_sequence *insn_sequence
5599 ATTRIBUTE_UNUSED)
5600 {
5601 int rd, rs, rn;
5602
5603 rd = inst->operands[0].reg.regno;
5604 rs = inst->operands[1].reg.regno;
5605 rn = inst->operands[2].reg.regno;
5606 if (rd == rs || rd == rn || rs == rn)
5607 {
5608 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5609 mismatch_detail->error
5610 = _("the three register operands must be distinct from one another");
5611 mismatch_detail->index = -1;
5612 return ERR_UND;
5613 }
5614
5615 return ERR_OK;
5616 }
5617
5618 /* Add INST to the end of INSN_SEQUENCE. */
5619
5620 static void
5621 add_insn_to_sequence (const struct aarch64_inst *inst,
5622 aarch64_instr_sequence *insn_sequence)
5623 {
5624 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5625 }
5626
5627 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5628 If INST is NULL the given insn_sequence is cleared and the sequence is left
5629 uninitialized. */
5630
5631 void
5632 init_insn_sequence (const struct aarch64_inst *inst,
5633 aarch64_instr_sequence *insn_sequence)
5634 {
5635 int num_req_entries = 0;
5636
5637 if (insn_sequence->instr)
5638 {
5639 XDELETE (insn_sequence->instr);
5640 insn_sequence->instr = NULL;
5641 }
5642
5643 /* Handle all the cases here. May need to think of something smarter than
5644 a giant if/else chain if this grows. At that time, a lookup table may be
5645 best. */
5646 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5647 num_req_entries = 1;
5648 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5649 num_req_entries = 2;
5650
5651 insn_sequence->num_added_insns = 0;
5652 insn_sequence->num_allocated_insns = num_req_entries;
5653
5654 if (num_req_entries != 0)
5655 {
5656 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5657 add_insn_to_sequence (inst, insn_sequence);
5658 }
5659 }
5660
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* The last instruction added to the open sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The P/M/E opcodes sit consecutively in the opcode table, so the
     expected successor of PREV_INSN's opcode is the next table entry
     (prev_insn->opcode + 1 == opcode).  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-initial M/E instruction must directly follow its
	 predecessor in the table, in the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The address and size registers must match across the sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5737
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast path: nothing to scan and no open sequence.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while one is still open is itself a
	 (non-fatal) violation.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* When disassembling, PC == 0 marks the start of a section.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Scan the operands: count uses of the movprfx destination among
	     the vector registers, track the widest element size seen, and
	     remember the governing predicate operand, if any.  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
6028
6029
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t narrowed = uvalue;
  /* Mask of the bits above the (ESIZE * 8)-bit field; the double shift
     avoids undefined behaviour when ESIZE == 8.  */
  uint64_t top = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must be either zero-extended or sign-extended from the
     low ESIZE bytes.  */
  if ((uvalue & ~top) != uvalue && (uvalue | top) != uvalue)
    return false;

  /* Narrow to the smallest replicated element.  Anything that
     replicates down to a single byte is DUP territory, not DUPM.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      narrowed = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  narrowed = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP also covers byte immediates optionally shifted left by 8.  */
  if ((narrowed & 0xff) == 0)
    narrowed /= 256;
  return narrowed < -128 || narrowed >= 128;
}
6056
6057 /* Include the opcode description table as well as the operand description
6058 table. */
6059 #define VERIFIER(x) verify_##x
6060 #include "aarch64-tbl.h"
6061