aarch64-opc.c revision 1.10 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2025 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables debug output in DEBUG_AARCH64 builds; initialized off.
   NOTE(review): presumably consulted by the debug tracing macros declared
   in aarch64-opc.h -- confirm there.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning; the index
   into the array is the 5-bit encoding of the pattern.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning; the index
   into the array is the 4-bit encoding of the operation.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
/* The enumeration strings associated with each value of a 6-bit RPRFM
   operation.  Only the first six encodings are named; the remaining
   entries are implicitly zero-initialized (reserved).  */
const char *const aarch64_rprfmop_array[64] = {
  "pldkeep",
  "pstkeep",
  0,
  0,
  "pldstrm",
  "pststrm"
};
112
/* Vector length multiples for a predicate-as-counter operand.  Used in things
   like AARCH64_OPND_SME_VLxN_10.  Index 0 selects "vlx2", index 1 "vlx4".  */
const char *const aarch64_sme_vlxn_array[2] = {
  "vlx2",
  "vlx4"
};
119
/* Values accepted by the brb alias: index 0 is "iall", index 1 is "inj".  */
const char *const aarch64_brbop_array[] = {
  "iall",
  "inj",
};
125
126 /* Helper functions to determine which operand to be used to encode/decode
127 the size:Q fields for AdvSIMD instructions. */
128
129 static inline bool
130 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
131 {
132 return (qualifier >= AARCH64_OPND_QLF_V_8B
133 && qualifier <= AARCH64_OPND_QLF_V_1Q);
134 }
135
136 static inline bool
137 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
138 {
139 return (qualifier >= AARCH64_OPND_QLF_S_B
140 && qualifier <= AARCH64_OPND_QLF_S_Q);
141 }
142
/* Classification of an AdvSIMD qualifier sequence; used to select which
   operand encodes the size:Q fields (see significant_operand_index).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
151
/* For each data pattern, the index of the operand that carries the
   size:Q information.  Keep in sync with 'enum data_pattern'.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
160
161 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
162 the data pattern.
163 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
164 corresponds to one of a sequence of operands. */
165
166 static enum data_pattern
167 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
168 {
169 if (vector_qualifier_p (qualifiers[0]))
170 {
171 /* e.g. v.4s, v.4s, v.4s
172 or v.4h, v.4h, v.h[3]. */
173 if (qualifiers[0] == qualifiers[1]
174 && vector_qualifier_p (qualifiers[2])
175 && (aarch64_get_qualifier_esize (qualifiers[0])
176 == aarch64_get_qualifier_esize (qualifiers[1]))
177 && (aarch64_get_qualifier_esize (qualifiers[0])
178 == aarch64_get_qualifier_esize (qualifiers[2])))
179 return DP_VECTOR_3SAME;
180 /* e.g. v.8h, v.8b, v.8b.
181 or v.4s, v.4h, v.h[2].
182 or v.8h, v.16b. */
183 if (vector_qualifier_p (qualifiers[1])
184 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
185 && (aarch64_get_qualifier_esize (qualifiers[0])
186 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
187 return DP_VECTOR_LONG;
188 /* e.g. v.8h, v.8h, v.8b. */
189 if (qualifiers[0] == qualifiers[1]
190 && vector_qualifier_p (qualifiers[2])
191 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
192 && (aarch64_get_qualifier_esize (qualifiers[0])
193 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
194 && (aarch64_get_qualifier_esize (qualifiers[0])
195 == aarch64_get_qualifier_esize (qualifiers[1])))
196 return DP_VECTOR_WIDE;
197 }
198 else if (fp_qualifier_p (qualifiers[0]))
199 {
200 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
201 if (vector_qualifier_p (qualifiers[1])
202 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
203 return DP_VECTOR_ACROSS_LANES;
204 }
205
206 return DP_UNKNOWN;
207 }
208
209 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
210 the AdvSIMD instructions. */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either cache the calculated result or statically generate the data;
   however, it is not obvious that the optimization will bring significant
   benefit.  */
216
217 int
218 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
219 {
220 return
221 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
222 }
223
/* Instruction bit-fields, given as { lsb, width } pairs.
   Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 10,  2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 10,  8 }, /* CSSC_imm8.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    {  0,  5 }, /* LSE128_Rt: Shared input+output operand register.  */
    { 16,  5 }, /* LSE128_Rt2: Shared input+output operand register 2.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 12,  2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    {  1,  3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1].  */
    { 13,  3 }, /* SME_Pm: second source scalable predicate register P0-P7.  */
    {  0,  3 }, /* SME_PNd3: PN0-PN7, bits [2:0].  */
    {  5,  3 }, /* SME_PNn3: PN0-PN7, bits [7:5].  */
    { 16,  1 }, /* SME_Q: Q class bit, bit 16.  */
    { 16,  2 }, /* SME_Rm: index base register W12-W15 [17:16].  */
    { 13,  2 }, /* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 15,  1 }, /* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 10,  1 }, /* SME_VL_10: VLx2 or VLx4, bit [10].  */
    { 13,  1 }, /* SME_VL_13: VLx2 or VLx4, bit [13].  */
    {  0,  1 }, /* SME_ZAda_1b: tile ZA0-ZA1.  */
    {  0,  2 }, /* SME_ZAda_2b: tile ZA0-ZA3.  */
    {  0,  3 }, /* SME_ZAda_3b: tile ZA0-ZA7.  */
    {  1,  4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1].  */
    {  2,  3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2].  */
    { 16,  4 }, /* SME_Zm: Z0-Z15, bits [19:16].  */
    { 17,  4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17].  */
    { 18,  3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18].  */
    {  6,  4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6].  */
    {  7,  3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7].  */
    {  4,  1 }, /* SME_ZtT: upper bit of Zt, bit [4].  */
    {  0,  3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0].  */
    {  0,  2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0].  */
    { 23,  1 }, /* SME_i1: immediate field, bit 23.  */
    { 12,  2 }, /* SME_size_12: bits [13:12].  */
    { 22,  2 }, /* SME_size_22: size<1>, size<0> class field, [23:22].  */
    { 23,  1 }, /* SME_sz_23: bit [23].  */
    { 22,  1 }, /* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18,  3 }, /* SME_tszl: immediate and qualifier field, bits [20:18].  */
    {  0,  8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
    { 23,  1 }, /* SVE_i1_23: single-bit immediate.  */
    { 22,  2 }, /* SVE_i2: 2-bit index, bits [23,22].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20.  */
    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19].  */
    { 22,  2 }, /* SVE_i3h3: two high bits of 3-bit immediate, bits [23,22].  */
    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 12,  1 }, /* SVE_i3l2: low bit of 3-bit immediate, bit 12.  */
    { 10,  2 }, /* SVE_i4l2: two low bits of 4-bit immediate, bits [11,10].  */
    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
    { 17,  1 }, /* SVE_sz3: 1-bit element size select.  */
    { 14,  1 }, /* SVE_sz4: 1-bit element size select.  */
    { 16,  4 }, /* SVE_tsz: triangular size select.  */
    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 22,  1 }, /* S_imm10: in LDRAA and LDRAB instructions.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    {  0,  1 },	/* imm1_0: general immediate in bits [0].  */
    {  2,  1 },	/* imm1_2: general immediate in bits [2].  */
    {  3,  1 },	/* imm1_3: general immediate in bits [3].  */
    {  8,  1 },	/* imm1_8: general immediate in bits [8].  */
    { 10,  1 },	/* imm1_10: general immediate in bits [10].  */
    { 14,  1 },	/* imm1_14: general immediate in bits [14].  */
    { 15,  1 },	/* imm1_15: general immediate in bits [15].  */
    { 16,  1 },	/* imm1_16: general immediate in bits [16].  */
    {  0,  2 },	/* imm2_0: general immediate in bits [1:0].  */
    {  1,  2 },	/* imm2_1: general immediate in bits [2:1].  */
    {  2,  2 },	/* imm2_2: general immediate in bits [3:2].  */
    {  8,  2 },	/* imm2_8: general immediate in bits [9:8].  */
    { 10,  2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
    { 12,  2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
    { 13,  2 }, /* imm2_13: 2-bit immediate, bits [14:13] */
    { 15,  2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
    { 16,  2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
    { 19,  2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
    {  0,  3 },	/* imm3_0: general immediate in bits [2:0].  */
    {  5,  3 },	/* imm3_5: general immediate in bits [7:5].  */
    { 10,  3 },	/* imm3_10: in add/sub extended reg instructions.  */
    { 12,  3 },	/* imm3_12: general immediate in bits [14:12].  */
    { 14,  3 },	/* imm3_14: general immediate in bits [16:14].  */
    { 15,  3 },	/* imm3_15: general immediate in bits [17:15].  */
    { 19,  3 },	/* imm3_19: general immediate in bits [21:19].  */
    {  0,  4 },	/* imm4_0: in rmif instructions.  */
    {  5,  4 }, /* imm4_5: in SME instructions.  */
    { 10,  4 },	/* imm4_10: in addg/subg instructions.  */
    { 11,  4 },	/* imm4_11: in advsimd ext and advsimd ins instructions.  */
    { 14,  4 },	/* imm4_14: general immediate in bits [17:14].  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 10,  6 },	/* imm6_10: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_15: in rmif instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    {  5,  9 },	/* imm9_5: in CB<cc> (immediate).  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  0, 16 },	/* imm16_0: in udf instruction.  */
    {  5, 16 },	/* imm16_5: in exception instructions.  */
    { 17,  1 },	/* imm17_1: in 1 bit element index.  */
    { 17,  2 },	/* imm17_2: in 2 bits element index.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 30,  1 },	/* lse_sz: in LSE extension atomic instructions.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 22,  1 },	/* sz: 1-bit element size select.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    {  5,  3 }, /* off3: immediate offset used to calculate slice number in a
		   ZA tile.  */
    {  5,  2 }, /* off2: immediate offset used to calculate slice number in
		   a ZA tile.  */
    {  7,  1 }, /* ZAn_1: name of the 1-bit encoded ZA tile.  */
    {  5,  1 }, /* ol: immediate offset used to calculate slice number in a ZA
		   tile.  */
    {  6,  2 }, /* ZAn_2: name of the 2-bit encoded ZA tile.  */
    {  5,  3 }, /* ZAn_3: name of the 3-bit encoded ZA tile.  */
    {  6,  1 }, /* ZAn: name of the bit encoded ZA tile.  */
    { 12,  4 },	/* opc2: in rcpc3 ld/st inst deciding the pre/post-index.  */
    { 30,  2 },	/* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width.  */
    {  5,  1 },	/* FLD_brbop: used in BRB to mean IALL or INJ.  */
    {  8,  1 },	/* ZA8_1: name of the 1 bit encoded ZA tile ZA0-ZA1.  */
    {  7,  2 },	/* ZA7_2: name of the 2 bits encoded ZA tile ZA0-ZA3.  */
    {  6,  3 },	/* ZA6_3: name of the 3 bits encoded ZA tile ZA0-ZA7.  */
    {  5,  4 },	/* ZA5_4: name of the 4 bits encoded ZA tile ZA0-ZA15.  */
};
444
445 enum aarch64_operand_class
446 aarch64_get_operand_class (enum aarch64_opnd type)
447 {
448 return aarch64_operands[type].op_class;
449 }
450
451 const char *
452 aarch64_get_operand_name (enum aarch64_opnd type)
453 {
454 return aarch64_operands[type].name;
455 }
456
457 /* Get operand description string.
458 This is usually for the diagnosis purpose. */
459 const char *
460 aarch64_get_operand_desc (enum aarch64_opnd type)
461 {
462 return aarch64_operands[type].desc;
463 }
464
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists all spellings accepted for that value (e.g. the
   SVE-style aliases "none", "any", ...).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
485
486 const aarch64_cond *
487 get_cond_from_value (aarch64_insn value)
488 {
489 assert (value < 16);
490 return &aarch64_conds[(unsigned int) value];
491 }
492
493 const aarch64_cond *
494 get_inverted_cond (const aarch64_cond *cond)
495 {
496 return &aarch64_conds[cond->value ^ 0x1];
497 }
498
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is NULL-terminated.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
524
525 enum aarch64_modifier_kind
526 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
527 {
528 return desc - aarch64_operand_modifiers;
529 }
530
531 aarch64_insn
532 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
533 {
534 return aarch64_operand_modifiers[kind].value;
535 }
536
537 enum aarch64_modifier_kind
538 aarch64_get_operand_modifier_from_value (aarch64_insn value,
539 bool extend_p)
540 {
541 if (extend_p)
542 return AARCH64_MOD_UXTB + value;
543 else
544 return AARCH64_MOD_LSL - value;
545 }
546
547 bool
548 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
549 {
550 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
551 }
552
553 static inline bool
554 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
555 {
556 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
557 }
558
/* Memory barrier (DMB/DSB) options, indexed by the 4-bit CRm encoding.
   Reserved encodings print as their raw "#0x.." value.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
578
/* DSB nXS barrier options.  The value column stores the immediate with
   CRm<3:2> already shifted into place (value = 16 + (CRm<3:2> << 2)).  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
586
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     The single-space name with HINT_OPD_F_NOPRINT suppresses printing of
     the operand for plain BTI.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "dsync",	HINT_OPD_DSYNC },	/* GCSB DSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { "keep",	HINT_OPD_KEEP },	/* STSHH KEEP.  */
  { "strm",	HINT_OPD_STRM },	/* STSHH STRM.  */
  { NULL,	HINT_OPD_NULL },
};
607
/* PRFM prefetch operations, indexed by the 5-bit prfop encoding.
   The B macro packs the three sub-fields:
     op -> op: load = 0 instruction = 1 store = 2
     l  -> level: 1-3
     t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   Entries with a NULL name are reserved and print as raw immediates.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "pldslckeep", B(0, 4, 0) },
  { "pldslcstrm", B(0, 4, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "plislckeep", B(1, 4, 0) },
  { "plislcstrm", B(1, 4, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { "pstslckeep", B(2, 4, 0) },
  { "pstslcstrm", B(2, 4, 1) },
  { "ir", B(3, 1, 0) },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
648
649 /* Utilities on value constraint. */
651
/* Return true if VALUE lies in the inclusive range [LOW, HIGH].  */
static inline bool
value_in_range_p (int64_t value, int64_t low, int64_t high)
{
  return value >= low && value <= high;
}
657
/* Return true if VALUE is a multiple of ALIGN.  ALIGN must be non-zero.  */
static inline bool
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
664
/* Return true if the signed VALUE fits in a two's-complement field of
   WIDTH bits, i.e. lies in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline bool
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  int64_t lim = (uint64_t) 1 << (width - 1);
  return value >= -lim && value < lim;
}
678
/* Return true if the non-negative VALUE fits in an unsigned field of
   WIDTH bits, i.e. lies in [0, 2^WIDTH - 1].  */
static inline bool
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return false;
  int64_t lim = (uint64_t) 1 << width;
  return value >= 0 && value < lim;
}
692
693 /* Return true if OPERAND is SP or WSP. */
694 bool
695 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
696 {
697 return ((aarch64_get_operand_class (operand->type)
698 == AARCH64_OPND_CLASS_INT_REG)
699 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
700 && operand->reg.regno == 31);
701 }
702
703 /* Return 1 if OPERAND is XZR or WZP. */
704 int
705 aarch64_zero_register_p (const aarch64_opnd_info *operand)
706 {
707 return ((aarch64_get_operand_class (operand->type)
708 == AARCH64_OPND_CLASS_INT_REG)
709 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
710 && operand->reg.regno == 31);
711 }
712
713 /* Return true if the operand *OPERAND that has the operand code
714 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
715 qualified by the qualifier TARGET. */
716
717 static inline bool
718 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
719 aarch64_opnd_qualifier_t target)
720 {
721 switch (operand->qualifier)
722 {
723 case AARCH64_OPND_QLF_W:
724 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
725 return true;
726 break;
727 case AARCH64_OPND_QLF_X:
728 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
729 return true;
730 break;
731 case AARCH64_OPND_QLF_WSP:
732 if (target == AARCH64_OPND_QLF_W
733 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
734 return true;
735 break;
736 case AARCH64_OPND_QLF_SP:
737 if (target == AARCH64_OPND_QLF_X
738 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
739 return true;
740 break;
741 default:
742 break;
743 }
744
745 return false;
746 }
747
748 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
749 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
750
751 Return NIL if more than one expected qualifiers are found. */
752
753 aarch64_opnd_qualifier_t
754 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
755 int idx,
756 const aarch64_opnd_qualifier_t known_qlf,
757 int known_idx)
758 {
759 int i, saved_i;
760
761 /* Special case.
762
763 When the known qualifier is NIL, we have to assume that there is only
764 one qualifier sequence in the *QSEQ_LIST and return the corresponding
765 qualifier directly. One scenario is that for instruction
766 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
767 which has only one possible valid qualifier sequence
768 NIL, S_D
769 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
770 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
771
772 Because the qualifier NIL has dual roles in the qualifier sequence:
773 it can mean no qualifier for the operand, or the qualifer sequence is
774 not in use (when all qualifiers in the sequence are NILs), we have to
775 handle this special case here. */
776 if (((enum aarch64_opnd) known_qlf) == AARCH64_OPND_NIL)
777 {
778 assert (((enum aarch64_opnd) qseq_list[0][known_idx]) == AARCH64_OPND_NIL);
779 return qseq_list[0][idx];
780 }
781
782 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
783 {
784 if (qseq_list[i][known_idx] == known_qlf)
785 {
786 if (saved_i != -1)
787 /* More than one sequences are found to have KNOWN_QLF at
788 KNOWN_IDX. */
789 return AARCH64_OPND_QLF_NIL;
790 saved_i = i;
791 }
792 }
793
794 return qseq_list[saved_i][idx];
795 }
796
/* Kind of an operand qualifier; determines how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Operand variant: esize / nelem / encoding value.  */
  OQK_VALUE_IN_RANGE,	/* Value range constraint: lower / upper bound.  */
  OQK_MISC,		/* Miscellaneous; data fields unused.  */
};
804
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     common encoding value; for OQK_VALUE_IN_RANGE they are lower bound,
     upper bound and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description, as printed in diagnostics and qualifier names.  */
  const char *desc;
  /* Kind, selecting the interpretation of data0..data2.  */
  enum operand_qualifier_kind kind;
};
817
/* Indexed by the operand qualifier enumerators; keep in sync with
   aarch64_opnd_qualifier_t.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {2, 1, 0x0, "2b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     The kind field is left as 0; within this file only OQK_OPD_VARIANT
     and OQK_VALUE_IN_RANGE are ever queried.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
880
881 static inline bool
882 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
883 {
884 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
885 }
886
887 static inline bool
888 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
889 {
890 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
891 }
892
893 const char*
894 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
895 {
896 return aarch64_opnd_qualifiers[qualifier].desc;
897 }
898
899 /* Given an operand qualifier, return the expected data element size
900 of a qualified operand. */
901 unsigned char
902 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
903 {
904 assert (operand_variant_qualifier_p (qualifier));
905 return aarch64_opnd_qualifiers[qualifier].data0;
906 }
907
908 unsigned char
909 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
910 {
911 assert (operand_variant_qualifier_p (qualifier));
912 return aarch64_opnd_qualifiers[qualifier].data1;
913 }
914
915 aarch64_insn
916 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
917 {
918 assert (operand_variant_qualifier_p (qualifier));
919 return aarch64_opnd_qualifiers[qualifier].data2;
920 }
921
922 static int
923 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
924 {
925 assert (qualifier_value_in_range_constraint_p (qualifier));
926 return aarch64_opnd_qualifiers[qualifier].data0;
927 }
928
929 static int
930 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
931 {
932 assert (qualifier_value_in_range_constraint_p (qualifier));
933 return aarch64_opnd_qualifiers[qualifier].data1;
934 }
935
936 #ifdef DEBUG_AARCH64
/* printf-style debug tracing helper: emit "#### " followed by the
   formatted message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
947
948 static inline void
949 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
950 {
951 int i;
952 printf ("#### \t");
953 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
954 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
955 printf ("\n");
956 }
957
958 static void
959 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
960 const aarch64_opnd_qualifier_t *qualifier)
961 {
962 int i;
963 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
964
965 aarch64_verbose ("dump_match_qualifiers:");
966 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
967 curr[i] = opnd[i].qualifier;
968 dump_qualifier_sequence (curr);
969 aarch64_verbose ("against");
970 dump_qualifier_sequence (qualifier);
971 }
972 #endif /* DEBUG_AARCH64 */
973
974 /* This function checks if the given instruction INSN is a destructive
975 instruction based on the usage of the registers. It does not recognize
976 unary destructive instructions. */
977 bool
978 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
979 {
980 int i = 0;
981 const enum aarch64_opnd *opnds = opcode->operands;
982
983 if (opnds[0] == AARCH64_OPND_NIL)
984 return false;
985
986 while (opnds[++i] != AARCH64_OPND_NIL)
987 if (opnds[i] == opnds[0])
988 return true;
989
990 return false;
991 }
992
993 /* TODO improve this, we can have an extra field at the runtime to
994 store the number of operands rather than calculating it every time. */
995
996 int
997 aarch64_num_of_operands (const aarch64_opcode *opcode)
998 {
999 int i = 0;
1000 const enum aarch64_opnd *opnds = opcode->operands;
1001 while (opnds[i++] != AARCH64_OPND_NIL)
1002 ;
1003 --i;
1004 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
1005 return i;
1006 }
1007
1008 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
1009 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
1010
1011 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1012 This is always 0 if the function succeeds.
1013
1014 N.B. on the entry, it is very likely that only some operands in *INST
1015 have had their qualifiers been established.
1016
1017 If STOP_AT is not -1, the function will only try to match
1018 the qualifier sequence for operands before and including the operand
1019 of index STOP_AT; and on success *RET will only be filled with the first
1020 (STOP_AT+1) qualifiers.
1021
1022 A couple examples of the matching algorithm:
1023
1024 X,W,NIL should match
1025 X,W,NIL
1026
1027 NIL,NIL should match
1028 X ,NIL
1029
1030 Apart from serving the main encoding routine, this can also be called
1031 during or after the operand decoding. */
1032
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Clamp STOP_AT so the loops below always stay within the operand
     list; -1 (or any out-of-range value) means "match all operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  MIN_INVALID can only be zero here via
	 the "break" above, which leaves QUALIFIERS_LIST pointing at the
	 sequence that matched.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1135
1136 /* Operand qualifier matching and resolving.
1137
1138 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1139 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1140
1141 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1142 This is always 0 if the function succeeds.
1143
1144 if UPDATE_P, update the qualifier(s) in *INST after the matching
1145 succeeds. */
1146
static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find the qualifier sequence (over all operands, hence -1) that best
     matches the qualifiers already present in *INST.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  The loop stops at the first NIL operand,
     i.e. it only touches operands the opcode actually has.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1177
1178 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1179 register by MOVZ.
1180
1181 IS32 indicates whether value is a 32-bit immediate or not.
1182 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1183 amount will be returned in *SHIFT_AMOUNT. */
1184
bool
aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
{
  int amount = -1;
  int max_shift;
  int shift;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
	/* Immediate out of range.  */
	return false;
      value &= 0xffffffff;
    }

  /* A MOVZ-encodable value is a single 16-bit chunk at a 16-bit-aligned
     position; try each legal shift in ascending order.  */
  max_shift = is32 ? 16 : 48;
  for (shift = 0; shift <= max_shift; shift += 16)
    if ((value & ((uint64_t) 0xffff << shift)) == value)
      {
	amount = shift;
	break;
      }

  if (amount == -1)
    {
      DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return false;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit true with amount %d", amount);

  return true;
}
1227
1228 /* Build the accepted values for immediate logical SIMD instructions.
1229
1230 The standard encodings of the immediate value are:
1231 N imms immr SIMD size R S
1232 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1233 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1234 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1235 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1236 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1237 0 11110s 00000r 2 UInt(r) UInt(s)
1238 where all-ones value of S is reserved.
1239
1240 Let's call E the SIMD size.
1241
1242 The immediate value is: S+1 bits '1' rotated to the right by R.
1243
1244 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1245 (remember S != E - 1). */
1246
/* Number of valid logical-immediate encodings; see the derivation in the
   comment above.  */
#define TOTAL_IMM_NB 5334

/* One (immediate value, standard encoding) pair.  The table below is
   sorted by IMM (see build_immediate_table) so it can be bsearch'd.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lazily built on first use by aarch64_logical_immediate_p.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1256
1257 static int
1258 simd_imm_encoding_cmp(const void *i1, const void *i2)
1259 {
1260 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1261 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1262
1263 if (imm1->imm < imm2->imm)
1264 return -1;
1265 if (imm1->imm > imm2->imm)
1266 return +1;
1267 return 0;
1268 }
1269
1270 /* immediate bitfield standard encoding
1271 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1272 1 ssssss rrrrrr 64 rrrrrr ssssss
1273 0 0sssss 0rrrrr 32 rrrrr sssss
1274 0 10ssss 00rrrr 16 rrrr ssss
1275 0 110sss 000rrr 8 rrr sss
1276 0 1110ss 0000rr 4 rr ss
1277 0 11110s 00000r 2 r s */
/* Pack the logical-immediate fields into the standard imm13 layout:
   bit 12 is N (IS64), bits 11:6 are immr (R), bits 5:0 are imms (S).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1283
/* Fill simd_immediates[] with every valid logical-immediate value and its
   standard encoding, then sort it by immediate value so that
   aarch64_logical_immediate_p can binary-search it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size.  The cases
	       fall through deliberately: a 2-bit pattern is widened to
	       4 bits, then 8, 16, 32 and finally 64.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1349
1350 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1351 be accepted by logical (immediate) instructions
1352 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1353
1354 ESIZE is the number of bytes in the decoded immediate value.
1355 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1356 VALUE will be returned in *ENCODING. */
1357
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the lookup table lazily on first use.  NOTE(review): this
     initialisation is not guarded against concurrent first calls —
     confirm single-threaded use if that matters.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     Shifting twice by ESIZE*4 rather than once by ESIZE*8 avoids an
     undefined full-width (64-bit) shift when ESIZE is 8.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Look the replicated value up in the sorted table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1401
1402 /* If 64-bit immediate IMM is in the format of
1403 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1404 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1405 of value "abcdefgh". Otherwise return -1. */
/* If 64-bit immediate IMM consists of eight bytes that are each either
   0x00 or 0xff, return the 8-bit value whose bit I is set iff byte I of
   IMM is 0xff.  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      if (byte == 0xff)
	result |= 1 << i;
      else if (byte != 0x00)
	/* A mixed byte disqualifies the whole value.  */
	return -1;
    }
  return result;
}
1423
1424 /* Utility inline functions for operand_general_constraint_met_p. */
1425
/* Fill MISMATCH_DETAIL with error KIND, operand index IDX and static
   message ERROR.  A NULL MISMATCH_DETAIL makes this a no-op, which lets
   callers skip diagnostics (e.g. during disassembly; see the comment
   above operand_general_constraint_met_p).  */
static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}
1437
/* Record a syntax error against operand IDX with static message ERROR.  */
static inline void
set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
		  const char* error)
{
  /* Redundant with the guard inside set_error, but kept for symmetry
     with the other setters.  */
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
}
1446
/* Record that operand IDX should be a PREFIX-register (e.g. "z", "pn")
   numbered between LOWER_BOUND and UPPER_BOUND inclusive.  The prefix and
   bounds are stashed in the data[] payload for later message
   formatting.  */
static inline void
set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
			 const char *prefix, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
  mismatch_detail->data[0].s = prefix;
  mismatch_detail->data[1].i = lower_bound;
  mismatch_detail->data[2].i = upper_bound;
}
1458
/* Record that the value of operand IDX lies outside
   [LOWER_BOUND, UPPER_BOUND].  ERROR names the kind of value
   (e.g. "immediate value"); the bounds go into the data[] payload.  */
static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0].i = lower_bound;
  mismatch_detail->data[1].i = upper_bound;
}
1470
/* Convenience wrapper: out-of-range "immediate value" error for
   operand IDX.  */
static inline void
set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
			    int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate value"));
}
1480
/* Convenience wrapper: out-of-range "immediate offset" error for
   operand IDX.  */
static inline void
set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
			       int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate offset"));
}
1490
/* Convenience wrapper: out-of-range "register number" error for
   operand IDX.  */
static inline void
set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
			      int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register number"));
}
1500
/* Convenience wrapper: out-of-range "register element index" error for
   operand IDX.  */
static inline void
set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
				 int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register element index"));
}
1510
/* Convenience wrapper: out-of-range "shift amount" error for
   operand IDX.  */
static inline void
set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("shift amount"));
}
1520
1521 /* Report that the MUL modifier in operand IDX should be in the range
1522 [LOWER_BOUND, UPPER_BOUND]. */
/* Convenience wrapper: out-of-range "multiplier" (MUL modifier) error
   for operand IDX.  */
static inline void
set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("multiplier"));
}
1532
/* Record that operand IDX must be a multiple of ALIGNMENT; the required
   alignment is stashed in the data[] payload.  */
static inline void
set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
		     int alignment)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
  mismatch_detail->data[0].i = alignment;
}
1542
/* Record that register-list operand IDX should have EXPECTED_NUM
   registers.  NOTE(review): the payload stores 1 << EXPECTED_NUM, i.e. a
   bit mask of acceptable lengths — presumably the error reporter decodes
   it that way; verify against the consumer.  */
static inline void
set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
			   int expected_num)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
  mismatch_detail->data[0].i = 1 << expected_num;
}
1552
/* Record that register-list operand IDX should have a register stride of
   EXPECTED_NUM.  Like set_reg_list_length_error, the payload holds
   1 << EXPECTED_NUM (a bit-mask encoding).  */
static inline void
set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
			   int expected_num)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
  mismatch_detail->data[0].i = 1 << expected_num;
}
1562
/* Record that operand IDX should have a vector-group size of EXPECTED
   (e.g. VGx2/VGx4 in SME syntax).  */
static inline void
set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
		     int idx, int expected)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
  mismatch_detail->data[0].i = expected;
}
1572
/* Record a catch-all error against operand IDX with static message
   ERROR.  */
static inline void
set_other_error (aarch64_operand_error *mismatch_detail, int idx,
		 const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
}
1581
1582 /* Check that indexed register operand OPND has a register in the range
1583 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1584 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1585
static bool
check_reglane (const aarch64_opnd_info *opnd,
	       aarch64_operand_error *mismatch_detail, int idx,
	       const char *prefix, int min_regno, int max_regno,
	       int min_index, int max_index)
{
  /* Register number outside the allowed subset (e.g. not z0-z15).  */
  if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
    {
      set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
			       max_regno);
      return false;
    }
  /* Element index outside the range implied by the element size.  */
  if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
    {
      set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
				       max_index);
      return false;
    }
  return true;
}
1606
1607 /* Check that register list operand OPND has NUM_REGS registers and a
1608 register stride of STRIDE. */
1609
1610 static bool
1611 check_reglist (const aarch64_opnd_info *opnd,
1612 aarch64_operand_error *mismatch_detail, int idx,
1613 int num_regs, int stride)
1614 {
1615 if (opnd->reglist.num_regs != num_regs)
1616 {
1617 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1618 return false;
1619 }
1620 if (opnd->reglist.stride != stride)
1621 {
1622 set_reg_list_stride_error (mismatch_detail, idx, stride);
1623 return false;
1624 }
1625 return true;
1626 }
1627
/* An inclusive [min, max] range for an immediate value.  */
typedef struct
{
  int64_t min;
  int64_t max;
} imm_range_t;
1633
1634 static imm_range_t
1635 imm_range_min_max (unsigned size, bool signed_rng)
1636 {
1637 assert (size < 63);
1638 imm_range_t r;
1639 if (signed_rng)
1640 {
1641 r.max = (((int64_t) 0x1) << (size - 1)) - 1;
1642 r.min = - r.max - 1;
1643 }
1644 else
1645 {
1646 r.max = (((int64_t) 0x1) << size) - 1;
1647 r.min = 0;
1648 }
1649 return r;
1650 }
1651
1652 /* Check that an immediate value is in the range provided by the
1653 operand type. */
static bool
check_immediate_out_of_range (int64_t imm,
			      enum aarch64_opnd type,
			      aarch64_operand_error *mismatch_detail,
			      int idx)
{
  const aarch64_operand *operand = get_operand_from_code (type);
  /* The total width of the operand's instruction fields bounds the
     representable range.  */
  uint8_t size = get_operand_fields_width (operand);
  bool unsigned_imm = operand_need_unsigned_offset (operand);
  /* Select the signed or unsigned fit check to match the operand.  */
  bool (*value_fit_field) (int64_t, unsigned)
    = (unsigned_imm
       ? value_fit_unsigned_field_p
       : value_fit_signed_field_p);

  if (!value_fit_field (imm, size))
    {
      /* Report the exact representable range in the diagnostic.  */
      imm_range_t rng = imm_range_min_max (size, !unsigned_imm);
      set_imm_out_of_range_error (mismatch_detail, idx, rng.min, rng.max);
      return false;
    }
  return true;
}
1676
1677 /* Check that indexed ZA operand OPND has:
1678
1679 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1680
1681 - RANGE_SIZE consecutive immediate offsets.
1682
1683 - an initial immediate offset that is a multiple of RANGE_SIZE
1684 in the range [0, MAX_VALUE * RANGE_SIZE]
1685
1686 - a vector group size of GROUP_SIZE.
1687
1688 - STATUS_VG for cases where VGx2 or VGx4 is mandatory. */
static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size, bool status_vg)
{
  /* The selection register must be one of the four starting at
     MIN_WREG (w8-w11 or w12-w15).  */
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  /* The starting immediate offset must not exceed
     MAX_VALUE * RANGE_SIZE.  */
  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* The starting offset must be aligned to the range size.  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* countm1 is the offset-range length minus one; it must match
     RANGE_SIZE exactly.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code.  A size of
     0 (unspecified) is accepted unless STATUS_VG makes VGx2/VGx4
     mandatory.  */
  if (opnd->indexed_za.group_size != group_size
      && (status_vg || opnd->indexed_za.group_size != 0 ))
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
1754
1755 /* Given a load/store operation, calculate the size of transferred data via a
1756 cumulative sum of qualifier sizes preceding the address operand in the
1757 OPNDS operand list argument. */
1758 int
1759 calc_ldst_datasize (const aarch64_opnd_info *opnds)
1760 {
1761 unsigned num_bytes = 0; /* total number of bytes transferred. */
1762 enum aarch64_operand_class opnd_class;
1763 enum aarch64_opnd type;
1764
1765 for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1766 {
1767 type = opnds[i].type;
1768 opnd_class = aarch64_operands[type].op_class;
1769 if (opnd_class == AARCH64_OPND_CLASS_ADDRESS)
1770 break;
1771 num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier);
1772 }
1773 return num_bytes;
1774 }
1775
1776
1777 /* General constraint checking based on operand code.
1778
1779 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1780 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1781
1782 This function has to be called after the qualifiers for all operands
1783 have been resolved.
1784
1785 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1786 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1787 of error message during the disassembling where error message is not
1788 wanted. We avoid the dynamic construction of strings of error messages
1789 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1790 use a combination of error code, static string and some integer data to
1791 represent an error. */
1792
1793 static bool
1794 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1795 enum aarch64_opnd type,
1796 const aarch64_opcode *opcode,
1797 aarch64_operand_error *mismatch_detail)
1798 {
1799 unsigned num, modifiers, shift;
1800 unsigned char size;
1801 int64_t imm, min_value, max_value;
1802 uint64_t uvalue, mask;
1803 const aarch64_opnd_info *opnd = opnds + idx;
1804 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1805 int i;
1806
1807 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1808
1809 switch (aarch64_operands[type].op_class)
1810 {
1811 case AARCH64_OPND_CLASS_INT_REG:
1812 /* Check for pair of xzr registers. */
1813 if (type == AARCH64_OPND_PAIRREG_OR_XZR
1814 && opnds[idx - 1].reg.regno == 0x1f)
1815 {
1816 if (opnds[idx].reg.regno != 0x1f)
1817 {
1818 set_syntax_error (mismatch_detail, idx - 1,
1819 _("second reg in pair should be xzr if first is"
1820 " xzr"));
1821 return false;
1822 }
1823 }
1824 /* Check pair reg constraints for instructions taking a pair of
1825 consecutively-numbered general-purpose registers. */
1826 else if (type == AARCH64_OPND_PAIRREG
1827 || type == AARCH64_OPND_PAIRREG_OR_XZR)
1828 {
1829 assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
1830 if (opnds[idx - 1].reg.regno % 2 != 0)
1831 {
1832 set_syntax_error (mismatch_detail, idx - 1,
1833 _("reg pair must start from even reg"));
1834 return false;
1835 }
1836 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1837 {
1838 set_syntax_error (mismatch_detail, idx,
1839 _("reg pair must be contiguous"));
1840 return false;
1841 }
1842 break;
1843 }
1844
1845 /* <Xt> may be optional in some IC and TLBI instructions. */
1846 if (type == AARCH64_OPND_Rt_SYS)
1847 {
1848 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1849 == AARCH64_OPND_CLASS_SYSTEM));
1850 if (opnds[1].present
1851 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1852 {
1853 set_other_error (mismatch_detail, idx, _("extraneous register"));
1854 return false;
1855 }
1856 if (!opnds[1].present
1857 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1858 {
1859 set_other_error (mismatch_detail, idx, _("missing register"));
1860 return false;
1861 }
1862 }
1863 switch (qualifier)
1864 {
1865 case AARCH64_OPND_QLF_WSP:
1866 case AARCH64_OPND_QLF_SP:
1867 if (!aarch64_stack_pointer_p (opnd))
1868 {
1869 set_other_error (mismatch_detail, idx,
1870 _("stack pointer register expected"));
1871 return false;
1872 }
1873 break;
1874 default:
1875 break;
1876 }
1877 break;
1878
1879 case AARCH64_OPND_CLASS_SVE_REG:
1880 switch (type)
1881 {
	  /* Indexed SVE Z registers whose register number and element index
	     are packed into a single SIZE-bit field: the low SHIFT bits
	     select the register, the remaining (SIZE - SHIFT) bits give
	     the element index.  */
	case AARCH64_OPND_SVE_Zm3_INDEX:
	case AARCH64_OPND_SVE_Zm3_22_INDEX:
	case AARCH64_OPND_SVE_Zm3_19_INDEX:
	case AARCH64_OPND_SVE_Zm3_11_INDEX:
	case AARCH64_OPND_SVE_Zm3_10_INDEX:
	case AARCH64_OPND_SVE_Zm4_11_INDEX:
	case AARCH64_OPND_SVE_Zm4_INDEX:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  shift = get_operand_specific_data (&aarch64_operands[type]);
	  if (!check_reglane (opnd, mismatch_detail, idx,
			      "z", 0, (1 << shift) - 1,
			      0, (1u << (size - shift)) - 1))
	    return false;
	  break;

	  /* Full z0-z31 register with a 1-bit element index.
	     NOTE(review): SIZE is computed but never used in this arm, and
	     it returns 0 where sibling cases return false (same value for a
	     bool function, but inconsistent).  */
	case AARCH64_OPND_SVE_Zm1_23_INDEX:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 1))
	    return 0;
	  break;

	  /* Full z0-z31 register with a 2-bit element index (0..3).
	     NOTE(review): same unused SIZE / "return 0" pattern as above.  */
	case AARCH64_OPND_SME_Zn_INDEX2_19:
	case AARCH64_OPND_SVE_Zm2_22_INDEX:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 3))
	    return 0;
	  break;

	  /* z0-z31 with the index range derived from the element size:
	     0 .. (64 / esize) - 1.  */
	case AARCH64_OPND_SVE_Zn_INDEX:
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
			      0, 64 / size - 1))
	    return false;
	  break;

	  /* As above but with index range 0 .. (16 / esize) - 1.  */
	case AARCH64_OPND_SVE_Zn_5_INDEX:
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
			      0, 16 / size - 1))
	    return false;
	  break;

	  /* Indexed predicate-as-counter registers, restricted to pn8-pn15;
	     SIZE here is the width of the index field (field 1).  */
	case AARCH64_OPND_SME_PNn3_INDEX1:
	case AARCH64_OPND_SME_PNn3_INDEX2:
	  size = get_operand_field_width (get_operand_from_code (type), 1);
	  if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
			      0, (1 << size) - 1))
	    return false;
	  break;

	  /* Indexed z0-z31 registers: the index width is the total encoded
	     field width minus the 5 bits used for the register number.  */
	case AARCH64_OPND_SVE_Zm3_12_INDEX:
	case AARCH64_OPND_SME_Zn_INDEX1_16:
	case AARCH64_OPND_SME_Zn_INDEX2_15:
	case AARCH64_OPND_SME_Zn_INDEX2_16:
	case AARCH64_OPND_SME_Zn_INDEX3_14:
	case AARCH64_OPND_SME_Zn_INDEX3_15:
	case AARCH64_OPND_SME_Zn_INDEX4_14:
	case AARCH64_OPND_SVE_Zn0_INDEX:
	case AARCH64_OPND_SVE_Zn1_17_INDEX:
	case AARCH64_OPND_SVE_Zn2_18_INDEX:
	case AARCH64_OPND_SVE_Zn3_22_INDEX:
	case AARCH64_OPND_SVE_Zd0_INDEX:
	case AARCH64_OPND_SVE_Zd1_17_INDEX:
	case AARCH64_OPND_SVE_Zd2_18_INDEX:
	case AARCH64_OPND_SVE_Zd3_22_INDEX:
	  size = get_operand_fields_width (get_operand_from_code (type)) - 5;
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
			      0, (1 << size) - 1))
	    return false;
	  break;

	  /* Indexed z0-z15 registers: the index width is the total encoded
	     field width minus the 4 bits used for the register number.  */
	case AARCH64_OPND_SME_Zm_INDEX1:
	case AARCH64_OPND_SME_Zm_INDEX2:
	case AARCH64_OPND_SME_Zm_INDEX2_3:
	case AARCH64_OPND_SME_Zm_INDEX3_1:
	case AARCH64_OPND_SME_Zm_INDEX3_2:
	case AARCH64_OPND_SME_Zm_INDEX3_3:
	case AARCH64_OPND_SME_Zm_INDEX3_10:
	case AARCH64_OPND_SME_Zm_INDEX4_1:
	case AARCH64_OPND_SME_Zm_INDEX4_2:
	case AARCH64_OPND_SME_Zm_INDEX4_3:
	case AARCH64_OPND_SME_Zm_INDEX4_10:
	  size = get_operand_fields_width (get_operand_from_code (type)) - 4;
	  if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
			      0, (1 << size) - 1))
	    return false;
	  break;

	  /* Unindexed Z register restricted to z0-z15.  */
	case AARCH64_OPND_SME_Zm:
	case AARCH64_OPND_SME_Zm_17:
	  if (opnd->reg.regno > 15)
	    {
	      set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
	      return false;
	    }
	  break;

	  /* Predicate-as-tile selector with W-register and immediate; the
	     immediate range depends on the element size
	     (0 .. 16/esize - 1).  */
	case AARCH64_OPND_SME_PnT_Wm_imm:
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  max_value = 16 / size - 1;
	  if (!check_za_access (opnd, mismatch_detail, idx,
				12, max_value, 1, 0,
				get_opcode_dependent_value (opcode)))
	    return false;
	  break;

	default:
	  break;
	}
      break;
1991
    case AARCH64_OPND_CLASS_SVE_REGLIST:
      switch (type)
	{
	  /* Consecutive register lists: NUM registers with stride 1,
	     and the first register must be a multiple of NUM.  */
	case AARCH64_OPND_SME_Pdx2:
	case AARCH64_OPND_SME_Zdnx2:
	case AARCH64_OPND_SME_Zdnx4:
	case AARCH64_OPND_SME_Zmx2:
	case AARCH64_OPND_SME_Zmx4:
	case AARCH64_OPND_SME_Znx2:
	case AARCH64_OPND_SME_Znx2_BIT_INDEX:
	case AARCH64_OPND_SME_Znx4:
	  num = get_operand_specific_data (&aarch64_operands[type]);
	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
	    return false;
	  if ((opnd->reglist.first_regno % num) != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("start register out of range"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SME_Ztx2_STRIDED:
	case AARCH64_OPND_SME_Ztx4_STRIDED:
	  /* 2-register lists have a stride of 8 and 4-register lists
	     have a stride of 4.  */
	  num = get_operand_specific_data (&aarch64_operands[type]);
	  if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
	    return false;
	  /* NUM is reused here as a bitmask of the register-number bits
	     that may legitimately be set in the start register.  */
	  num = 16 | (opnd->reglist.stride - 1);
	  if ((opnd->reglist.first_regno & ~num) != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("start register out of range"));
	      return false;
	    }
	  break;

	  /* Lists whose length comes from the opcode-dependent value
	     rather than the operand table.  */
	case AARCH64_OPND_SME_PdxN:
	case AARCH64_OPND_SVE_ZnxN:
	case AARCH64_OPND_SVE_ZtxN:
	  num = get_opcode_dependent_value (opcode);
	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
	    return false;
	  break;

	default:
	  abort ();
	}
      break;
2042
    case AARCH64_OPND_CLASS_ZA_ACCESS:
      /* ZA tile/array accesses.  The three numeric arguments passed to
	 check_za_access below appear to be, in order: the lowest valid
	 vector-select W register (12 => w12-w15, 8 => w8-w11), the
	 maximum immediate offset, and the number of vectors accessed per
	 offset — TODO(review): confirm against check_za_access's
	 definition, which is outside this chunk.  */
      switch (type)
	{
	  /* Horizontal/vertical tile slices: the offset range depends on
	     the element size (0 .. 16/esize - 1).  */
	case AARCH64_OPND_SME_ZA_HV_idx_src:
	case AARCH64_OPND_SME_ZA_HV_idx_dest:
	case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  max_value = 16 / size - 1;
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off4:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off3_0:
	case AARCH64_OPND_SME_ZA_array_off3_5:
	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off1x4:
	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off2x2:
	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off2x4:
	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_off3x2:
	  if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrsb_1:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrsh_1:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrss_1:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrsd_1:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrsb_2:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrsh_2:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_ARRAY4:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	case AARCH64_OPND_SME_ZA_array_vrss_2:
	case AARCH64_OPND_SME_ZA_array_vrsd_2:
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
				get_opcode_dependent_value (opcode),
				get_opcode_dependent_vg_status (opcode)))
	    return false;
	  break;

	  /* Multi-vector tile slices: the offset range also divides by the
	     number of vectors (NUM), guarding against a zero upper bound
	     before subtracting one.  */
	case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
	case AARCH64_OPND_SME_ZA_HV_idx_destxN:
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  num = get_opcode_dependent_value (opcode);
	  max_value = 16 / num / size;
	  if (max_value > 0)
	    max_value -= 1;
	  if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, num,
				0, get_opcode_dependent_value (opcode)))
	    return false;
	  break;

	default:
	  abort ();
	}
      break;
2173
    case AARCH64_OPND_CLASS_PRED_REG:
      switch (type)
	{
	  /* Predicate-as-counter operands must use pn8-pn15.  */
	case AARCH64_OPND_SME_PNd3:
	case AARCH64_OPND_SME_PNg3:
	  if (opnd->reg.regno < 8)
	    {
	      set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
	      return false;
	    }
	  break;

	default:
	  /* A 3-bit predicate field can only encode p0-p7.  */
	  if (opnd->reg.regno >= 8
	      && get_operand_fields_width (get_operand_from_code (type)) == 3)
	    {
	      set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
	      return false;
	    }
	  break;
	}
      break;
2196
    case AARCH64_OPND_CLASS_COND:
      if (type == AARCH64_OPND_COND1
	  && (opnds[idx].cond->value & 0xe) == 0xe)
	{
	  /* Do not allow AL or NV (condition values 0b1110/0b1111).
	     Note this only records the error; it does not return early.  */
	  set_syntax_error (mismatch_detail, idx, NULL);
	}
      break;
2205
    case AARCH64_OPND_CLASS_ADDRESS:
      /* Check writeback.  */
      switch (opcode->iclass)
	{
	  /* These instruction classes never take writeback.  */
	case ldst_pos:
	case ldst_unscaled:
	case ldstnapair_offs:
	case ldstpair_off:
	case ldst_unpriv:
	  if (opnd->addr.writeback == 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("unexpected address writeback"));
	      return false;
	    }
	  break;
	case ldst_imm10:
	  /* Writeback is only acceptable here in the pre-indexed form.  */
	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("unexpected address writeback"));
	      return false;
	    }
	  break;
	  /* These instruction classes always require writeback.  */
	case ldst_imm9:
	case ldstpair_indexed:
	case asisdlsep:
	case asisdlsop:
	  if (opnd->addr.writeback == 0)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("address writeback expected"));
	      return false;
	    }
	  break;
	case rcpc3:
	  /* Writeback must match the operand's pre-/post-index form.  */
	  if (opnd->addr.writeback)
	    if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB
		 && !opnd->addr.preind)
		|| (type == AARCH64_OPND_RCPC3_ADDR_POSTIND
		    && !opnd->addr.postind))
	      {
		set_syntax_error (mismatch_detail, idx,
				  _("unexpected address writeback"));
		return false;
	      }

	  break;
	default:
	  assert (opnd->addr.writeback == 0);
	  break;
	}
      switch (type)
	{
	case AARCH64_OPND_ADDR_SIMM7:
	  /* Scaled signed 7 bits immediate offset.  */
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     -64 * size, 63 * size);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return false;
	    }
	  break;
	case AARCH64_OPND_ADDR_OFFSET:
	case AARCH64_OPND_ADDR_SIMM9:
	  /* Unscaled signed 9 bits immediate offset.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM9_2:
	  /* Unscaled signed 9 bits immediate offset, which has to be negative
	     or unaligned.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
	       && !value_aligned_p (opnd->addr.offset.imm, size))
	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
	    return true;
	  set_other_error (mismatch_detail, idx,
			   _("negative or unaligned offset expected"));
	  return false;

	case AARCH64_OPND_ADDR_SIMM10:
	  /* Scaled signed 10 bits immediate offset; must be a multiple
	     of 8.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
	    {
	      set_unaligned_error (mismatch_detail, idx, 8);
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM11:
	  /* Signed 11 bits immediate offset (multiple of 16).  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
	      return false;
	    }

	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
	    {
	      set_unaligned_error (mismatch_detail, idx, 16);
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM13:
	  /* Signed 13 bits immediate offset (multiple of 16).  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
	      return false;
	    }

	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
	    {
	      set_unaligned_error (mismatch_detail, idx, 16);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SIMD_ADDR_POST:
	  /* AdvSIMD load/store multiple structures, post-index.  */
	  assert (idx == 1);
	  if (opnd->addr.offset.is_reg)
	    {
	      /* Register post-index: any of x0-x30.  */
	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
		return true;
	      else
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid register offset"));
		  return false;
		}
	    }
	  else
	    {
	      /* Immediate post-index: must equal the number of bytes
		 transferred by the preceding register-list operand.  */
	      const aarch64_opnd_info *prev = &opnds[idx-1];
	      unsigned num_bytes; /* total number of bytes transferred.  */
	      /* The opcode dependent area stores the number of elements in
		 each structure to be loaded/stored.  */
	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading single structure to all lane.  */
		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
		  * aarch64_get_qualifier_esize (prev->qualifier);
	      else
		num_bytes = prev->reglist.num_regs
		  * aarch64_get_qualifier_esize (prev->qualifier)
		  * aarch64_get_qualifier_nelem (prev->qualifier);
	      if ((int) num_bytes != opnd->addr.offset.imm)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid post-increment amount"));
		  return false;
		}
	    }
	  break;

	case AARCH64_OPND_ADDR_REGOFF:
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
	  if (opnd->shifter.amount != 0
	      && opnd->shifter.amount != (int)get_logsz (size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return false;
	    }
	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
	     operators.  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_UXTW:
	    case AARCH64_MOD_LSL:
	    case AARCH64_MOD_SXTW:
	    case AARCH64_MOD_SXTX: break;
	    default:
	      set_other_error (mismatch_detail, idx,
			       _("invalid extend/shift operator"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     0, 4095 * size);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return false;
	    }
	  break;

	case AARCH64_OPND_ADDR_PCREL9:
	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  {
	    imm = opnd->imm.value;
	    if (operand_need_shift_by_two (get_operand_from_code (type)))
	      {
		/* The offset value in a PC-relative branch instruction is
		   always 4-byte aligned and is encoded without the lowest
		   2 bits.  */
		if (!value_aligned_p (imm, 4))
		  {
		    set_unaligned_error (mismatch_detail, idx, 4);
		    return false;
		  }
		/* Right shift by 2 so that we can carry out the following check
		   canonically.  */
		imm >>= 2;
	      }

	    if (!check_immediate_out_of_range (imm, type, mismatch_detail, idx))
	      return false;
	  }
	  break;

	case AARCH64_OPND_SME_ADDR_RI_U4xVL:
	  /* Unsigned 4-bit, VL-scaled immediate offset.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
	      return false;
	    }
	  break;

	  /* Signed, vector-length-scaled immediate offsets; the shared
	     tail below (sve_imm_offset_vl) scales the bounds by the
	     operand-specific multiplier and requires the MUL VL shifter
	     whenever the immediate is non-zero.  */
	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
	  min_value = -8;
	  max_value = 7;
	sve_imm_offset_vl:
	  assert (!opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
	  min_value *= num;
	  max_value *= num;
	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
	      || (opnd->shifter.operator_present
		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     min_value, max_value);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, num))
	    {
	      set_unaligned_error (mismatch_detail, idx, num);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
	  min_value = -32;
	  max_value = 31;
	  goto sve_imm_offset_vl;

	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
	  min_value = -256;
	  max_value = 255;
	  goto sve_imm_offset_vl;

	  /* Unsigned, element-scaled immediate offsets; the shared tail
	     (sve_imm_offset) scales the bounds by a power-of-two factor
	     and rejects any shifter.  */
	case AARCH64_OPND_SVE_ADDR_RI_U6:
	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
	  min_value = 0;
	  max_value = 63;
	sve_imm_offset:
	  assert (!opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
	  min_value *= num;
	  max_value *= num;
	  if (opnd->shifter.operator_present
	      || opnd->shifter.amount_present)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     min_value, max_value);
	      return false;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, num))
	    {
	      set_unaligned_error (mismatch_detail, idx, num);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
	  min_value = -8;
	  max_value = 7;
	  goto sve_imm_offset;

	case AARCH64_OPND_SVE_ADDR_ZX:
	  /* Everything is already ensured by parse_operands or
	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
	     argument type).  */
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
	  assert (opnd->shifter.operator_present == 0);
	  break;

	  /* Register + register addressing with an optional LSL by the
	     operand-specific amount; the shared tail (sve_rr_operand)
	     also rejects xzr as the index when OPD_F_NO_ZR is set.  */
	case AARCH64_OPND_SVE_ADDR_RR:
	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
	case AARCH64_OPND_SVE_ADDR_RR_LSL4:
	case AARCH64_OPND_SVE_ADDR_RM:
	case AARCH64_OPND_SVE_ADDR_RM_LSL1:
	case AARCH64_OPND_SVE_ADDR_RM_LSL2:
	case AARCH64_OPND_SVE_ADDR_RM_LSL3:
	case AARCH64_OPND_SVE_ADDR_RM_LSL4:
	case AARCH64_OPND_SVE_ADDR_RX:
	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
	case AARCH64_OPND_SVE_ADDR_RX_LSL4:
	case AARCH64_OPND_SVE_ADDR_RZ:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
	  modifiers = 1 << AARCH64_MOD_LSL;
	sve_rr_operand:
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
	      && opnd->addr.offset.regno == 31)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("index register xzr is not allowed"));
	      return false;
	    }
	  if (((1 << opnd->shifter.kind) & modifiers) == 0
	      || (opnd->shifter.amount
		  != get_operand_specific_data (&aarch64_operands[type])))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  break;

	  /* As above, but the extend operator must be SXTW or UXTW.  */
	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
	  goto sve_rr_operand;

	case AARCH64_OPND_SVE_ADDR_ZI_U5:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
	  min_value = 0;
	  max_value = 31;
	  goto sve_imm_offset;

	  /* Z + Z addressing: the modifier must be in MODIFIERS and the
	     shift amount in [0, 3] (shared tail sve_zz_operand).  */
	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
	  modifiers = 1 << AARCH64_MOD_LSL;
	sve_zz_operand:
	  assert (opnd->addr.offset.is_reg);
	  assert (opnd->addr.preind);
	  if (((1 << opnd->shifter.kind) & modifiers) == 0
	      || opnd->shifter.amount < 0
	      || opnd->shifter.amount > 3)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid addressing mode"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
	  modifiers = (1 << AARCH64_MOD_SXTW);
	  goto sve_zz_operand;

	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
	  modifiers = 1 << AARCH64_MOD_UXTW;
	  goto sve_zz_operand;

	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
	  {
	    /* The increment (negated for pre-index forms) must be zero or
	       match the transfer size of the instruction.  */
	    int num_bytes = calc_ldst_datasize (opnds);
	    int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
			      || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB)
	      ? opnd->addr.offset.imm * -1
	      : opnd->addr.offset.imm;
	    if ((int) num_bytes != abs_offset
		&& opnd->addr.offset.imm != 0)
	      {
		set_other_error (mismatch_detail, idx,
				 _("invalid increment amount"));
		return false;
	      }
	  }
	  break;

	case AARCH64_OPND_RCPC3_ADDR_OFFSET:
	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, -256, 255);
	      return false;
	    }
	  /* NOTE(review): no break here, so this falls through to the
	     default label below.  Harmless today because default only
	     breaks, but fragile; an explicit break (or fall-through
	     marker) would be safer.  */

	default:
	  break;
	}
      break;
2668
    case AARCH64_OPND_CLASS_SIMD_REGLIST:
      if (type == AARCH64_OPND_LEt)
	{
	  /* Get the upper bound for the element index.  */
	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
	  if (!value_in_range_p (opnd->reglist.index, 0, num))
	    {
	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	      return false;
	    }
	}
      /* The opcode dependent area stores the number of elements in
	 each structure to be loaded/stored.  */
      num = get_opcode_dependent_value (opcode);
      switch (type)
	{
	case AARCH64_OPND_LVn_LUT:
	  /* NOTE(review): returns 0 where the sibling cases return false;
	     same value for a bool function, but inconsistent.  */
	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
	    return 0;
	  break;
	case AARCH64_OPND_LVt:
	  assert (num >= 1 && num <= 4);
	  /* Unless LD1/ST1, the number of registers should be equal to that
	     of the structure elements.  */
	  if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
	    return false;
	  break;
	case AARCH64_OPND_LVt_AL:
	case AARCH64_OPND_LEt:
	  assert (num >= 1 && num <= 4);
	  /* The number of registers should be equal to that of the structure
	     elements.  */
	  if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
	    return false;
	  break;
	default:
	  break;
	}
      /* AdvSIMD register lists are always consecutive (stride 1).  */
      if (opnd->reglist.stride != 1)
	{
	  set_reg_list_stride_error (mismatch_detail, idx, 1);
	  return false;
	}
      break;
2713
    case AARCH64_OPND_CLASS_IMMEDIATE:
      /* Constraint check on immediate operand.  */
      imm = opnd->imm.value;
      /* E.g. imm_0_31 constrains value to be 0..31.  */
      if (qualifier_value_in_range_constraint_p (qualifier)
	  && !value_in_range_p (imm, get_lower_bound (qualifier),
				get_upper_bound (qualifier)))
	{
	  set_imm_out_of_range_error (mismatch_detail, idx,
				      get_lower_bound (qualifier),
				      get_upper_bound (qualifier));
	  return false;
	}

      switch (type)
	{
	case AARCH64_OPND_AIMM:
	  /* Arithmetic immediate: 12-bit unsigned value, optionally
	     LSL #12.  */
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return false;
	    }
	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount must be 0 or 12"));
	      return false;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  /* MOVZ/MOVK-style 16-bit immediate with an LSL that must be a
	     multiple of 16 and fit the destination register width.  */
	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return false;
	    }
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_aligned_p (opnd->shifter.amount, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount must be a multiple of 16"));
	      return false;
	    }
	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
						 0, size * 8 - 16);
	      return false;
	    }
	  if (opnd->imm.value < 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("negative immediate value not allowed"));
	      return false;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_MOV:
	  {
	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	    imm = opnd->imm.value;
	    assert (idx == 1);
	    switch (opcode->op)
	      {
	      case OP_MOV_IMM_WIDEN:
		/* MOVN alias: validate the bitwise inverse.  */
		imm = ~imm;
		/* Fall through.  */
	      case OP_MOV_IMM_WIDE:
		if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
		  {
		    set_other_error (mismatch_detail, idx,
				     _("immediate out of range"));
		    return false;
		  }
		break;
	      case OP_MOV_IMM_LOG:
		if (!aarch64_logical_immediate_p (imm, esize, NULL))
		  {
		    set_other_error (mismatch_detail, idx,
				     _("immediate out of range"));
		    return false;
		  }
		break;
	      default:
		assert (0);
		return false;
	      }
	  }
	  break;

	  /* Plain unsigned immediates: range derived from the encoded
	     field width.  */
	case AARCH64_OPND_NZCV:
	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_EXCEPTION:
	case AARCH64_OPND_UNDEFINED:
	case AARCH64_OPND_TME_UIMM16:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM4_ADDG:
	case AARCH64_OPND_UIMM7:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	case AARCH64_OPND_SVE_UIMM3:
	case AARCH64_OPND_SVE_UIMM7:
	case AARCH64_OPND_SVE_UIMM8:
	case AARCH64_OPND_SVE_UIMM4:
	case AARCH64_OPND_SVE_UIMM8_53:
	case AARCH64_OPND_CSSC_UIMM8:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  (1u << size) - 1);
	      return false;
	    }
	  break;

	case AARCH64_OPND_UIMM10:
	  /* Scaled unsigned 10 bits immediate offset.  */
	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
	      return false;
	    }

	  if (!value_aligned_p (opnd->imm.value, 16))
	    {
	      set_unaligned_error (mismatch_detail, idx, 16);
	      return false;
	    }
	  break;

	  /* Plain signed immediates: range derived from the encoded
	     field width.  */
	case AARCH64_OPND_SIMM5:
	case AARCH64_OPND_SVE_SIMM5:
	case AARCH64_OPND_SVE_SIMM5B:
	case AARCH64_OPND_SVE_SIMM6:
	case AARCH64_OPND_SVE_SIMM8:
	case AARCH64_OPND_CSSC_SIMM8:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_signed_field_p (opnd->imm.value, size))
	    {
	      imm_range_t rng = imm_range_min_max (size, true);
	      set_imm_out_of_range_error (mismatch_detail, idx, rng.min,
					  rng.max);
	      return false;
	    }
	  break;

	case AARCH64_OPND_WIDTH:
	  /* Bitfield width operand: the preceding operand is the lsb.  */
	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
		  && opnds[0].type == AARCH64_OPND_Rd);
	  size = get_upper_bound (qualifier);
	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
	    /* lsb+width <= reg.size  */
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
					  size - opnds[idx-1].imm.value);
	      return false;
	    }
	  break;

	case AARCH64_OPND_LIMM:
	case AARCH64_OPND_SVE_LIMM:
	  {
	    /* Logical immediate; for BIC validate the inverted value.  */
	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	    uint64_t uimm = opnd->imm.value;
	    if (opcode->op == OP_BIC)
	      uimm = ~uimm;
	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
	      {
		set_other_error (mismatch_detail, idx,
				 _("immediate out of range"));
		return false;
	      }
	  }
	  break;

	case AARCH64_OPND_IMM0:
	case AARCH64_OPND_FPIMM0:
	  if (opnd->imm.value != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate zero expected"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_ROT1:
	case AARCH64_OPND_IMM_ROT2:
	case AARCH64_OPND_SVE_IMM_ROT2:
	  if (opnd->imm.value != 0
	      && opnd->imm.value != 90
	      && opnd->imm.value != 180
	      && opnd->imm.value != 270)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("rotate expected to be 0, 90, 180 or 270"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_ROT3:
	case AARCH64_OPND_SVE_IMM_ROT1:
	case AARCH64_OPND_SVE_IMM_ROT3:
	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("rotate expected to be 90 or 270"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_SHLL_IMM:
	  /* The shift must equal the element size in bits of the
	     preceding operand.  */
	  assert (idx == 2);
	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (opnd->imm.value != size)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSL:
	  /* Left-shift amount: 0 .. esize*8 - 1.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  size * 8 - 1);
	      return false;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSR:
	  /* Right-shift amount: 1 .. esize*8.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
	      return false;
	    }
	  break;

	case AARCH64_OPND_SIMD_IMM:
	case AARCH64_OPND_SIMD_IMM_SFT:
	  /* Qualifier check.  */
	  switch (qualifier)
	    {
	    case AARCH64_OPND_QLF_LSL:
	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return false;
		}
	      break;
	    case AARCH64_OPND_QLF_MSL:
	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return false;
		}
	      break;
	    case AARCH64_OPND_QLF_NIL:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift is not permitted"));
		  return false;
		}
	      break;
	    default:
	      assert (0);
	      return false;
	    }
	  /* Is the immediate valid?  */
	  assert (idx == 1);
	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
	    {
	      /* uimm8 or simm8  */
	      if (!value_in_range_p (opnd->imm.value, -128, 255))
		{
		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
		  return false;
		}
	    }
	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
	    {
	      /* uimm64 is not
		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
		 ffffffffgggggggghhhhhhhh'.  */
	      set_other_error (mismatch_detail, idx,
			       _("invalid value for immediate"));
	      return false;
	    }
	  /* Is the shift amount valid?  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_LSL:
	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
		{
		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
						     (size - 1) * 8);
		  return false;
		}
	      if (!value_aligned_p (opnd->shifter.amount, 8))
		{
		  set_unaligned_error (mismatch_detail, idx, 8);
		  return false;
		}
	      break;
	    case AARCH64_MOD_MSL:
	      /* Only 8 and 16 are valid shift amount.
		 NOTE(review): the diagnostic string below says "0 or 16"
		 although the check accepts 8 or 16 — the message looks
		 wrong, but correcting it changes a translatable string,
		 so it is only flagged here.  */
	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift amount must be 0 or 16"));
		  return false;
		}
	      break;
	    default:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return false;
		}
	      break;
	    }
	  break;
3060
3061 case AARCH64_OPND_FPIMM:
3062 case AARCH64_OPND_SIMD_FPIMM:
3063 case AARCH64_OPND_SVE_FPIMM8:
3064 if (opnd->imm.is_fp == 0)
3065 {
3066 set_other_error (mismatch_detail, idx,
3067 _("floating-point immediate expected"));
3068 return false;
3069 }
3070 /* The value is expected to be an 8-bit floating-point constant with
3071 sign, 3-bit exponent and normalized 4 bits of precision, encoded
3072 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
3073 instruction). */
3074 if (!value_in_range_p (opnd->imm.value, 0, 255))
3075 {
3076 set_other_error (mismatch_detail, idx,
3077 _("immediate out of range"));
3078 return false;
3079 }
3080 if (opnd->shifter.kind != AARCH64_MOD_NONE)
3081 {
3082 set_other_error (mismatch_detail, idx,
3083 _("invalid shift operator"));
3084 return false;
3085 }
3086 break;
3087
3088 case AARCH64_OPND_SVE_AIMM:
3089 min_value = 0;
3090 sve_aimm:
3091 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
3092 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
3093 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
3094 uvalue = opnd->imm.value;
3095 shift = opnd->shifter.amount;
3096 if (size == 1)
3097 {
3098 if (shift != 0)
3099 {
3100 set_other_error (mismatch_detail, idx,
3101 _("no shift amount allowed for"
3102 " 8-bit constants"));
3103 return false;
3104 }
3105 }
3106 else
3107 {
3108 if (shift != 0 && shift != 8)
3109 {
3110 set_other_error (mismatch_detail, idx,
3111 _("shift amount must be 0 or 8"));
3112 return false;
3113 }
3114 if (shift == 0 && (uvalue & 0xff) == 0)
3115 {
3116 shift = 8;
3117 uvalue = (int64_t) uvalue / 256;
3118 }
3119 }
3120 mask >>= shift;
3121 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
3122 {
3123 set_other_error (mismatch_detail, idx,
3124 _("immediate too big for element size"));
3125 return false;
3126 }
3127 uvalue = (uvalue - min_value) & mask;
3128 if (uvalue > 0xff)
3129 {
3130 set_other_error (mismatch_detail, idx,
3131 _("invalid arithmetic immediate"));
3132 return false;
3133 }
3134 break;
3135
3136 case AARCH64_OPND_SVE_ASIMM:
3137 min_value = -128;
3138 goto sve_aimm;
3139
3140 case AARCH64_OPND_SVE_I1_HALF_ONE:
3141 assert (opnd->imm.is_fp);
3142 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
3143 {
3144 set_other_error (mismatch_detail, idx,
3145 _("floating-point value must be 0.5 or 1.0"));
3146 return false;
3147 }
3148 break;
3149
3150 case AARCH64_OPND_SVE_I1_HALF_TWO:
3151 assert (opnd->imm.is_fp);
3152 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
3153 {
3154 set_other_error (mismatch_detail, idx,
3155 _("floating-point value must be 0.5 or 2.0"));
3156 return false;
3157 }
3158 break;
3159
3160 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3161 assert (opnd->imm.is_fp);
3162 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
3163 {
3164 set_other_error (mismatch_detail, idx,
3165 _("floating-point value must be 0.0 or 1.0"));
3166 return false;
3167 }
3168 break;
3169
3170 case AARCH64_OPND_SVE_INV_LIMM:
3171 {
3172 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3173 uint64_t uimm = ~opnd->imm.value;
3174 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3175 {
3176 set_other_error (mismatch_detail, idx,
3177 _("immediate out of range"));
3178 return false;
3179 }
3180 }
3181 break;
3182
3183 case AARCH64_OPND_SVE_LIMM_MOV:
3184 {
3185 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3186 uint64_t uimm = opnd->imm.value;
3187 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3188 {
3189 set_other_error (mismatch_detail, idx,
3190 _("immediate out of range"));
3191 return false;
3192 }
3193 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
3194 {
3195 set_other_error (mismatch_detail, idx,
3196 _("invalid replicated MOV immediate"));
3197 return false;
3198 }
3199 }
3200 break;
3201
3202 case AARCH64_OPND_SVE_PATTERN_SCALED:
3203 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
3204 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
3205 {
3206 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
3207 return false;
3208 }
3209 break;
3210
3211 case AARCH64_OPND_SVE_SHLIMM_PRED:
3212 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3213 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3214 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
3215 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
3216 {
3217 set_imm_out_of_range_error (mismatch_detail, idx,
3218 0, 8 * size - 1);
3219 return false;
3220 }
3221 break;
3222
3223 case AARCH64_OPND_SME_SHRIMM4:
3224 size = 1 << get_operand_fields_width (get_operand_from_code (type));
3225 if (!value_in_range_p (opnd->imm.value, 1, size))
3226 {
3227 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
3228 return false;
3229 }
3230 break;
3231
3232 case AARCH64_OPND_SME_SHRIMM5:
3233 case AARCH64_OPND_SVE_SHRIMM_PRED:
3234 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3235 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3236 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
3237 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
3238 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
3239 {
3240 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
3241 return false;
3242 }
3243 break;
3244
3245 case AARCH64_OPND_SME_ZT0_INDEX:
3246 if (!value_in_range_p (opnd->imm.value, 0, 56))
3247 {
3248 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
3249 return false;
3250 }
3251 if (opnd->imm.value % 8 != 0)
3252 {
3253 set_other_error (mismatch_detail, idx,
3254 _("byte index must be a multiple of 8"));
3255 return false;
3256 }
3257 break;
3258
3259 case AARCH64_OPND_SME_ZT0_INDEX_MUL_VL:
3260 if (!value_in_range_p (opnd->imm.value, 0, 3))
3261 {
3262 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 3);
3263 return 0;
3264 }
3265 break;
3266
3267 default:
3268 break;
3269 }
3270 break;
3271
3272 case AARCH64_OPND_CLASS_SYSTEM:
3273 switch (type)
3274 {
3275 case AARCH64_OPND_PSTATEFIELD:
3276 for (i = 0; aarch64_pstatefields[i].name; ++i)
3277 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3278 break;
3279 assert (aarch64_pstatefields[i].name);
3280 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
3281 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
3282 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
3283 {
3284 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
3285 return false;
3286 }
3287 break;
3288 case AARCH64_OPND_PRFOP:
3289 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3290 {
3291 set_other_error (mismatch_detail, idx,
3292 _("the register-index form of PRFM does"
3293 " not accept opcodes in the range 24-31"));
3294 return false;
3295 }
3296 break;
3297 default:
3298 break;
3299 }
3300 break;
3301
3302 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3303 /* Get the upper bound for the element index. */
3304 if (opcode->op == OP_FCMLA_ELEM)
3305 /* FCMLA index range depends on the vector size of other operands
3306 and is halfed because complex numbers take two elements. */
3307 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3308 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3309 else if (opcode->iclass == lut)
3310 {
3311 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
3312 if (!check_reglane (opnd, mismatch_detail, idx, "v", 0, 31,
3313 0, (1 << size) - 1))
3314 return 0;
3315 break;
3316 }
3317 else
3318 num = 16;
3319 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3320 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3321
3322 /* Index out-of-range. */
3323 if (!value_in_range_p (opnd->reglane.index, 0, num))
3324 {
3325 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3326 return false;
3327 }
3328 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3329 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3330 number is encoded in "size:M:Rm":
3331 size <Vm>
3332 00 RESERVED
3333 01 0:Rm
3334 10 M:Rm
3335 11 RESERVED */
3336 if (type == AARCH64_OPND_Em16
3337 && (qualifier == AARCH64_OPND_QLF_S_H
3338 || qualifier == AARCH64_OPND_QLF_S_2B)
3339 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3340 {
3341 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3342 return false;
3343 }
3344 if (type == AARCH64_OPND_Em8
3345 && !value_in_range_p (opnd->reglane.regno, 0, 7))
3346 {
3347 set_regno_out_of_range_error (mismatch_detail, idx, 0, 7);
3348 return 0;
3349 }
3350 break;
3351
3352 case AARCH64_OPND_CLASS_MODIFIED_REG:
3353 assert (idx == 1 || idx == 2);
3354 switch (type)
3355 {
3356 case AARCH64_OPND_Rm_EXT:
3357 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3358 && opnd->shifter.kind != AARCH64_MOD_LSL)
3359 {
3360 set_other_error (mismatch_detail, idx,
3361 _("extend operator expected"));
3362 return false;
3363 }
3364 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3365 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3366 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3367 case. */
3368 if (!aarch64_stack_pointer_p (opnds + 0)
3369 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3370 {
3371 if (!opnd->shifter.operator_present)
3372 {
3373 set_other_error (mismatch_detail, idx,
3374 _("missing extend operator"));
3375 return false;
3376 }
3377 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3378 {
3379 set_other_error (mismatch_detail, idx,
3380 _("'LSL' operator not allowed"));
3381 return false;
3382 }
3383 }
3384 assert (opnd->shifter.operator_present /* Default to LSL. */
3385 || opnd->shifter.kind == AARCH64_MOD_LSL);
3386 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3387 {
3388 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3389 return false;
3390 }
3391 /* In the 64-bit form, the final register operand is written as Wm
3392 for all but the (possibly omitted) UXTX/LSL and SXTX
3393 operators.
3394 N.B. GAS allows X register to be used with any operator as a
3395 programming convenience. */
3396 if (qualifier == AARCH64_OPND_QLF_X
3397 && opnd->shifter.kind != AARCH64_MOD_LSL
3398 && opnd->shifter.kind != AARCH64_MOD_UXTX
3399 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3400 {
3401 set_other_error (mismatch_detail, idx, _("W register expected"));
3402 return false;
3403 }
3404 break;
3405
3406 case AARCH64_OPND_Rm_SFT:
3407 /* ROR is not available to the shifted register operand in
3408 arithmetic instructions. */
3409 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3410 {
3411 set_other_error (mismatch_detail, idx,
3412 _("shift operator expected"));
3413 return false;
3414 }
3415 if (opnd->shifter.kind == AARCH64_MOD_ROR
3416 && opcode->iclass != log_shift)
3417 {
3418 set_other_error (mismatch_detail, idx,
3419 _("'ROR' operator not allowed"));
3420 return false;
3421 }
3422 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3423 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3424 {
3425 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3426 return false;
3427 }
3428 break;
3429
3430 case AARCH64_OPND_Rm_LSL:
3431 /* We expect here that opnd->shifter.kind != AARCH64_MOD_LSL
3432 because the parser already restricts the type of shift to LSL only,
3433 so another check of shift kind would be redundant. */
3434 if (!value_in_range_p (opnd->shifter.amount, 0, 7))
3435 {
3436 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 7);
3437 return false;
3438 }
3439 break;
3440
3441 default:
3442 break;
3443 }
3444 break;
3445
3446 default:
3447 break;
3448 }
3449
3450 return true;
3451 }
3452
3453 /* Main entrypoint for the operand constraint checking.
3454
   Return true if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return false and, if MISMATCH_DETAIL is
3457 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3458 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3459 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3460 error kind when it is notified that an instruction does not pass the check).
3461
3462 Un-determined operand qualifiers may get established during the process. */
3463
bool
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Index of the operand (if any) that must be tied to operand 0;
     0 means "no tied operand" for this opcode.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
        {
        /* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.
	*/
        case sme_ldr:
        case sme_str:
          assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
          assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
          if (inst->operands[0].indexed_za.index.imm
              != inst->operands[1].addr.offset.imm)
            {
              if (mismatch_detail)
                {
                  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
                  mismatch_detail->index = i;
                }
              return false;
            }
          break;

        default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class
	      = aarch64_get_operand_class (inst->operands[0].type);
	    assert (aarch64_get_operand_class (inst->operands[i].type)
		    == op_class);
	    /* SVE register lists are tied as whole lists: first register,
	       length and stride must all agree.  Plain registers are tied
	       by register number alone.  */
	    if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return false;
	      }
	    break;
	  }
        }
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by operand_general_constraint_met_p,
     which has to be called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return false;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (!operand_general_constraint_met_p (inst->operands, i, type,
					     inst->opcode, mismatch_detail))
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return false;
	}
    }

  DEBUG_TRACE ("PASS");

  return true;
}
3584
3585 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3586 Also updates the TYPE of each INST->OPERANDS with the corresponding
3587 value of OPCODE->OPERANDS.
3588
3589 Note that some operand qualifiers may need to be manually cleared by
3590 the caller before it further calls the aarch64_opcode_encode; by
3591 doing this, it helps the qualifier matching facilities work
3592 properly. */
3593
3594 const aarch64_opcode*
3595 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3596 {
3597 int i;
3598 const aarch64_opcode *old = inst->opcode;
3599
3600 inst->opcode = opcode;
3601
3602 /* Update the operand types. */
3603 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3604 {
3605 inst->operands[i].type = opcode->operands[i];
3606 if (opcode->operands[i] == AARCH64_OPND_NIL)
3607 break;
3608 }
3609
3610 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3611
3612 return old;
3613 }
3614
3615 int
3616 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3617 {
3618 int i;
3619 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3620 if (operands[i] == operand)
3621 return i;
3622 else if (operands[i] == AARCH64_OPND_NIL)
3623 break;
3624 return -1;
3625 }
3626
/* R0...R30, followed by FOR31.  Expands to a 32-entry initializer for
   one register-name bank; R names registers 0-30 and FOR31 supplies
   the special name for register 31 (SP/ZR variants).  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),	\
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15),	\
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23),	\
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* First index: 0 = banks where R31 is the stack pointer, 1 = banks
   where R31 is the zero register.  Second index: 0 = 32-bit, 1 = 64-bit.
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3658
3659 /* Return the integer register name.
   if SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
3661
3662 static inline const char *
3663 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3664 {
3665 const int has_zr = sp_reg_p ? 0 : 1;
3666 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3667 return int_reg[has_zr][is_64][regno];
3668 }
3669
3670 /* Like get_int_reg_name, but IS_64 is always 1. */
3671
3672 static inline const char *
3673 get_64bit_int_reg_name (int regno, int sp_reg_p)
3674 {
3675 const int has_zr = sp_reg_p ? 0 : 1;
3676 return int_reg[has_zr][1][regno];
3677 }
3678
3679 /* Get the name of the integer offset register in OPND, using the shift type
3680 to decide whether it's a word or doubleword. */
3681
3682 static inline const char *
3683 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3684 {
3685 switch (opnd->shifter.kind)
3686 {
3687 case AARCH64_MOD_UXTW:
3688 case AARCH64_MOD_SXTW:
3689 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3690
3691 case AARCH64_MOD_LSL:
3692 case AARCH64_MOD_SXTX:
3693 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3694
3695 default:
3696 abort ();
3697 }
3698 }
3699
3700 /* Get the name of the SVE vector offset register in OPND, using the operand
3701 qualifier to decide whether the suffix should be .S or .D. */
3702
3703 static inline const char *
3704 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3705 {
3706 assert (qualifier == AARCH64_OPND_QLF_S_S
3707 || qualifier == AARCH64_OPND_QLF_S_D);
3708 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3709 }
3710
3711 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3712
/* Bit-pattern/value pun for a 64-bit IEEE double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Bit-pattern/value pun for a 32-bit IEEE single.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision before
   conversion, hence the 32-bit layout here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
3730
3731 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
3732 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
3733 (depending on the type of the instruction). IMM8 will be expanded to a
3734 single-precision floating-point value (SIZE == 4) or a double-precision
3735 floating-point value (SIZE == 8). A half-precision floating-point value
3736 (SIZE == 2) is expanded to a single-precision floating-point value. The
3737 expanded value is returned. */
3738
/* Expand the 8-bit FP immediate IMM8 (sign a, exponent b:c:d, fraction
   e:f:g:h) into the bit pattern of a double (SIZE == 8) or a single
   (SIZE == 4, or SIZE == 2 since half-precision immediates are widened
   to single).  Returns the expanded bit pattern.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  uint32_t frac7 = imm8 & 0x7f;		/* imm8<6:0> */
  uint32_t b = frac7 >> 6;		/* imm8<6> */
  uint32_t b_x4 = b ? 0xf : 0;		/* Replicate(imm8<6>,4) */
  uint64_t result;

  if (size == 8)
    {
      /* Assemble the top 32 bits of the double, then shift into place;
	 the low word is always zero.  */
      result = ((uint64_t) sign << (63 - 32))		/* imm8<7> */
	| ((uint64_t) (b ^ 1) << (62 - 32))		/* NOT(imm8<6>) */
	| ((uint64_t) b_x4 << (58 - 32))
	| ((uint64_t) b << (57 - 32))
	| ((uint64_t) b << (56 - 32))
	| ((uint64_t) b << (55 - 32))			/* Replicate(imm8<6>,7) */
	| ((uint64_t) frac7 << (48 - 32));		/* imm8<6>:imm8<5:0> */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      result = ((uint64_t) sign << 31)			/* imm8<7> */
	| ((uint64_t) (b ^ 1) << 30)			/* NOT(imm8<6>) */
	| ((uint64_t) b_x4 << 26)			/* Replicate(imm8<6>,4) */
	| ((uint64_t) frac7 << 19);			/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
3774
3775 /* Return a string based on FMT with the register style applied. */
3776
3777 static const char *
3778 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3779 {
3780 const char *txt;
3781 va_list ap;
3782
3783 va_start (ap, fmt);
3784 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3785 va_end (ap);
3786
3787 return txt;
3788 }
3789
3790 /* Return a string based on FMT with the immediate style applied. */
3791
3792 static const char *
3793 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3794 {
3795 const char *txt;
3796 va_list ap;
3797
3798 va_start (ap, fmt);
3799 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3800 va_end (ap);
3801
3802 return txt;
3803 }
3804
3805 /* Return a string based on FMT with the sub-mnemonic style applied. */
3806
3807 static const char *
3808 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3809 {
3810 const char *txt;
3811 va_list ap;
3812
3813 va_start (ap, fmt);
3814 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3815 va_end (ap);
3816
3817 return txt;
3818 }
3819
3820 /* Return a string based on FMT with the address style applied. */
3821
3822 static const char *
3823 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3824 {
3825 const char *txt;
3826 va_list ap;
3827
3828 va_start (ap, fmt);
3829 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3830 va_end (ap);
3831
3832 return txt;
3833 }
3834
3835 /* Produce the string representation of the register list operand *OPND
3836 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3837 the register name that comes before the register number, such as "v". */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers (prefix "p") number 0-15; all other register
     banks number 0-31, so register arithmetic wraps at that bound.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there is
     more than one register in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    /* With a NIL qualifier the element-size suffix is omitted.  */
    if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d", prefix, first_reg),
		style_reg (styler, "%s%d", prefix, last_reg), tb);
    else
      snprintf (buf, size, "{%s-%s}%s",
		style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
		style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Comma-separated form: list each register explicitly, wrapping
	 register numbers at the bank size.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3911
3912 /* Print the register+immediate address in OPND to BUF, which has SIZE
3913 characters. BASE is the name of the base register. */
3914
3915 static void
3916 print_immediate_offset_address (char *buf, size_t size,
3917 const aarch64_opnd_info *opnd,
3918 const char *base,
3919 struct aarch64_styler *styler)
3920 {
3921 if (opnd->addr.writeback)
3922 {
3923 if (opnd->addr.preind)
3924 {
3925 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3926 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3927 else
3928 snprintf (buf, size, "[%s, %s]!",
3929 style_reg (styler, base),
3930 style_imm (styler, "#%d", opnd->addr.offset.imm));
3931 }
3932 else
3933 snprintf (buf, size, "[%s], %s",
3934 style_reg (styler, base),
3935 style_imm (styler, "#%d", opnd->addr.offset.imm));
3936 }
3937 else
3938 {
3939 if (opnd->shifter.operator_present)
3940 {
3941 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3942 snprintf (buf, size, "[%s, %s, %s]",
3943 style_reg (styler, base),
3944 style_imm (styler, "#%d", opnd->addr.offset.imm),
3945 style_sub_mnem (styler, "mul vl"));
3946 }
3947 else if (opnd->addr.offset.imm)
3948 snprintf (buf, size, "[%s, %s]",
3949 style_reg (styler, base),
3950 style_imm (styler, "#%d", opnd->addr.offset.imm));
3951 else
3952 snprintf (buf, size, "[%s]", style_reg (styler, base));
3953 }
3954 }
3955
3956 /* Produce the string representation of the register offset address operand
3957 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3958 the names of the base and offset registers. */
3959 static void
3960 print_register_offset_address (char *buf, size_t size,
3961 const aarch64_opnd_info *opnd,
3962 const char *base, const char *offset,
3963 struct aarch64_styler *styler)
3964 {
3965 char tb[32]; /* Temporary buffer. */
3966 bool print_extend_p = true;
3967 bool print_amount_p = true;
3968 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3969
3970 /* This is the case where offset is the optional argument and the optional
3971 argument is ignored in the disassembly. */
3972 if (opnd->type == AARCH64_OPND_SVE_ADDR_ZX && offset != NULL
3973 && strcmp (offset,"xzr") == 0)
3974 {
3975 /* Example: [<Zn>.S{, <Xm>}].
3976 When the assembly is [Z0.S, XZR] or [Z0.S], Xm is XZR in both the cases
3977 and the preferred disassembly is [Z0.S], ignoring the optional Xm. */
3978 snprintf (buf, size, "[%s]", style_reg (styler, base));
3979 }
3980 else
3981 {
3982 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3983 || !opnd->shifter.amount_present))
3984 {
3985 /* Not print the shift/extend amount when the amount is zero and
3986 when it is not the special case of 8-bit load/store
3987 instruction. */
3988 print_amount_p = false;
3989 /* Likewise, no need to print the shift operator LSL in such a
3990 situation. */
3991 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3992 print_extend_p = false;
3993 }
3994
3995 /* Prepare for the extend/shift. */
3996 if (print_extend_p)
3997 {
3998 if (print_amount_p)
3999 snprintf (tb, sizeof (tb), ", %s %s",
4000 style_sub_mnem (styler, shift_name),
4001 style_imm (styler, "#%" PRIi64,
4002 /* PR 21096: The %100 is to silence a warning about possible
4003 truncation. */
4004 (opnd->shifter.amount % 100)));
4005 else
4006 snprintf (tb, sizeof (tb), ", %s",
4007 style_sub_mnem (styler, shift_name));
4008 }
4009 else
4010 tb[0] = '\0';
4011
4012 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
4013 style_reg (styler, offset), tb);
4014 }
4015 }
4016
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.
*/
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names ordered widest-coverage first, so that the greedy scan below
     naturally yields the shortest list.  The trailing " " entry pairs with a
     zero mask and produces the empty list "{ }".  */
  const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
                        "za1.s", "za2.s", "za3.s", "za0.d",
                        "za1.d", "za2.d", "za3.d", "za4.d",
                        "za5.d", "za6.d", "za7.d", " " };
  /* Immediate bits covered by the corresponding entry in ZAN.  */
  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
                        0x22, 0x44, 0x88, 0x01,
                        0x02, 0x04, 0x08, 0x10,
                        0x20, 0x40, 0x80, 0x00 };
  int i, k;
  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);

  k = snprintf (buf, size, "{");
  for (i = 0; i < ZAN_SIZE; i++)
    {
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  mask &= ~zan_v[i];
	  /* Separate all but the first tile name with ", ".  */
	  if (k > 1)
	    k += snprintf (buf + k, size - k, ", ");

	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
	}
      /* snprintf returns the would-be length, so on truncation K could grow
	 past SIZE, making BUF + K point outside the buffer and SIZE - K wrap
	 to a huge size_t.  Clamp K to keep later writes in bounds.  */
      if (k < 0 || (size_t) k >= size)
	k = size > 0 ? (int) size - 1 : 0;
      /* Stop once every bit of the mask has been accounted for.  */
      if (mask == 0)
	break;
    }
  snprintf (buf + k, size - k, "}");
}
4057
4058 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
4059 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
4060 PC, PCREL_P and ADDRESS are used to pass in and return information about
4061 the PC-relative address calculation, where the PC value is passed in
4062 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
4063 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
4064 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
4065
4066 The function serves both the disassembler and the assembler diagnostics
4067 issuer, which is the reason why it lives in this file. */
4068
4069 void
4070 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
4071 const aarch64_opcode *opcode,
4072 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
4073 bfd_vma *address, char** notes,
4074 char *comment, size_t comment_size,
4075 aarch64_feature_set features,
4076 struct aarch64_styler *styler)
4077 {
4078 unsigned int i, num_conds;
4079 const char *name = NULL;
4080 const aarch64_opnd_info *opnd = opnds + idx;
4081 enum aarch64_modifier_kind kind;
4082 uint64_t addr, enum_value;
4083
4084 if (comment != NULL)
4085 {
4086 assert (comment_size > 0);
4087 comment[0] = '\0';
4088 }
4089 else
4090 assert (comment_size == 0);
4091
4092 buf[0] = '\0';
4093 if (pcrel_p)
4094 *pcrel_p = 0;
4095
4096 switch (opnd->type)
4097 {
4098 case AARCH64_OPND_Rd:
4099 case AARCH64_OPND_Rn:
4100 case AARCH64_OPND_Rm:
4101 case AARCH64_OPND_Rt:
4102 case AARCH64_OPND_Rt2:
4103 case AARCH64_OPND_Rs:
4104 case AARCH64_OPND_Ra:
4105 case AARCH64_OPND_Rt_IN_SYS_ALIASES:
4106 case AARCH64_OPND_Rt_LS64:
4107 case AARCH64_OPND_Rt_SYS:
4108 case AARCH64_OPND_PAIRREG:
4109 case AARCH64_OPND_PAIRREG_OR_XZR:
4110 case AARCH64_OPND_SVE_Rm:
4111 case AARCH64_OPND_LSE128_Rt:
4112 case AARCH64_OPND_LSE128_Rt2:
4113 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
4114 the <ic_op>, therefore we use opnd->present to override the
4115 generic optional-ness information. */
4116 if (opnd->type == AARCH64_OPND_Rt_SYS)
4117 {
4118 if (!opnd->present)
4119 break;
4120 }
4121 else if ((opnd->type == AARCH64_OPND_Rt_IN_SYS_ALIASES)
4122 && (opnd->reg.regno
4123 != get_optional_operand_default_value (opcode)))
4124 {
4125 /* Avoid printing an invalid additional value for Rt in SYS aliases such as
4126 BRB, provide a helpful comment instead */
4127 snprintf (comment, comment_size, "unpredictable encoding (Rt!=31): #%u", opnd->reg.regno);
4128 break;
4129 }
4130 /* Omit the operand, e.g. RET. */
4131 else if (optional_operand_p (opcode, idx)
4132 && (opnd->reg.regno
4133 == get_optional_operand_default_value (opcode)))
4134 break;
4135 assert (opnd->qualifier == AARCH64_OPND_QLF_W
4136 || opnd->qualifier == AARCH64_OPND_QLF_X);
4137 snprintf (buf, size, "%s",
4138 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4139 opnd->qualifier, 0)));
4140 break;
4141
4142 case AARCH64_OPND_Rd_SP:
4143 case AARCH64_OPND_Rn_SP:
4144 case AARCH64_OPND_Rt_SP:
4145 case AARCH64_OPND_SVE_Rn_SP:
4146 case AARCH64_OPND_Rm_SP:
4147 assert (opnd->qualifier == AARCH64_OPND_QLF_W
4148 || opnd->qualifier == AARCH64_OPND_QLF_WSP
4149 || opnd->qualifier == AARCH64_OPND_QLF_X
4150 || opnd->qualifier == AARCH64_OPND_QLF_SP);
4151 snprintf (buf, size, "%s",
4152 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4153 opnd->qualifier, 1)));
4154 break;
4155
4156 case AARCH64_OPND_Rm_EXT:
4157 kind = opnd->shifter.kind;
4158 assert (idx == 1 || idx == 2);
4159 if ((aarch64_stack_pointer_p (opnds)
4160 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
4161 && ((opnd->qualifier == AARCH64_OPND_QLF_W
4162 && opnds[0].qualifier == AARCH64_OPND_QLF_W
4163 && kind == AARCH64_MOD_UXTW)
4164 || (opnd->qualifier == AARCH64_OPND_QLF_X
4165 && kind == AARCH64_MOD_UXTX)))
4166 {
4167 /* 'LSL' is the preferred form in this case. */
4168 kind = AARCH64_MOD_LSL;
4169 if (opnd->shifter.amount == 0)
4170 {
4171 /* Shifter omitted. */
4172 snprintf (buf, size, "%s",
4173 style_reg (styler,
4174 get_int_reg_name (opnd->reg.regno,
4175 opnd->qualifier, 0)));
4176 break;
4177 }
4178 }
4179 if (opnd->shifter.amount)
4180 snprintf (buf, size, "%s, %s %s",
4181 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4182 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
4183 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4184 else
4185 snprintf (buf, size, "%s, %s",
4186 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4187 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
4188 break;
4189
4190 case AARCH64_OPND_Rm_SFT:
4191 assert (opnd->qualifier == AARCH64_OPND_QLF_W
4192 || opnd->qualifier == AARCH64_OPND_QLF_X);
4193 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
4194 snprintf (buf, size, "%s",
4195 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4196 opnd->qualifier, 0)));
4197 else
4198 snprintf (buf, size, "%s, %s %s",
4199 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4200 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4201 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4202 break;
4203
4204 case AARCH64_OPND_Rm_LSL:
4205 assert (opnd->qualifier == AARCH64_OPND_QLF_X);
4206 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
4207 if (opnd->shifter.amount == 0)
4208 snprintf (buf, size, "%s",
4209 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4210 opnd->qualifier, 0)));
4211 else
4212 snprintf (buf, size, "%s, %s %s",
4213 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4214 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4215 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4216 break;
4217
4218 case AARCH64_OPND_Fd:
4219 case AARCH64_OPND_Fn:
4220 case AARCH64_OPND_Fm:
4221 case AARCH64_OPND_Fa:
4222 case AARCH64_OPND_Ft:
4223 case AARCH64_OPND_Ft2:
4224 case AARCH64_OPND_Sd:
4225 case AARCH64_OPND_Sn:
4226 case AARCH64_OPND_Sm:
4227 case AARCH64_OPND_SVE_VZn:
4228 case AARCH64_OPND_SVE_Vd:
4229 case AARCH64_OPND_SVE_Vm:
4230 case AARCH64_OPND_SVE_Vn:
4231 snprintf (buf, size, "%s",
4232 style_reg (styler, "%s%d",
4233 aarch64_get_qualifier_name (opnd->qualifier),
4234 opnd->reg.regno));
4235 break;
4236
4237 case AARCH64_OPND_Va:
4238 case AARCH64_OPND_Vd:
4239 case AARCH64_OPND_Vn:
4240 case AARCH64_OPND_Vm:
4241 snprintf (buf, size, "%s",
4242 style_reg (styler, "v%d.%s", opnd->reg.regno,
4243 aarch64_get_qualifier_name (opnd->qualifier)));
4244 break;
4245
4246 case AARCH64_OPND_Ed:
4247 case AARCH64_OPND_En:
4248 case AARCH64_OPND_Em:
4249 case AARCH64_OPND_Em16:
4250 case AARCH64_OPND_Em8:
4251 case AARCH64_OPND_SM3_IMM2:
4252 snprintf (buf, size, "%s[%s]",
4253 style_reg (styler, "v%d.%s", opnd->reglane.regno,
4254 aarch64_get_qualifier_name (opnd->qualifier)),
4255 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4256 break;
4257
4258 case AARCH64_OPND_Em_INDEX1_14:
4259 case AARCH64_OPND_Em_INDEX2_13:
4260 case AARCH64_OPND_Em_INDEX3_12:
4261 snprintf (buf, size, "%s[%s]",
4262 style_reg (styler, "v%d", opnd->reglane.regno),
4263 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4264 break;
4265
4266 case AARCH64_OPND_VdD1:
4267 case AARCH64_OPND_VnD1:
4268 snprintf (buf, size, "%s[%s]",
4269 style_reg (styler, "v%d.d", opnd->reg.regno),
4270 style_imm (styler, "1"));
4271 break;
4272
4273 case AARCH64_OPND_LVn:
4274 case AARCH64_OPND_LVn_LUT:
4275 case AARCH64_OPND_LVt:
4276 case AARCH64_OPND_LVt_AL:
4277 case AARCH64_OPND_LEt:
4278 print_register_list (buf, size, opnd, "v", styler);
4279 break;
4280
4281 case AARCH64_OPND_SVE_Pd:
4282 case AARCH64_OPND_SVE_Pg3:
4283 case AARCH64_OPND_SVE_Pg4_5:
4284 case AARCH64_OPND_SVE_Pg4_10:
4285 case AARCH64_OPND_SVE_Pg4_16:
4286 case AARCH64_OPND_SVE_Pm:
4287 case AARCH64_OPND_SVE_Pn:
4288 case AARCH64_OPND_SVE_Pt:
4289 case AARCH64_OPND_SME_Pm:
4290 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4291 snprintf (buf, size, "%s",
4292 style_reg (styler, "p%d", opnd->reg.regno));
4293 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4294 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4295 snprintf (buf, size, "%s",
4296 style_reg (styler, "p%d/%s", opnd->reg.regno,
4297 aarch64_get_qualifier_name (opnd->qualifier)));
4298 else
4299 snprintf (buf, size, "%s",
4300 style_reg (styler, "p%d.%s", opnd->reg.regno,
4301 aarch64_get_qualifier_name (opnd->qualifier)));
4302 break;
4303
4304 case AARCH64_OPND_SVE_PNd:
4305 case AARCH64_OPND_SVE_PNg4_10:
4306 case AARCH64_OPND_SVE_PNn:
4307 case AARCH64_OPND_SVE_PNt:
4308 case AARCH64_OPND_SME_PNd3:
4309 case AARCH64_OPND_SME_PNg3:
4310 case AARCH64_OPND_SME_PNn:
4311 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4312 snprintf (buf, size, "%s",
4313 style_reg (styler, "pn%d", opnd->reg.regno));
4314 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4315 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4316 snprintf (buf, size, "%s",
4317 style_reg (styler, "pn%d/%s", opnd->reg.regno,
4318 aarch64_get_qualifier_name (opnd->qualifier)));
4319 else
4320 snprintf (buf, size, "%s",
4321 style_reg (styler, "pn%d.%s", opnd->reg.regno,
4322 aarch64_get_qualifier_name (opnd->qualifier)));
4323 break;
4324
4325 case AARCH64_OPND_SME_Pdx2:
4326 case AARCH64_OPND_SME_PdxN:
4327 print_register_list (buf, size, opnd, "p", styler);
4328 break;
4329
4330 case AARCH64_OPND_SME_PNn3_INDEX1:
4331 case AARCH64_OPND_SME_PNn3_INDEX2:
4332 snprintf (buf, size, "%s[%s]",
4333 style_reg (styler, "pn%d", opnd->reglane.regno),
4334 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4335 break;
4336
4337 case AARCH64_OPND_SVE_Za_5:
4338 case AARCH64_OPND_SVE_Za_16:
4339 case AARCH64_OPND_SVE_Zd:
4340 case AARCH64_OPND_SVE_Zm_5:
4341 case AARCH64_OPND_SVE_Zm_16:
4342 case AARCH64_OPND_SVE_Zn:
4343 case AARCH64_OPND_SVE_Zt:
4344 case AARCH64_OPND_SME_Zm:
4345 case AARCH64_OPND_SME_Zm_17:
4346 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4347 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
4348 else
4349 snprintf (buf, size, "%s",
4350 style_reg (styler, "z%d.%s", opnd->reg.regno,
4351 aarch64_get_qualifier_name (opnd->qualifier)));
4352 break;
4353
4354 case AARCH64_OPND_SVE_ZnxN:
4355 case AARCH64_OPND_SVE_ZtxN:
4356 case AARCH64_OPND_SME_Zdnx2:
4357 case AARCH64_OPND_SME_Zdnx4:
4358 case AARCH64_OPND_SME_Zmx2:
4359 case AARCH64_OPND_SME_Zmx4:
4360 case AARCH64_OPND_SME_Znx2:
4361 case AARCH64_OPND_SME_Znx2_BIT_INDEX:
4362 case AARCH64_OPND_SME_Znx4:
4363 case AARCH64_OPND_SME_Ztx2_STRIDED:
4364 case AARCH64_OPND_SME_Ztx4_STRIDED:
4365 print_register_list (buf, size, opnd, "z", styler);
4366 break;
4367
4368 case AARCH64_OPND_SVE_Zm1_23_INDEX:
4369 case AARCH64_OPND_SVE_Zm2_22_INDEX:
4370 case AARCH64_OPND_SVE_Zm3_INDEX:
4371 case AARCH64_OPND_SVE_Zm3_22_INDEX:
4372 case AARCH64_OPND_SVE_Zm3_19_INDEX:
4373 case AARCH64_OPND_SVE_Zm3_12_INDEX:
4374 case AARCH64_OPND_SVE_Zm3_11_INDEX:
4375 case AARCH64_OPND_SVE_Zm3_10_INDEX:
4376 case AARCH64_OPND_SVE_Zm4_11_INDEX:
4377 case AARCH64_OPND_SVE_Zm4_INDEX:
4378 case AARCH64_OPND_SVE_Zn_INDEX:
4379 case AARCH64_OPND_SME_Zm_INDEX1:
4380 case AARCH64_OPND_SME_Zm_INDEX2:
4381 case AARCH64_OPND_SME_Zm_INDEX2_3:
4382 case AARCH64_OPND_SME_Zm_INDEX3_1:
4383 case AARCH64_OPND_SME_Zm_INDEX3_2:
4384 case AARCH64_OPND_SME_Zm_INDEX3_3:
4385 case AARCH64_OPND_SME_Zm_INDEX3_10:
4386 case AARCH64_OPND_SVE_Zn_5_INDEX:
4387 case AARCH64_OPND_SME_Zm_INDEX4_1:
4388 case AARCH64_OPND_SME_Zm_INDEX4_2:
4389 case AARCH64_OPND_SME_Zm_INDEX4_3:
4390 case AARCH64_OPND_SME_Zm_INDEX4_10:
4391 case AARCH64_OPND_SME_Zn_INDEX1_16:
4392 case AARCH64_OPND_SME_Zn_INDEX2_15:
4393 case AARCH64_OPND_SME_Zn_INDEX2_16:
4394 case AARCH64_OPND_SME_Zn_INDEX2_19:
4395 case AARCH64_OPND_SME_Zn_INDEX3_14:
4396 case AARCH64_OPND_SME_Zn_INDEX3_15:
4397 case AARCH64_OPND_SME_Zn_INDEX4_14:
4398 snprintf (buf, size, "%s[%s]",
4399 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4400 ? style_reg (styler, "z%d", opnd->reglane.regno)
4401 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4402 aarch64_get_qualifier_name (opnd->qualifier))),
4403 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4404 break;
4405
4406 case AARCH64_OPND_SVE_Zn0_INDEX:
4407 case AARCH64_OPND_SVE_Zn1_17_INDEX:
4408 case AARCH64_OPND_SVE_Zn2_18_INDEX:
4409 case AARCH64_OPND_SVE_Zn3_22_INDEX:
4410 case AARCH64_OPND_SVE_Zd0_INDEX:
4411 case AARCH64_OPND_SVE_Zd1_17_INDEX:
4412 case AARCH64_OPND_SVE_Zd2_18_INDEX:
4413 case AARCH64_OPND_SVE_Zd3_22_INDEX:
4414 if (opnd->reglane.index == 0)
4415 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
4416 else
4417 snprintf (buf, size, "%s[%s]",
4418 style_reg (styler, "z%d", opnd->reglane.regno),
4419 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4420 break;
4421
4422 case AARCH64_OPND_SME_ZAda_1b:
4423 case AARCH64_OPND_SME_ZAda_2b:
4424 case AARCH64_OPND_SME_ZAda_3b:
4425 snprintf (buf, size, "%s",
4426 style_reg (styler, "za%d.%s", opnd->reg.regno,
4427 aarch64_get_qualifier_name (opnd->qualifier)));
4428 break;
4429
4430 case AARCH64_OPND_SME_ZA_HV_idx_src:
4431 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4432 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4433 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4434 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4435 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4436 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4437 style_reg (styler, "za%d%c.%s",
4438 opnd->indexed_za.regno,
4439 opnd->indexed_za.v == 1 ? 'v' : 'h',
4440 aarch64_get_qualifier_name (opnd->qualifier)),
4441 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4442 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4443 opnd->indexed_za.index.countm1 ? ":" : "",
4444 (opnd->indexed_za.index.countm1
4445 ? style_imm (styler, "%d",
4446 opnd->indexed_za.index.imm
4447 + opnd->indexed_za.index.countm1)
4448 : ""),
4449 opnd->indexed_za.group_size ? ", " : "",
4450 opnd->indexed_za.group_size == 2
4451 ? style_sub_mnem (styler, "vgx2")
4452 : opnd->indexed_za.group_size == 4
4453 ? style_sub_mnem (styler, "vgx4") : "",
4454 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4455 break;
4456
4457 case AARCH64_OPND_SME_list_of_64bit_tiles:
4458 print_sme_za_list (buf, size, opnd->imm.value, styler);
4459 break;
4460
4461 case AARCH64_OPND_SME_ZA_array_off1x4:
4462 case AARCH64_OPND_SME_ZA_array_off2x2:
4463 case AARCH64_OPND_SME_ZA_array_off2x4:
4464 case AARCH64_OPND_SME_ZA_array_off3_0:
4465 case AARCH64_OPND_SME_ZA_array_off3_5:
4466 case AARCH64_OPND_SME_ZA_array_off3x2:
4467 case AARCH64_OPND_SME_ZA_array_off4:
4468 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4469 style_reg (styler, "za%s%s",
4470 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4471 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4472 ? ""
4473 : aarch64_get_qualifier_name (opnd->qualifier))),
4474 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4475 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4476 opnd->indexed_za.index.countm1 ? ":" : "",
4477 (opnd->indexed_za.index.countm1
4478 ? style_imm (styler, "%d",
4479 opnd->indexed_za.index.imm
4480 + opnd->indexed_za.index.countm1)
4481 : ""),
4482 opnd->indexed_za.group_size ? ", " : "",
4483 opnd->indexed_za.group_size == 2
4484 ? style_sub_mnem (styler, "vgx2")
4485 : opnd->indexed_za.group_size == 4
4486 ? style_sub_mnem (styler, "vgx4") : "");
4487 break;
4488
4489 case AARCH64_OPND_SME_ZA_array_vrsb_1:
4490 case AARCH64_OPND_SME_ZA_array_vrsh_1:
4491 case AARCH64_OPND_SME_ZA_array_vrss_1:
4492 case AARCH64_OPND_SME_ZA_array_vrsd_1:
4493 case AARCH64_OPND_SME_ZA_array_vrsb_2:
4494 case AARCH64_OPND_SME_ZA_array_vrsh_2:
4495 case AARCH64_OPND_SME_ZA_array_vrss_2:
4496 case AARCH64_OPND_SME_ZA_array_vrsd_2:
4497 case AARCH64_OPND_SME_ZA_ARRAY4:
4498 snprintf (buf, size, "%s [%s, %s%s%s]",
4499 style_reg (styler, "za%d%c%s%s",
4500 opnd->indexed_za.regno,
4501 opnd->indexed_za.v ? 'v': 'h',
4502 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4503 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4504 ? ""
4505 : aarch64_get_qualifier_name (opnd->qualifier))),
4506 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4507 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4508 opnd->indexed_za.index.countm1 ? ":" : "",
4509 opnd->indexed_za.index.countm1 ? style_imm (styler, "%d",
4510 opnd->indexed_za.index.imm
4511 + opnd->indexed_za.index.countm1):"");
4512 break;
4513
4514 case AARCH64_OPND_SME_SM_ZA:
4515 snprintf (buf, size, "%s",
4516 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4517 break;
4518
4519 case AARCH64_OPND_SME_PnT_Wm_imm:
4520 snprintf (buf, size, "%s[%s, %s]",
4521 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4522 aarch64_get_qualifier_name (opnd->qualifier)),
4523 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4524 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4525 break;
4526
4527 case AARCH64_OPND_SME_VLxN_10:
4528 case AARCH64_OPND_SME_VLxN_13:
4529 enum_value = opnd->imm.value;
4530 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4531 snprintf (buf, size, "%s",
4532 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4533 break;
4534
4535 case AARCH64_OPND_BRBOP:
4536 enum_value = opnd->imm.value;
4537 assert (enum_value < ARRAY_SIZE (aarch64_brbop_array));
4538 snprintf (buf, size, "%s",
4539 style_sub_mnem (styler, aarch64_brbop_array[enum_value]));
4540 break;
4541
4542 case AARCH64_OPND_CRn:
4543 case AARCH64_OPND_CRm:
4544 snprintf (buf, size, "%s",
4545 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4546 break;
4547
4548 case AARCH64_OPND_IDX:
4549 case AARCH64_OPND_MASK:
4550 case AARCH64_OPND_IMM:
4551 case AARCH64_OPND_IMM_2:
4552 case AARCH64_OPND_WIDTH:
4553 case AARCH64_OPND_UIMM3_OP1:
4554 case AARCH64_OPND_UIMM3_OP2:
4555 case AARCH64_OPND_BIT_NUM:
4556 case AARCH64_OPND_IMM_VLSL:
4557 case AARCH64_OPND_IMM_VLSR:
4558 case AARCH64_OPND_SHLL_IMM:
4559 case AARCH64_OPND_IMM0:
4560 case AARCH64_OPND_IMMR:
4561 case AARCH64_OPND_IMMS:
4562 case AARCH64_OPND_UNDEFINED:
4563 case AARCH64_OPND_FBITS:
4564 case AARCH64_OPND_TME_UIMM16:
4565 case AARCH64_OPND_SIMM5:
4566 case AARCH64_OPND_SME_SHRIMM4:
4567 case AARCH64_OPND_SME_SHRIMM5:
4568 case AARCH64_OPND_SVE_SHLIMM_PRED:
4569 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4570 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4571 case AARCH64_OPND_SVE_SHRIMM_PRED:
4572 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4573 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4574 case AARCH64_OPND_SVE_SIMM5:
4575 case AARCH64_OPND_SVE_SIMM5B:
4576 case AARCH64_OPND_SVE_SIMM6:
4577 case AARCH64_OPND_SVE_SIMM8:
4578 case AARCH64_OPND_SVE_UIMM3:
4579 case AARCH64_OPND_SVE_UIMM7:
4580 case AARCH64_OPND_SVE_UIMM8:
4581 case AARCH64_OPND_SVE_UIMM4:
4582 case AARCH64_OPND_SVE_UIMM8_53:
4583 case AARCH64_OPND_IMM_ROT1:
4584 case AARCH64_OPND_IMM_ROT2:
4585 case AARCH64_OPND_IMM_ROT3:
4586 case AARCH64_OPND_SVE_IMM_ROT1:
4587 case AARCH64_OPND_SVE_IMM_ROT2:
4588 case AARCH64_OPND_SVE_IMM_ROT3:
4589 case AARCH64_OPND_CSSC_SIMM8:
4590 case AARCH64_OPND_CSSC_UIMM8:
4591 snprintf (buf, size, "%s",
4592 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4593 break;
4594
4595 case AARCH64_OPND_SVE_I1_HALF_ONE:
4596 case AARCH64_OPND_SVE_I1_HALF_TWO:
4597 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4598 {
4599 single_conv_t c;
4600 c.i = opnd->imm.value;
4601 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4602 break;
4603 }
4604
4605 case AARCH64_OPND_SVE_PATTERN:
4606 if (optional_operand_p (opcode, idx)
4607 && opnd->imm.value == get_optional_operand_default_value (opcode))
4608 break;
4609 enum_value = opnd->imm.value;
4610 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4611 if (aarch64_sve_pattern_array[enum_value])
4612 snprintf (buf, size, "%s",
4613 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4614 else
4615 snprintf (buf, size, "%s",
4616 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4617 break;
4618
4619 case AARCH64_OPND_SVE_PATTERN_SCALED:
4620 if (optional_operand_p (opcode, idx)
4621 && !opnd->shifter.operator_present
4622 && opnd->imm.value == get_optional_operand_default_value (opcode))
4623 break;
4624 enum_value = opnd->imm.value;
4625 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4626 if (aarch64_sve_pattern_array[opnd->imm.value])
4627 snprintf (buf, size, "%s",
4628 style_reg (styler,
4629 aarch64_sve_pattern_array[opnd->imm.value]));
4630 else
4631 snprintf (buf, size, "%s",
4632 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4633 if (opnd->shifter.operator_present)
4634 {
4635 size_t len = strlen (buf);
4636 const char *shift_name
4637 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4638 snprintf (buf + len, size - len, ", %s %s",
4639 style_sub_mnem (styler, shift_name),
4640 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4641 }
4642 break;
4643
4644 case AARCH64_OPND_SVE_PRFOP:
4645 enum_value = opnd->imm.value;
4646 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4647 if (aarch64_sve_prfop_array[enum_value])
4648 snprintf (buf, size, "%s",
4649 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4650 else
4651 snprintf (buf, size, "%s",
4652 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4653 break;
4654
4655 case AARCH64_OPND_IMM_MOV:
4656 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4657 {
4658 case 4: /* e.g. MOV Wd, #<imm32>. */
4659 {
4660 int imm32 = opnd->imm.value;
4661 snprintf (buf, size, "%s",
4662 style_imm (styler, "#0x%-20x", imm32));
4663 snprintf (comment, comment_size, "#%d", imm32);
4664 }
4665 break;
4666 case 8: /* e.g. MOV Xd, #<imm64>. */
4667 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4668 opnd->imm.value));
4669 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4670 break;
4671 default:
4672 snprintf (buf, size, "<invalid>");
4673 break;
4674 }
4675 break;
4676
4677 case AARCH64_OPND_FPIMM0:
4678 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4679 break;
4680
4681 case AARCH64_OPND_LIMM:
4682 case AARCH64_OPND_AIMM:
4683 case AARCH64_OPND_HALF:
4684 case AARCH64_OPND_SVE_INV_LIMM:
4685 case AARCH64_OPND_SVE_LIMM:
4686 case AARCH64_OPND_SVE_LIMM_MOV:
4687 if (opnd->shifter.amount)
4688 snprintf (buf, size, "%s, %s %s",
4689 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4690 style_sub_mnem (styler, "lsl"),
4691 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4692 else
4693 snprintf (buf, size, "%s",
4694 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4695 break;
4696
4697 case AARCH64_OPND_SIMD_IMM:
4698 case AARCH64_OPND_SIMD_IMM_SFT:
4699 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4700 || opnd->shifter.kind == AARCH64_MOD_NONE)
4701 snprintf (buf, size, "%s",
4702 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4703 else
4704 snprintf (buf, size, "%s, %s %s",
4705 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4706 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4707 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4708 break;
4709
4710 case AARCH64_OPND_SVE_AIMM:
4711 case AARCH64_OPND_SVE_ASIMM:
4712 if (opnd->shifter.amount)
4713 snprintf (buf, size, "%s, %s %s",
4714 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4715 style_sub_mnem (styler, "lsl"),
4716 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4717 else
4718 snprintf (buf, size, "%s",
4719 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4720 break;
4721
4722 case AARCH64_OPND_FPIMM:
4723 case AARCH64_OPND_SIMD_FPIMM:
4724 case AARCH64_OPND_SVE_FPIMM8:
4725 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4726 {
4727 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4728 {
4729 half_conv_t c;
4730 c.i = expand_fp_imm (2, opnd->imm.value);
4731 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4732 }
4733 break;
4734 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4735 {
4736 single_conv_t c;
4737 c.i = expand_fp_imm (4, opnd->imm.value);
4738 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4739 }
4740 break;
4741 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4742 {
4743 double_conv_t c;
4744 c.i = expand_fp_imm (8, opnd->imm.value);
4745 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4746 }
4747 break;
4748 default:
4749 snprintf (buf, size, "<invalid>");
4750 break;
4751 }
4752 break;
4753
4754 case AARCH64_OPND_CCMP_IMM:
4755 case AARCH64_OPND_NZCV:
4756 case AARCH64_OPND_EXCEPTION:
4757 case AARCH64_OPND_UIMM4:
4758 case AARCH64_OPND_UIMM4_ADDG:
4759 case AARCH64_OPND_UIMM7:
4760 case AARCH64_OPND_UIMM10:
4761 if (optional_operand_p (opcode, idx)
4762 && (opnd->imm.value ==
4763 (int64_t) get_optional_operand_default_value (opcode)))
4764 /* Omit the operand, e.g. DCPS1. */
4765 break;
4766 snprintf (buf, size, "%s",
4767 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4768 break;
4769
4770 case AARCH64_OPND_COND:
4771 case AARCH64_OPND_COND1:
4772 snprintf (buf, size, "%s",
4773 style_sub_mnem (styler, opnd->cond->names[0]));
4774 num_conds = ARRAY_SIZE (opnd->cond->names);
4775 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4776 {
4777 size_t len = comment != NULL ? strlen (comment) : 0;
4778 if (i == 1)
4779 snprintf (comment + len, comment_size - len, "%s = %s",
4780 opnd->cond->names[0], opnd->cond->names[i]);
4781 else
4782 snprintf (comment + len, comment_size - len, ", %s",
4783 opnd->cond->names[i]);
4784 }
4785 break;
4786
4787 case AARCH64_OPND_ADDR_ADRP:
4788 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4789 + opnd->imm.value;
4790 if (pcrel_p)
4791 *pcrel_p = 1;
4792 if (address)
4793 *address = addr;
4794 /* This is not necessary during the disassembling, as print_address_func
4795 in the disassemble_info will take care of the printing. But some
4796 other callers may be still interested in getting the string in *STR,
4797 so here we do snprintf regardless. */
4798 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4799 break;
4800
4801 case AARCH64_OPND_ADDR_PCREL9:
4802 case AARCH64_OPND_ADDR_PCREL14:
4803 case AARCH64_OPND_ADDR_PCREL19:
4804 case AARCH64_OPND_ADDR_PCREL21:
4805 case AARCH64_OPND_ADDR_PCREL26:
4806 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4807 if (pcrel_p)
4808 *pcrel_p = 1;
4809 if (address)
4810 *address = addr;
4811 /* This is not necessary during the disassembling, as print_address_func
4812 in the disassemble_info will take care of the printing. But some
4813 other callers may be still interested in getting the string in *STR,
4814 so here we do snprintf regardless. */
4815 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4816 break;
4817
4818 case AARCH64_OPND_ADDR_SIMPLE:
4819 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4820 case AARCH64_OPND_SIMD_ADDR_POST:
4821 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4822 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4823 {
4824 if (opnd->addr.offset.is_reg)
4825 snprintf (buf, size, "[%s], %s",
4826 style_reg (styler, name),
4827 style_reg (styler, "x%d", opnd->addr.offset.regno));
4828 else
4829 snprintf (buf, size, "[%s], %s",
4830 style_reg (styler, name),
4831 style_imm (styler, "#%d", opnd->addr.offset.imm));
4832 }
4833 else
4834 snprintf (buf, size, "[%s]", style_reg (styler, name));
4835 break;
4836
4837 case AARCH64_OPND_ADDR_REGOFF:
4838 case AARCH64_OPND_SVE_ADDR_RR:
4839 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4840 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4841 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4842 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4843 case AARCH64_OPND_SVE_ADDR_RM:
4844 case AARCH64_OPND_SVE_ADDR_RM_LSL1:
4845 case AARCH64_OPND_SVE_ADDR_RM_LSL2:
4846 case AARCH64_OPND_SVE_ADDR_RM_LSL3:
4847 case AARCH64_OPND_SVE_ADDR_RM_LSL4:
4848 case AARCH64_OPND_SVE_ADDR_RX:
4849 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4850 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4851 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4852 case AARCH64_OPND_SVE_ADDR_RX_LSL4:
4853 print_register_offset_address
4854 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4855 get_offset_int_reg_name (opnd), styler);
4856 break;
4857
4858 case AARCH64_OPND_SVE_ADDR_ZX:
4859 print_register_offset_address
4860 (buf, size, opnd,
4861 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4862 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4863 break;
4864
4865 case AARCH64_OPND_SVE_ADDR_RZ:
4866 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4867 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4868 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4869 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4870 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4871 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4872 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4873 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4874 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4875 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4876 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4877 print_register_offset_address
4878 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4879 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4880 styler);
4881 break;
4882
4883 case AARCH64_OPND_ADDR_SIMM7:
4884 case AARCH64_OPND_ADDR_SIMM9:
4885 case AARCH64_OPND_ADDR_SIMM9_2:
4886 case AARCH64_OPND_ADDR_SIMM10:
4887 case AARCH64_OPND_ADDR_SIMM11:
4888 case AARCH64_OPND_ADDR_SIMM13:
4889 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
4890 case AARCH64_OPND_ADDR_OFFSET:
4891 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
4892 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
4893 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
4894 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
4895 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4896 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4897 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4898 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4899 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4900 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4901 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4902 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4903 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4904 case AARCH64_OPND_SVE_ADDR_RI_U6:
4905 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4906 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4907 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4908 print_immediate_offset_address
4909 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4910 styler);
4911 break;
4912
4913 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4914 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4915 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4916 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4917 print_immediate_offset_address
4918 (buf, size, opnd,
4919 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4920 styler);
4921 break;
4922
4923 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4924 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4925 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4926 print_register_offset_address
4927 (buf, size, opnd,
4928 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4929 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4930 styler);
4931 break;
4932
4933 case AARCH64_OPND_ADDR_UIMM12:
4934 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4935 if (opnd->addr.offset.imm)
4936 snprintf (buf, size, "[%s, %s]",
4937 style_reg (styler, name),
4938 style_imm (styler, "#%d", opnd->addr.offset.imm));
4939 else
4940 snprintf (buf, size, "[%s]", style_reg (styler, name));
4941 break;
4942
4943 case AARCH64_OPND_SYSREG:
4944 case AARCH64_OPND_SYSREG128:
4945 for (i = 0; aarch64_sys_regs[i].name; ++i)
4946 {
4947 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4948
4949 bool exact_match
4950 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4951 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4952 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4953
4954 /* Try and find an exact match, But if that fails, return the first
4955 partial match that was found. */
4956 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4957 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4958 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4959 && (name == NULL || exact_match))
4960 {
4961 name = aarch64_sys_regs[i].name;
4962 if (exact_match)
4963 {
4964 if (notes)
4965 *notes = NULL;
4966 break;
4967 }
4968
4969 /* If we didn't match exactly, that means the presense of a flag
4970 indicates what we didn't want for this instruction. e.g. If
4971 F_REG_READ is there, that means we were looking for a write
4972 register. See aarch64_ext_sysreg. */
4973 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4974 *notes = _("reading from a write-only register");
4975 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4976 *notes = _("writing to a read-only register");
4977 }
4978 }
4979
4980 if (name)
4981 snprintf (buf, size, "%s", style_reg (styler, name));
4982 else
4983 {
4984 /* Implementation defined system register. */
4985 unsigned int value = opnd->sysreg.value;
4986 snprintf (buf, size, "%s",
4987 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4988 (value >> 14) & 0x3, (value >> 11) & 0x7,
4989 (value >> 7) & 0xf, (value >> 3) & 0xf,
4990 value & 0x7));
4991 }
4992 break;
4993
4994 case AARCH64_OPND_PSTATEFIELD:
4995 for (i = 0; aarch64_pstatefields[i].name; ++i)
4996 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4997 {
4998 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4999 SVCRZA and SVCRSMZA. */
5000 uint32_t flags = aarch64_pstatefields[i].flags;
5001 if (flags & F_REG_IN_CRM
5002 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
5003 != PSTATE_DECODE_CRM (flags)))
5004 continue;
5005 break;
5006 }
5007 assert (aarch64_pstatefields[i].name);
5008 snprintf (buf, size, "%s",
5009 style_reg (styler, aarch64_pstatefields[i].name));
5010 break;
5011
5012 case AARCH64_OPND_SYSREG_AT:
5013 case AARCH64_OPND_SYSREG_DC:
5014 case AARCH64_OPND_SYSREG_IC:
5015 case AARCH64_OPND_SYSREG_TLBI:
5016 case AARCH64_OPND_SYSREG_TLBIP:
5017 case AARCH64_OPND_SYSREG_SR:
5018 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
5019 break;
5020
5021 case AARCH64_OPND_BARRIER:
5022 case AARCH64_OPND_BARRIER_DSB_NXS:
5023 {
5024 if (opnd->barrier->name[0] == '#')
5025 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
5026 else
5027 snprintf (buf, size, "%s",
5028 style_sub_mnem (styler, opnd->barrier->name));
5029 }
5030 break;
5031
5032 case AARCH64_OPND_BARRIER_ISB:
5033 /* Operand can be omitted, e.g. in DCPS1. */
5034 if (! optional_operand_p (opcode, idx)
5035 || (opnd->barrier->value
5036 != get_optional_operand_default_value (opcode)))
5037 snprintf (buf, size, "%s",
5038 style_imm (styler, "#0x%x", opnd->barrier->value));
5039 break;
5040
5041 case AARCH64_OPND_PRFOP:
5042 if ((opnd->prfop->name == NULL)
5043 || (opcode->iclass != ldst_pos && opnd->prfop->value == 0x18))
5044 snprintf (buf, size, "%s",
5045 style_imm (styler, "#0x%02x", opnd->prfop->value));
5046 else
5047 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
5048 break;
5049
5050 case AARCH64_OPND_RPRFMOP:
5051 enum_value = opnd->imm.value;
5052 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
5053 && aarch64_rprfmop_array[enum_value])
5054 snprintf (buf, size, "%s",
5055 style_reg (styler, aarch64_rprfmop_array[enum_value]));
5056 else
5057 snprintf (buf, size, "%s",
5058 style_imm (styler, "#%" PRIi64, opnd->imm.value));
5059 break;
5060
5061 case AARCH64_OPND_BARRIER_PSB:
5062 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
5063 break;
5064
5065 case AARCH64_OPND_X16:
5066 snprintf (buf, size, "%s", style_reg (styler, "x16"));
5067 break;
5068
5069 case AARCH64_OPND_SME_ZT0:
5070 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
5071 break;
5072
5073 case AARCH64_OPND_SME_ZT0_INDEX:
5074 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
5075 style_imm (styler, "%d", (int) opnd->imm.value));
5076 break;
5077 case AARCH64_OPND_SME_ZT0_INDEX_MUL_VL:
5078 snprintf (buf, size, "%s[%s, %s]", style_reg (styler, "zt0"),
5079 style_imm (styler, "%d", (int) opnd->imm.value),
5080 style_sub_mnem (styler, "mul vl"));
5081 break;
5082
5083 case AARCH64_OPND_SME_ZT0_LIST:
5084 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
5085 break;
5086
5087 case AARCH64_OPND_BARRIER_GCSB:
5088 snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
5089 break;
5090
5091 case AARCH64_OPND_BTI_TARGET:
5092 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
5093 snprintf (buf, size, "%s",
5094 style_sub_mnem (styler, opnd->hint_option->name));
5095 break;
5096
5097 case AARCH64_OPND_STSHH_POLICY:
5098 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->hint_option->name));
5099 break;
5100
5101 case AARCH64_OPND_MOPS_ADDR_Rd:
5102 case AARCH64_OPND_MOPS_ADDR_Rs:
5103 snprintf (buf, size, "[%s]!",
5104 style_reg (styler,
5105 get_int_reg_name (opnd->reg.regno,
5106 AARCH64_OPND_QLF_X, 0)));
5107 break;
5108
5109 case AARCH64_OPND_MOPS_WB_Rn:
5110 snprintf (buf, size, "%s!",
5111 style_reg (styler, get_int_reg_name (opnd->reg.regno,
5112 AARCH64_OPND_QLF_X, 0)));
5113 break;
5114
5115 default:
5116 snprintf (buf, size, "<invalid>");
5117 break;
5118 }
5119 }
5120
/* Pack a system register / system instruction encoding from its raw
   op0/op1/CRn/CRm/op2 fields.  After the final >> 5 the fields land at:
   op0 -> bit 14, op1 -> bit 11, CRn -> bit 7, CRm -> bit 3, op2 -> bit 0
   (matching the field extraction used when printing implementation-defined
   system registers above).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm field values used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
5145
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
/* Table of named system registers, expanded from aarch64-sys-regs.def and
   terminated by a null-name sentinel entry.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
5159
5160 bool
5161 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5162 {
5163 return (reg_flags & F_DEPRECATED) != 0;
5164 }
5165
5166 bool
5167 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
5168 {
5169 return (reg_flags & F_REG_128) != 0;
5170 }
5171
5172 bool
5173 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
5174 {
5175 return (reg_flags & F_REG_ALIAS) != 0;
5176 }
5177
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2.  CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
/* PSTATE fields for MSR (immediate).  The SVCR* entries share encoding 0x1b
   and are disambiguated through the CRm bits recorded by
   PSTATE_ENCODE_CRM_AND_IMM (see the F_REG_IN_CRM handling above).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
5204
5205 bool
5206 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5207 const aarch64_sys_reg *reg)
5208 {
5209 if (!(reg->flags & F_ARCHEXT))
5210 return true;
5211
5212 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5213 }
5214
/* Operands of the IC (instruction cache maintenance) system instruction.
   F_HASXT marks operations that take an Xt address register.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES },
    { "iallu", CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES },
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5222
/* Operands of the DC (data cache maintenance) system instruction.
   Entries gated on F_ARCHEXT are only valid when the listed architecture
   features are enabled; F_HASXT marks operations taking an Xt register.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvaps", CPENS (0, C7, C15, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURES (2, MEMTAG, PoPS) },
    { "civaps", CPENS (0, C7, C15, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (PoPS) },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvaoc", CPENS (3, C7, C11, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (OCCMO) },
    { "cgdvaoc", CPENS (3, C7, C11, 7), F_HASXT | F_ARCHEXT, AARCH64_FEATURES (2, OCCMO, MEMTAG) },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (CVADP) },
    { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "civaoc", CPENS (3, C7, C15, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (OCCMO) },
    { "cigdvaoc", CPENS (3, C7, C15, 7), F_HASXT | F_ARCHEXT, AARCH64_FEATURES (2, OCCMO, MEMTAG) },
    { "cipae", CPENS (4, C7, C14, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_7A) },
    { "cigdpae", CPENS (4, C7, C14, 7), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_7A) },
    { "cipapa", CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES },
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5265
/* Operands of the AT (address translation) system instruction.  All
   operations take an Xt register (F_HASXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1a", CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e2a", CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e3a", CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5287
/* Operands of the TLBI (TLB invalidation) system instruction.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "rpaos", CPENS (6, C8, C4, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "rpalos", CPENS (6, C8, C4, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "paallos", CPENS (6, C8, C1, 4), 0, AARCH64_NO_FEATURES },
    { "paall", CPENS (6, C8, C7, 4), 0, AARCH64_NO_FEATURES },

/* Emit both the base operation and its FEAT_XS "nxs" variant, which sets
   CRn to C9 and requires the XS feature.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
  { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \
  { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1", CPENS (0, C8, C7, 0), 0)
    TLBI_XS_OP ( "vae1", CPENS (0, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1", CPENS (0, C8, C7, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1", CPENS (0, C8, C7, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), 0)
    TLBI_XS_OP ( "vae1is", CPENS (0, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1is", CPENS (0, C8, C3, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1is", CPENS (0, C8, C3, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2", CPENS (4, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2is", CPENS (4, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0)
    TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), 0)
    TLBI_XS_OP ( "vae3", CPENS (6, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae3is", CPENS (6, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "alle2", CPENS (4, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle2is", CPENS (4, C8, C3, 0), 0)
    TLBI_XS_OP ( "alle1", CPENS (4, C8, C7, 4), 0)
    TLBI_XS_OP ( "alle1is", CPENS (4, C8, C3, 4), 0)
    TLBI_XS_OP ( "alle3", CPENS (6, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle3is", CPENS (6, C8, C3, 0), 0)
    TLBI_XS_OP ( "vale1is", CPENS (0, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2is", CPENS (4, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3is", CPENS (6, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1is", CPENS (0, C8, C3, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1", CPENS (0, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2", CPENS (4, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3", CPENS (6, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1", CPENS (0, C8, C7, 7), F_HASXT | F_REG_128)

#undef TLBI_XS_OP
/* As above, but the base operation itself is gated on Armv8.4-A (the
   outer-shareable and range operations).  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
  { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \
  { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1os", CPENS (0, C8, C1, 0), 0 )
    TLBI_XS_OP ( "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "aside1os", CPENS (0, C8, C1, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), 0 )
    TLBI_XS_OP ( "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "alle2os", CPENS (4, C8, C1, 0), 0 )
    TLBI_XS_OP ( "alle1os", CPENS (4, C8, C1, 4), 0 )
    TLBI_XS_OP ( "alle3os", CPENS (6, C8, C1, 0), 0 )

    TLBI_XS_OP ( "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 )

#undef TLBI_XS_OP

    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5389
/* Operands of the speculation-restriction (CFP/DVP/CPP) system
   instructions.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5399
5400 bool
5401 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5402 {
5403 return (sys_ins_reg->flags & F_HASXT) != 0;
5404 }
5405
5406 extern bool
5407 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5408 const char *reg_name,
5409 uint32_t reg_flags,
5410 const aarch64_feature_set *reg_features)
5411 {
5412 /* Armv8-R has no EL3. */
5413 if (AARCH64_CPU_HAS_FEATURE (features, V8R))
5414 {
5415 const char *suffix = strrchr (reg_name, '_');
5416 if (suffix && !strcmp (suffix, "_el3"))
5417 return false;
5418 }
5419
5420 if (!(reg_flags & F_ARCHEXT))
5421 return true;
5422
5423 return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features);
5424 }
5425
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* BIT extracts bit BT of INSN; BITS extracts the inclusive bit-field
   [HI:LO] of INSN.  Used by the instruction verifiers below.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5445
5446 static enum err_type
5447 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5448 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5449 bool encoding ATTRIBUTE_UNUSED,
5450 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5451 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5452 {
5453 int t = BITS (insn, 4, 0);
5454 int n = BITS (insn, 9, 5);
5455 int t2 = BITS (insn, 14, 10);
5456
5457 if (BIT (insn, 23))
5458 {
5459 /* Write back enabled. */
5460 if ((t == n || t2 == n) && n != 31)
5461 return ERR_UND;
5462 }
5463
5464 if (BIT (insn, 22))
5465 {
5466 /* Load */
5467 if (t == t2)
5468 return ERR_UND;
5469 }
5470
5471 return ERR_OK;
5472 }
5473
5474 /* Verifier for vector by element 3 operands functions where the
5475 conditions `if sz:L == 11 then UNDEFINED` holds. */
5476
5477 static enum err_type
5478 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5479 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5480 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5481 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5482 {
5483 const aarch64_insn undef_pattern = 0x3;
5484 aarch64_insn value;
5485
5486 assert (inst->opcode);
5487 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5488 value = encoding ? inst->value : insn;
5489 assert (value);
5490
5491 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5492 return ERR_UND;
5493
5494 return ERR_OK;
5495 }
5496
5497 /* Check an instruction that takes three register operands and that
5498 requires the register numbers to be distinct from one another. */
5499
5500 static enum err_type
5501 verify_three_different_regs (const struct aarch64_inst *inst,
5502 const aarch64_insn insn ATTRIBUTE_UNUSED,
5503 bfd_vma pc ATTRIBUTE_UNUSED,
5504 bool encoding ATTRIBUTE_UNUSED,
5505 aarch64_operand_error *mismatch_detail
5506 ATTRIBUTE_UNUSED,
5507 aarch64_instr_sequence *insn_sequence
5508 ATTRIBUTE_UNUSED)
5509 {
5510 int rd, rs, rn;
5511
5512 rd = inst->operands[0].reg.regno;
5513 rs = inst->operands[1].reg.regno;
5514 rn = inst->operands[2].reg.regno;
5515 if (rd == rs || rd == rn || rs == rn)
5516 {
5517 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5518 mismatch_detail->error
5519 = _("the three register operands must be distinct from one another");
5520 mismatch_detail->index = -1;
5521 return ERR_UND;
5522 }
5523
5524 return ERR_OK;
5525 }
5526
5527 /* Add INST to the end of INSN_SEQUENCE. */
5528
5529 static void
5530 add_insn_to_sequence (const struct aarch64_inst *inst,
5531 aarch64_instr_sequence *insn_sequence)
5532 {
5533 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5534 }
5535
5536 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5537 If INST is NULL the given insn_sequence is cleared and the sequence is left
5538 uninitialized. */
5539
5540 void
5541 init_insn_sequence (const struct aarch64_inst *inst,
5542 aarch64_instr_sequence *insn_sequence)
5543 {
5544 int num_req_entries = 0;
5545
5546 if (insn_sequence->instr)
5547 {
5548 XDELETE (insn_sequence->instr);
5549 insn_sequence->instr = NULL;
5550 }
5551
5552 /* Handle all the cases here. May need to think of something smarter than
5553 a giant if/else chain if this grows. At that time, a lookup table may be
5554 best. */
5555 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5556 num_req_entries = 1;
5557 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5558 num_req_entries = 2;
5559
5560 insn_sequence->num_added_insns = 0;
5561 insn_sequence->num_allocated_insns = num_req_entries;
5562
5563 if (num_req_entries != 0)
5564 {
5565 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5566 add_insn_to_sequence (inst, insn_sequence);
5567 }
5568 }
5569
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* The last instruction added to the sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The P/M/E members of a MOPS group are consecutive entries in the
     opcode table, so `opcode - 1' / `opcode[1]' name the expected
     neighbours.  If the previous instruction opened a MOPS sequence and
     INST is not its successor entry, report what was expected.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-initial member of a sequence must directly follow its
	 predecessor opcode; a section boundary breaks the sequence.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The destination, source and size registers must match those of
	 the preceding instruction in the sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5646
5647 /* This function verifies that the instruction INST adheres to its specified
5648 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5649 returned and MISMATCH_DETAIL contains the reason why verification failed.
5650
5651 The function is called both during assembly and disassembly. If assembling
5652 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5653 and will contain the PC of the current instruction w.r.t to the section.
5654
5655 If ENCODING and PC=0 then you are at a start of a section. The constraints
5656 are verified against the given state insn_sequence which is updated as it
5657 transitions through the verification. */
5658
5659 enum err_type
5660 verify_constraints (const struct aarch64_inst *inst,
5661 const aarch64_insn insn ATTRIBUTE_UNUSED,
5662 bfd_vma pc,
5663 bool encoding,
5664 aarch64_operand_error *mismatch_detail,
5665 aarch64_instr_sequence *insn_sequence)
5666 {
5667 assert (inst);
5668 assert (inst->opcode);
5669
5670 const struct aarch64_opcode *opcode = inst->opcode;
5671 if (!opcode->constraints && !insn_sequence->instr)
5672 return ERR_OK;
5673
5674 assert (insn_sequence);
5675
5676 enum err_type res = ERR_OK;
5677
5678 /* This instruction puts a constraint on the insn_sequence. */
5679 if (opcode->flags & F_SCAN)
5680 {
5681 if (insn_sequence->instr)
5682 {
5683 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5684 mismatch_detail->error = _("instruction opens new dependency "
5685 "sequence without ending previous one");
5686 mismatch_detail->index = -1;
5687 mismatch_detail->non_fatal = true;
5688 res = ERR_VFI;
5689 }
5690
5691 init_insn_sequence (inst, insn_sequence);
5692 return res;
5693 }
5694
5695 bool is_new_section = (!encoding && pc == 0);
5696 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5697 insn_sequence))
5698 {
5699 res = ERR_VFI;
5700 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5701 init_insn_sequence (NULL, insn_sequence);
5702 }
5703
5704 /* Verify constraints on an existing sequence. */
5705 if (insn_sequence->instr)
5706 {
5707 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5708 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5709 closed a previous one that we should have. */
5710 if (is_new_section && res == ERR_OK)
5711 {
5712 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5713 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5714 mismatch_detail->index = -1;
5715 mismatch_detail->non_fatal = true;
5716 res = ERR_VFI;
5717 /* Reset the sequence. */
5718 init_insn_sequence (NULL, insn_sequence);
5719 return res;
5720 }
5721
5722 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5723 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5724 {
5725 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5726 instruction for better error messages. */
5727 bool sve_operand_p = false;
5728 for (int i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5729 {
5730 enum aarch64_operand_class op_class
5731 = aarch64_get_operand_class (opcode->operands[i]);
5732 if (op_class == AARCH64_OPND_CLASS_SVE_REG
5733 || op_class == AARCH64_OPND_CLASS_SVE_REGLIST
5734 || op_class == AARCH64_OPND_CLASS_PRED_REG)
5735 {
5736 sve_operand_p = true;
5737 break;
5738 }
5739 }
5740
5741 if (!sve_operand_p)
5742 {
5743 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5744 mismatch_detail->error = _("SVE instruction expected after "
5745 "`movprfx'");
5746 mismatch_detail->index = -1;
5747 mismatch_detail->non_fatal = true;
5748 res = ERR_VFI;
5749 goto done;
5750 }
5751
5752 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5753 instruction that is allowed to be used with a MOVPRFX. */
5754 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5755 {
5756 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5757 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5758 "expected");
5759 mismatch_detail->index = -1;
5760 mismatch_detail->non_fatal = true;
5761 res = ERR_VFI;
5762 goto done;
5763 }
5764
5765 /* Next check for usage of the predicate register. */
5766 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5767 aarch64_opnd_info blk_pred, inst_pred;
5768 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5769 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5770 bool predicated = false;
5771 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5772
5773 /* Determine if the movprfx instruction used is predicated or not. */
5774 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5775 {
5776 predicated = true;
5777 blk_pred = insn_sequence->instr->operands[1];
5778 }
5779
5780 unsigned char max_elem_size = 0;
5781 unsigned char current_elem_size;
5782 int num_op_used = 0, last_op_usage = 0;
5783 int i, inst_pred_idx = -1;
5784 int num_ops = aarch64_num_of_operands (opcode);
5785 for (i = 0; i < num_ops; i++)
5786 {
5787 aarch64_opnd_info inst_op = inst->operands[i];
5788 switch (inst_op.type)
5789 {
5790 case AARCH64_OPND_SVE_Zd:
5791 case AARCH64_OPND_SVE_Zm_5:
5792 case AARCH64_OPND_SVE_Zm_16:
5793 case AARCH64_OPND_SVE_Zn:
5794 case AARCH64_OPND_SVE_Zt:
5795 case AARCH64_OPND_SVE_Vm:
5796 case AARCH64_OPND_SVE_Vn:
5797 case AARCH64_OPND_Va:
5798 case AARCH64_OPND_Vn:
5799 case AARCH64_OPND_Vm:
5800 case AARCH64_OPND_Sn:
5801 case AARCH64_OPND_Sm:
5802 if (inst_op.reg.regno == blk_dest.reg.regno)
5803 {
5804 num_op_used++;
5805 last_op_usage = i;
5806 }
5807 current_elem_size
5808 = aarch64_get_qualifier_esize (inst_op.qualifier);
5809 if (current_elem_size > max_elem_size)
5810 max_elem_size = current_elem_size;
5811 break;
5812 case AARCH64_OPND_SVE_Pd:
5813 case AARCH64_OPND_SVE_Pg3:
5814 case AARCH64_OPND_SVE_Pg4_5:
5815 case AARCH64_OPND_SVE_Pg4_10:
5816 case AARCH64_OPND_SVE_Pg4_16:
5817 case AARCH64_OPND_SVE_Pm:
5818 case AARCH64_OPND_SVE_Pn:
5819 case AARCH64_OPND_SVE_Pt:
5820 case AARCH64_OPND_SME_Pm:
5821 inst_pred = inst_op;
5822 inst_pred_idx = i;
5823 break;
5824 default:
5825 break;
5826 }
5827 }
5828
5829 assert (max_elem_size != 0);
5830 aarch64_opnd_info inst_dest = inst->operands[0];
5831 /* Determine the size that should be used to compare against the
5832 movprfx size. */
5833 current_elem_size
5834 = opcode->constraints & C_MAX_ELEM
5835 ? max_elem_size
5836 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5837
5838 /* If movprfx is predicated do some extra checks. */
5839 if (predicated)
5840 {
5841 /* The instruction must be predicated. */
5842 if (inst_pred_idx < 0)
5843 {
5844 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5845 mismatch_detail->error = _("predicated instruction expected "
5846 "after `movprfx'");
5847 mismatch_detail->index = -1;
5848 mismatch_detail->non_fatal = true;
5849 res = ERR_VFI;
5850 goto done;
5851 }
5852
5853 /* The instruction must have a merging predicate. */
5854 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5855 {
5856 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5857 mismatch_detail->error = _("merging predicate expected due "
5858 "to preceding `movprfx'");
5859 mismatch_detail->index = inst_pred_idx;
5860 mismatch_detail->non_fatal = true;
5861 res = ERR_VFI;
5862 goto done;
5863 }
5864
5865 /* The same register must be used in instruction. */
5866 if (blk_pred.reg.regno != inst_pred.reg.regno)
5867 {
5868 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5869 mismatch_detail->error = _("predicate register differs "
5870 "from that in preceding "
5871 "`movprfx'");
5872 mismatch_detail->index = inst_pred_idx;
5873 mismatch_detail->non_fatal = true;
5874 res = ERR_VFI;
5875 goto done;
5876 }
5877 }
5878
5879 /* Destructive operations by definition must allow one usage of the
5880 same register. */
5881 int allowed_usage
5882 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5883
5884 /* Operand is not used at all. */
5885 if (num_op_used == 0)
5886 {
5887 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5888 mismatch_detail->error = _("output register of preceding "
5889 "`movprfx' not used in current "
5890 "instruction");
5891 mismatch_detail->index = 0;
5892 mismatch_detail->non_fatal = true;
5893 res = ERR_VFI;
5894 goto done;
5895 }
5896
5897 /* We now know it's used, now determine exactly where it's used. */
5898 if (blk_dest.reg.regno != inst_dest.reg.regno)
5899 {
5900 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5901 mismatch_detail->error = _("output register of preceding "
5902 "`movprfx' expected as output");
5903 mismatch_detail->index = 0;
5904 mismatch_detail->non_fatal = true;
5905 res = ERR_VFI;
5906 goto done;
5907 }
5908
5909 /* Operand used more than allowed for the specific opcode type. */
5910 if (num_op_used > allowed_usage)
5911 {
5912 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5913 mismatch_detail->error = _("output register of preceding "
5914 "`movprfx' used as input");
5915 mismatch_detail->index = last_op_usage;
5916 mismatch_detail->non_fatal = true;
5917 res = ERR_VFI;
5918 goto done;
5919 }
5920
5921 /* Now the only thing left is the qualifiers checks. The register
5922 must have the same maximum element size. */
5923 if (inst_dest.qualifier
5924 && blk_dest.qualifier
5925 && current_elem_size
5926 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5927 {
5928 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5929 mismatch_detail->error = _("register size not compatible with "
5930 "previous `movprfx'");
5931 mismatch_detail->index = 0;
5932 mismatch_detail->non_fatal = true;
5933 res = ERR_VFI;
5934 goto done;
5935 }
5936 }
5937
5938 done:
5939 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
5940 /* We've checked the last instruction in the sequence and so
5941 don't need the sequence any more. */
5942 init_insn_sequence (NULL, insn_sequence);
5943 else
5944 add_insn_to_sequence (inst, insn_sequence);
5945 }
5946
5947 return res;
5948 }
5949
5950
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* All bits above the ESIZE-byte element.  Shifting twice by half the
     element width avoids an out-of-range (undefined) shift of 64 when
     ESIZE is 8.  */
  uint64_t top_bits = (uint64_t) -1 << (esize * 4) << (esize * 4);
  int64_t narrowed = (int64_t) uvalue;

  /* UVALUE must be the element either zero-extended or extended with
     all-one bits; anything else is not handled here.  */
  if ((uvalue & ~top_bits) != uvalue && (uvalue | top_bits) != uvalue)
    return false;

  /* Halve the element while both halves agree, keeping the remaining
     half sign-extended in NARROWED.  If the value boils down to a
     single byte, DUP can encode it and DUPM is not wanted.  */
  do
    {
      if (esize > 4 && (uint32_t) uvalue != (uint32_t) (uvalue >> 32))
	break;
      narrowed = (int32_t) uvalue;
      if (esize > 2 && (uint16_t) uvalue != (uint16_t) (uvalue >> 16))
	break;
      narrowed = (int16_t) uvalue;
      if (esize != 1 && (uint8_t) uvalue != (uint8_t) (uvalue >> 8))
	break;
      return false;
    }
  while (0);

  /* Strip a trailing zero byte: DUP's immediate may be shifted left
     by 8, so such values are still in DUP's range if the remaining
     part fits in a signed byte.  */
  if ((narrowed & 0xff) == 0)
    narrowed /= 256;
  return narrowed < -128 || narrowed >= 128;
}
5977
5978 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5979 supports the instruction described by INST. */
5980
5981 bool
5982 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5983 aarch64_inst *inst)
5984 {
5985 if (!inst->opcode->avariant
5986 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5987 return false;
5988
5989 if (inst->opcode->iclass == sme_fp_sd
5990 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5991 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5992 return false;
5993
5994 if (inst->opcode->iclass == sme_int_sd
5995 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5996 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
5997 return false;
5998
5999 return true;
6000 }
6001
6002 /* Include the opcode description table as well as the operand description
6003 table. */
6004 #define VERIFIER(x) verify_##x
6005 #include "aarch64-tbl.h"
6006