aarch64-opc.c revision 1.9 1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Global flag: when non-zero, aarch64_verbose/dump_* tracing is emitted.
   Only compiled in for DEBUG_AARCH64 builds.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand (e.g. the <pattern> field of PTRUE/CNTB).  Indexed by
   the encoded 5-bit value.  A null entry indicates a reserved meaning,
   for which no assembler name exists.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand (PRFB/PRFH/PRFW/PRFD <prfop>).  Indexed by the
   encoded 4-bit value.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7: loads.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15: stores.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
/* The enumeration strings associated with each value of a 6-bit RPRFM
   operation.  Indexed by the encoded value; entries 6-63 are implicitly
   null (reserved) via static initialization.  */
const char *const aarch64_rprfmop_array[64] = {
  "pldkeep",
  "pstkeep",
  0,
  0,
  "pldstrm",
  "pststrm"
};
112
/* Vector length multiples for a predicate-as-counter operand.  Used in things
   like AARCH64_OPND_SME_VLxN_10.  Indexed by the single encoded bit.  */
const char *const aarch64_sme_vlxn_array[2] = {
  "vlx2",
  "vlx4"
};
119
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
122
123 static inline bool
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
125 {
126 return (qualifier >= AARCH64_OPND_QLF_V_8B
127 && qualifier <= AARCH64_OPND_QLF_V_1Q);
128 }
129
130 static inline bool
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
132 {
133 return (qualifier >= AARCH64_OPND_QLF_S_B
134 && qualifier <= AARCH64_OPND_QLF_S_Q);
135 }
136
/* Classification of an instruction's qualifier sequence, used to pick
   which operand carries the size:Q information (see
   significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
145
/* For each data pattern, the index of the operand whose qualifier
   determines the encoding of the size:Q fields.  Keep in sync with
   enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
154
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.

   The checks below are order-sensitive: 3SAME is tried before LONG and
   WIDE so that identical-size forms win, and LONG is deliberately
   checked without requiring qualifiers[0] == qualifiers[1].  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]))
    {
      /* All three operands have the same element size,
	 e.g. v.4s, v.4s, v.4s
	 or   v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* Destination elements are twice the source width,
	 e.g. v.8h, v.8b, v.8b.
	 or   v.4s, v.4h, v.h[2].
	 or   v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* First two operands wide, third narrow, e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]))
    {
      /* Scalar destination, vector source; exactly two operands.
	 e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1])
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
202
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either buffer the caculated the result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
209 benefit. */
210
211 int
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
213 {
214 return
215 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
216 }
217
/* Instruction bit-fields.
   Each entry is { lsb, width } describing a contiguous field in the
   32-bit instruction word.
+   Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 10, 2 }, 	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 10, 8 }, /* CSSC_imm8.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 0, 5 },	/* LSE128_Rt: Shared input+output operand register.  */
    { 16, 5 },	/* LSE128_Rt2: Shared input+output operand register 2.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 12, 2 },	/* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    { 1, 3 },	/* SME_Pdx2: predicate register, multiple of 2, [3:1].  */
    { 13, 3 },	/* SME_Pm: second source scalable predicate register P0-P7.  */
    { 0, 3 },	/* SME_PNd3: PN0-PN7, bits [2:0].  */
    { 5, 3 },	/* SME_PNn3: PN0-PN7, bits [7:5].  */
    { 16, 1 },	/* SME_Q: Q class bit, bit 16.  */
    { 16, 2 },	/* SME_Rm: index base register W12-W15 [17:16].  */
    { 13, 2 },	/* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 15, 1 },	/* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 10, 1 },	/* SME_VL_10: VLx2 or VLx4, bit [10].  */
    { 13, 1 },	/* SME_VL_13: VLx2 or VLx4, bit [13].  */
    { 0, 2 },	/* SME_ZAda_2b: tile ZA0-ZA3.  */
    { 0, 3 },	/* SME_ZAda_3b: tile ZA0-ZA7.  */
    { 1, 4 },	/* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1].  */
    { 2, 3 },	/* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2].  */
    { 16, 4 },	/* SME_Zm: Z0-Z15, bits [19:16].  */
    { 17, 4 },	/* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17].  */
    { 18, 3 },	/* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18].  */
    { 6, 4 },	/* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6].  */
    { 7, 3 },	/* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7].  */
    { 4, 1 },	/* SME_ZtT: upper bit of Zt, bit [4].  */
    { 0, 3 },	/* SME_Zt3: lower 3 bits of Zt, bits [2:0].  */
    { 0, 2 },	/* SME_Zt2: lower 2 bits of Zt, bits [1:0].  */
    { 23, 1 },	/* SME_i1: immediate field, bit 23.  */
    { 12, 2 },	/* SME_size_12: bits [13:12].  */
    { 22, 2 },	/* SME_size_22: size<1>, size<0> class field, [23:22].  */
    { 23, 1 },	/* SME_sz_23: bit [23].  */
    { 22, 1 },	/* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18, 3 },	/* SME_tszl: immediate and qualifier field, bits [20:18].  */
    { 0, 8 },	/* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 20, 1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 19, 2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 11, 1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10, 1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 17, 2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 30, 1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 22, 1 },	/* S_imm10: in LDRAA and LDRAB instructions.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 0, 1 },	/* imm1_0: general immediate in bits [0].  */
    { 2, 1 },	/* imm1_2: general immediate in bits [2].  */
    { 8, 1 },	/* imm1_8: general immediate in bits [8].  */
    { 10, 1 },	/* imm1_10: general immediate in bits [10].  */
    { 15, 1 },	/* imm1_15: general immediate in bits [15].  */
    { 16, 1 },	/* imm1_16: general immediate in bits [16].  */
    { 0, 2 },	/* imm2_0: general immediate in bits [1:0].  */
    { 1, 2 },	/* imm2_1: general immediate in bits [2:1].  */
    { 8, 2 },	/* imm2_8: general immediate in bits [9:8].  */
    { 10, 2 },  /* imm2_10: 2-bit immediate, bits [11:10] */
    { 12, 2 },  /* imm2_12: 2-bit immediate, bits [13:12] */
    { 15, 2 },  /* imm2_15: 2-bit immediate, bits [16:15] */
    { 16, 2 },  /* imm2_16: 2-bit immediate, bits [17:16] */
    { 19, 2 },  /* imm2_19: 2-bit immediate, bits [20:19] */
    { 0, 3 },	/* imm3_0: general immediate in bits [2:0].  */
    { 5, 3 },	/* imm3_5: general immediate in bits [7:5].  */
    { 10, 3 },	/* imm3_10: in add/sub extended reg instructions.  */
    { 12, 3 },	/* imm3_12: general immediate in bits [14:12].  */
    { 14, 3 },	/* imm3_14: general immediate in bits [16:14].  */
    { 15, 3 },	/* imm3_15: general immediate in bits [17:15].  */
    { 0, 4 },	/* imm4_0: in rmif instructions.  */
    { 5, 4 },	/* imm4_5: in SME instructions.  */
    { 10, 4 },	/* imm4_10: in addg/subg instructions.  */
    { 11, 4 },	/* imm4_11: in advsimd ext and advsimd ins instructions.  */
    { 14, 4 },	/* imm4_14: general immediate in bits [17:14].  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 10, 6 },	/* imm6_10: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_15: in rmif instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 0, 16 },	/* imm16_0: in udf instruction.  */
    { 5, 16 },	/* imm16_5: in exception instructions.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 30, 1 },	/* lse_sz: in LSE extension atomic instructions.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 5, 3 },	/* off3: immediate offset used to calculate slice number in a
		   ZA tile.  */
    { 5, 2 },	/* off2: immediate offset used to calculate slice number in
		   a ZA tile.  */
    { 7, 1 },	/* ZAn_1: name of the 1bit encoded ZA tile.  */
    { 5, 1 },	/* ol: immediate offset used to calculate slice number in a ZA
		   tile.  */
    { 6, 2 },	/* ZAn_2: name of the 2bit encoded ZA tile.  */
    { 5, 3 },	/* ZAn_3: name of the 3bit encoded ZA tile.  */
    { 6, 1 },	/* ZAn: name of the bit encoded ZA tile.  */
    { 12, 4 },	/* opc2: in rcpc3 ld/st inst deciding the pre/post-index.  */
    { 30, 2 },	/* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width.  */
};
417
/* Return the operand class (register, immediate, address, ...) of
   operand code TYPE, looked up in the generated aarch64_operands table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
423
/* Return the short name string of operand code TYPE, as recorded in the
   generated aarch64_operands table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
429
/* Get operand description string.
   This is usually for the diagnosis purpose.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
437
/* Table of all conditional affixes.  Indexed by the 4-bit "cond" encoding;
   each entry lists the accepted spellings (the extra names are the SVE
   condition aliases, e.g. "none" for EQ) and the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
458
459 const aarch64_cond *
460 get_cond_from_value (aarch64_insn value)
461 {
462 assert (value < 16);
463 return &aarch64_conds[(unsigned int) value];
464 }
465
/* Return the logical inverse of COND.  Flipping the low bit of the
   encoding inverts an AArch64 condition (EQ<->NE, CS<->CC, ...).  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
471
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   Terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
497
/* Return the modifier kind for DESC, which must point into
   aarch64_operand_modifiers; the kind is simply the table index.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
503
/* Return the common encoding value of modifier KIND (the value column of
   aarch64_operand_modifiers).  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
509
/* Inverse of aarch64_get_operand_modifier_value: map encoding VALUE back
   to a modifier kind.  EXTEND_P selects the extend operators (UXTB..SXTX,
   which ascend from AARCH64_MOD_UXTB) rather than the shift operators
   (LSL..ROR, which descend from AARCH64_MOD_LSL).  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
					 bool extend_p)
{
  if (extend_p)
    return AARCH64_MOD_UXTB + value;
  else
    return AARCH64_MOD_LSL - value;
}
519
520 bool
521 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
522 {
523 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
524 }
525
526 static inline bool
527 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
528 {
529 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
530 }
531
/* DMB/DSB/ISB barrier option names, indexed by the 4-bit CRm encoding.
   Encodings with no architectural name are printed as "#0x..".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
551
/* DSB nXS barrier option names, indexed by the CRm<3:2> encoding.
   The stored value is the full immediate for the alias form.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2> #imm  */
    { "oshnxs", 16 },    /*    00    16     */
    { "nshnxs", 20 },    /*    01    20     */
    { "ishnxs", 24 },    /*    10    24     */
    { "synxs",  28 },    /*    11    28     */
};
559
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     The " " name is never printed (HINT_OPD_F_NOPRINT).  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "dsync",	HINT_OPD_DSYNC },	/* GCSB DSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
578
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   The encoding is built from three sub-fields:
   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3 (4 = SLC, the system-level cache)
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   NULL entries (0x18-0x1f) are unallocated and printed numerically.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "pldslckeep", B(0, 4, 0) },
  { "pldslcstrm", B(0, 4, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "plislckeep", B(1, 4, 0) },
  { "plislcstrm", B(1, 4, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { "pstslckeep", B(2, 4, 0) },
  { "pstslcstrm", B(2, 4, 1) },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
619
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
628
/* Return true if VALUE is a multiple of ALIGN.  ALIGN must be non-zero.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
635
/* A signed value fits in a field: return 1 iff VALUE is representable
   as a WIDTH-bit two's-complement number, i.e. lies in
   [-2^(WIDTH-1), 2^(WIDTH-1)).  WIDTH must be below 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  int64_t limit = (uint64_t) 1 << (width - 1);
  return (-limit <= value && value < limit) ? 1 : 0;
}
649
/* An unsigned value fits in a field: return 1 iff VALUE lies in
   [0, 2^WIDTH).  WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  int64_t limit = (uint64_t) 1 << width;
  return (0 <= value && value < limit) ? 1 : 0;
}
663
664 /* Return 1 if OPERAND is SP or WSP. */
665 int
666 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
667 {
668 return ((aarch64_get_operand_class (operand->type)
669 == AARCH64_OPND_CLASS_INT_REG)
670 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
671 && operand->reg.regno == 31);
672 }
673
674 /* Return 1 if OPERAND is XZR or WZP. */
675 int
676 aarch64_zero_register_p (const aarch64_opnd_info *operand)
677 {
678 return ((aarch64_get_operand_class (operand->type)
679 == AARCH64_OPND_CLASS_INT_REG)
680 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
681 && operand->reg.regno == 31);
682 }
683
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   This covers the W/WSP and X/SP overlaps: register 31 means the stack
   pointer for operand codes that allow it, so e.g. an operand qualified
   W can also be treated as WSP when it actually denotes WSP.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* W reg 31 used as WSP.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* X reg 31 used as SP.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      /* WSP viewed as a plain W operand, if the operand code allows SP.  */
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      /* SP viewed as a plain X operand, if the operand code allows SP.  */
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
718
719 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
720 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
721
722 Return NIL if more than one expected qualifiers are found. */
723
724 aarch64_opnd_qualifier_t
725 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
726 int idx,
727 const aarch64_opnd_qualifier_t known_qlf,
728 int known_idx)
729 {
730 int i, saved_i;
731
732 /* Special case.
733
734 When the known qualifier is NIL, we have to assume that there is only
735 one qualifier sequence in the *QSEQ_LIST and return the corresponding
736 qualifier directly. One scenario is that for instruction
737 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
738 which has only one possible valid qualifier sequence
739 NIL, S_D
740 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
741 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
742
743 Because the qualifier NIL has dual roles in the qualifier sequence:
744 it can mean no qualifier for the operand, or the qualifer sequence is
745 not in use (when all qualifiers in the sequence are NILs), we have to
746 handle this special case here. */
747 if (known_qlf == AARCH64_OPND_NIL)
748 {
749 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
750 return qseq_list[0][idx];
751 }
752
753 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
754 {
755 if (qseq_list[i][known_idx] == known_qlf)
756 {
757 if (saved_i != -1)
758 /* More than one sequences are found to have KNOWN_QLF at
759 KNOWN_IDX. */
760 return AARCH64_OPND_NIL;
761 saved_i = i;
762 }
763 }
764
765 return qseq_list[saved_i][idx];
766 }
767
/* Kinds of operand qualifier, selecting how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* Placeholder / no qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector variant: esize, nelem, encoding.  */
  OQK_VALUE_IN_RANGE,	/* Immediate range: lower bound, upper bound.  */
  OQK_MISC,		/* Miscellaneous; data fields unused.  */
};
775
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size / number of elements /
     standard encoding value; for OQK_VALUE_IN_RANGE they are lower
     bound / upper bound / unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
788
/* Indexed by the operand qualifier enumerators.  Keep in sync with
   enum aarch64_opnd_qualifier.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* Note: "4b" and "2h" appear twice; these first forms are the
     single-structure variants, those below are the vector forms.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind field here is literally 0 (OQK_NIL), not
     OQK_MISC; nothing in this file tests for OQK_MISC, so behavior is
     unaffected.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
850
851 static inline bool
852 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
853 {
854 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
855 }
856
857 static inline bool
858 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
859 {
860 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
861 }
862
/* Return the printable name of QUALIFIER (e.g. "4s", "imm_0_31").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
868
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  QUALIFIER must be an operand
   variant qualifier.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
877
/* Return the number of elements of a qualified operand.  QUALIFIER must
   be an operand variant qualifier.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
884
/* Return the common encoding value of QUALIFIER, used for table-driven
   encoding/decoding.  QUALIFIER must be an operand variant qualifier.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data2;
}
891
/* Return the inclusive lower bound of the range-constraint QUALIFIER.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data0;
}
898
/* Return the inclusive upper bound of the range-constraint QUALIFIER.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier));
  return aarch64_opnd_qualifiers[qualifier].data1;
}
905
906 #ifdef DEBUG_AARCH64
/* printf-style debug tracing: emit "#### " followed by the formatted
   message and a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;
  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  va_end (args);
  putchar ('\n');
}
917
918 static inline void
919 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
920 {
921 int i;
922 printf ("#### \t");
923 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
924 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
925 printf ("\n");
926 }
927
928 static void
929 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
930 const aarch64_opnd_qualifier_t *qualifier)
931 {
932 int i;
933 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
934
935 aarch64_verbose ("dump_match_qualifiers:");
936 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
937 curr[i] = opnd[i].qualifier;
938 dump_qualifier_sequence (curr);
939 aarch64_verbose ("against");
940 dump_qualifier_sequence (qualifier);
941 }
942 #endif /* DEBUG_AARCH64 */
943
944 /* This function checks if the given instruction INSN is a destructive
945 instruction based on the usage of the registers. It does not recognize
946 unary destructive instructions. */
947 bool
948 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
949 {
950 int i = 0;
951 const enum aarch64_opnd *opnds = opcode->operands;
952
953 if (opnds[0] == AARCH64_OPND_NIL)
954 return false;
955
956 while (opnds[++i] != AARCH64_OPND_NIL)
957 if (opnds[i] == opnds[0])
958 return true;
959
960 return false;
961 }
962
963 /* TODO improve this, we can have an extra field at the runtime to
964 store the number of operands rather than calculating it every time. */
965
966 int
967 aarch64_num_of_operands (const aarch64_opcode *opcode)
968 {
969 int i = 0;
970 const enum aarch64_opnd *opnds = opcode->operands;
971 while (opnds[i++] != AARCH64_OPND_NIL)
972 ;
973 --i;
974 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
975 return i;
976 }
977
978 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
979 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
980
981 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
982 This is always 0 if the function succeeds.
983
984 N.B. on the entry, it is very likely that only some operands in *INST
985 have had their qualifiers been established.
986
987 If STOP_AT is not -1, the function will only try to match
988 the qualifier sequence for operands before and including the operand
989 of index STOP_AT; and on success *RET will only be filled with the first
990 (STOP_AT+1) qualifiers.
991
992 A couple examples of the matching algorithm:
993
994 X,W,NIL should match
995 X,W,NIL
996
997 NIL,NIL should match
998 X ,NIL
999
1000 Apart from serving the main encoding routine, this can also be called
1001 during or after the operand decoding. */
1002
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* An opcode with no operands matches trivially.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Clamp STOP_AT so that it indexes a valid operand; -1 (or any
     out-of-range value) means "match all operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Count the qualifiers of this sequence that disagree with the
	 (already-established) qualifiers of the operands.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  The loop above exited via "break", so
	 QUALIFIERS_LIST still points at the fully-matched sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers and NIL-fill the remainder.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1105
1106 /* Operand qualifier matching and resolving.
1107
1108 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1109 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1110
1111 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1112 This is always 0 if the function succeeds.
1113
1114 if UPDATE_P, update the qualifier(s) in *INST after the matching
1115 succeeds. */
1116
static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* stop_at == -1: consider the qualifier of every operand.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	/* The operand list is terminated by AARCH64_OPND_NIL.  */
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1147
1148 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1149 register by MOVZ.
1150
1151 IS32 indicates whether value is a 32-bit immediate or not.
1152 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1153 amount will be returned in *SHIFT_AMOUNT. */
1154
1155 bool
1156 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1157 {
1158 int amount;
1159
1160 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1161
1162 if (is32)
1163 {
1164 /* Allow all zeros or all ones in top 32-bits, so that
1165 32-bit constant expressions like ~0x80000000 are
1166 permitted. */
1167 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1168 /* Immediate out of range. */
1169 return false;
1170 value &= 0xffffffff;
1171 }
1172
1173 /* first, try movz then movn */
1174 amount = -1;
1175 if ((value & ((uint64_t) 0xffff << 0)) == value)
1176 amount = 0;
1177 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1178 amount = 16;
1179 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1180 amount = 32;
1181 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1182 amount = 48;
1183
1184 if (amount == -1)
1185 {
1186 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1187 return false;
1188 }
1189
1190 if (shift_amount != NULL)
1191 *shift_amount = amount;
1192
1193 DEBUG_TRACE ("exit true with amount %d", amount);
1194
1195 return true;
1196 }
1197
1198 /* Build the accepted values for immediate logical SIMD instructions.
1199
1200 The standard encodings of the immediate value are:
1201 N imms immr SIMD size R S
1202 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1203 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1204 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1205 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1206 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1207 0 11110s 00000r 2 UInt(r) UInt(s)
1208 where all-ones value of S is reserved.
1209
1210 Let's call E the SIMD size.
1211
1212 The immediate value is: S+1 bits '1' rotated to the right by R.
1213
1214 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1215 (remember S != E - 1). */
1216
1217 #define TOTAL_IMM_NB 5334
1218
1219 typedef struct
1220 {
1221 uint64_t imm;
1222 aarch64_insn encoding;
1223 } simd_imm_encoding;
1224
1225 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1226
1227 static int
1228 simd_imm_encoding_cmp(const void *i1, const void *i2)
1229 {
1230 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1231 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1232
1233 if (imm1->imm < imm2->imm)
1234 return -1;
1235 if (imm1->imm > imm2->imm)
1236 return +1;
1237 return 0;
1238 }
1239
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Assemble the 13-bit field: S in bits <5:0>, R in bits <11:6>,
     the 64-bit flag in bit <12>.  */
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1253
/* Populate simd_immediates[] with every valid (value, encoding) pair of
   the logical-immediate bitmask format, then sort the table by value so
   that it can be binary-searched.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* All-ones S (s == e - 1) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Deliberate case fallthrough: each smaller element size
	       needs every doubling step below it to reach 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Every valid encoding must have been generated exactly once.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1319
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build and sort the lookup table lazily, on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* UPPER masks the bits above esize*8.  Shifting twice by esize*4
     avoids a shift by 64 (undefined behavior) when esize == 8.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* simd_immediates[] is sorted by immediate value, so look the
     replicated value up by binary search.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1371
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;

  /* Inspect each of the 8 bytes; 0xff contributes a 1 bit, 0x00 a
     0 bit, anything else makes the immediate invalid.  */
  for (int i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  result |= 1 << i;
	  break;
	case 0x00:
	  break;
	default:
	  return -1;
	}
    }

  return result;
}
1393
1394 /* Utility inline functions for operand_general_constraint_met_p. */
1395
1396 static inline void
1397 set_error (aarch64_operand_error *mismatch_detail,
1398 enum aarch64_operand_error_kind kind, int idx,
1399 const char* error)
1400 {
1401 if (mismatch_detail == NULL)
1402 return;
1403 mismatch_detail->kind = kind;
1404 mismatch_detail->index = idx;
1405 mismatch_detail->error = error;
1406 }
1407
1408 static inline void
1409 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1410 const char* error)
1411 {
1412 if (mismatch_detail == NULL)
1413 return;
1414 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1415 }
1416
1417 static inline void
1418 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1419 const char *prefix, int lower_bound, int upper_bound)
1420 {
1421 if (mismatch_detail == NULL)
1422 return;
1423 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1424 mismatch_detail->data[0].s = prefix;
1425 mismatch_detail->data[1].i = lower_bound;
1426 mismatch_detail->data[2].i = upper_bound;
1427 }
1428
1429 static inline void
1430 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1431 int idx, int lower_bound, int upper_bound,
1432 const char* error)
1433 {
1434 if (mismatch_detail == NULL)
1435 return;
1436 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1437 mismatch_detail->data[0].i = lower_bound;
1438 mismatch_detail->data[1].i = upper_bound;
1439 }
1440
1441 static inline void
1442 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1443 int idx, int lower_bound, int upper_bound)
1444 {
1445 if (mismatch_detail == NULL)
1446 return;
1447 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1448 _("immediate value"));
1449 }
1450
1451 static inline void
1452 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1453 int idx, int lower_bound, int upper_bound)
1454 {
1455 if (mismatch_detail == NULL)
1456 return;
1457 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1458 _("immediate offset"));
1459 }
1460
1461 static inline void
1462 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1463 int idx, int lower_bound, int upper_bound)
1464 {
1465 if (mismatch_detail == NULL)
1466 return;
1467 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1468 _("register number"));
1469 }
1470
1471 static inline void
1472 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1473 int idx, int lower_bound, int upper_bound)
1474 {
1475 if (mismatch_detail == NULL)
1476 return;
1477 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1478 _("register element index"));
1479 }
1480
1481 static inline void
1482 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1483 int idx, int lower_bound, int upper_bound)
1484 {
1485 if (mismatch_detail == NULL)
1486 return;
1487 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1488 _("shift amount"));
1489 }
1490
1491 /* Report that the MUL modifier in operand IDX should be in the range
1492 [LOWER_BOUND, UPPER_BOUND]. */
1493 static inline void
1494 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1495 int idx, int lower_bound, int upper_bound)
1496 {
1497 if (mismatch_detail == NULL)
1498 return;
1499 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1500 _("multiplier"));
1501 }
1502
1503 static inline void
1504 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1505 int alignment)
1506 {
1507 if (mismatch_detail == NULL)
1508 return;
1509 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1510 mismatch_detail->data[0].i = alignment;
1511 }
1512
1513 static inline void
1514 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1515 int expected_num)
1516 {
1517 if (mismatch_detail == NULL)
1518 return;
1519 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1520 mismatch_detail->data[0].i = 1 << expected_num;
1521 }
1522
1523 static inline void
1524 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1525 int expected_num)
1526 {
1527 if (mismatch_detail == NULL)
1528 return;
1529 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1530 mismatch_detail->data[0].i = 1 << expected_num;
1531 }
1532
1533 static inline void
1534 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1535 int idx, int expected)
1536 {
1537 if (mismatch_detail == NULL)
1538 return;
1539 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1540 mismatch_detail->data[0].i = expected;
1541 }
1542
1543 static inline void
1544 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1545 const char* error)
1546 {
1547 if (mismatch_detail == NULL)
1548 return;
1549 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1550 }
1551
1552 /* Check that indexed register operand OPND has a register in the range
1553 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1554 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1555
1556 static bool
1557 check_reglane (const aarch64_opnd_info *opnd,
1558 aarch64_operand_error *mismatch_detail, int idx,
1559 const char *prefix, int min_regno, int max_regno,
1560 int min_index, int max_index)
1561 {
1562 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1563 {
1564 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1565 max_regno);
1566 return false;
1567 }
1568 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1569 {
1570 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1571 max_index);
1572 return false;
1573 }
1574 return true;
1575 }
1576
1577 /* Check that register list operand OPND has NUM_REGS registers and a
1578 register stride of STRIDE. */
1579
1580 static bool
1581 check_reglist (const aarch64_opnd_info *opnd,
1582 aarch64_operand_error *mismatch_detail, int idx,
1583 int num_regs, int stride)
1584 {
1585 if (opnd->reglist.num_regs != num_regs)
1586 {
1587 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1588 return false;
1589 }
1590 if (opnd->reglist.stride != stride)
1591 {
1592 set_reg_list_stride_error (mismatch_detail, idx, stride);
1593 return false;
1594 }
1595 return true;
1596 }
1597
/* Check that indexed ZA operand OPND has:

   - a selection register in the range [MIN_WREG, MIN_WREG + 3]

   - RANGE_SIZE consecutive immediate offsets.

   - an initial immediate offset that is a multiple of RANGE_SIZE
     in the range [0, MAX_VALUE * RANGE_SIZE]

   - a vector group size of GROUP_SIZE.  */

static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size)
{
  /* The selection register must be one of the four consecutive W
     registers starting at MIN_WREG (w8-w11 or w12-w15).  */
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  /* The initial offset must lie in [0, MAX_VALUE * RANGE_SIZE].  */
  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* ...and must be aligned to the range size.  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* countm1 is the offset count minus one, so it must equal
     RANGE_SIZE - 1 for a range of RANGE_SIZE offsets.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code.  */
  if (opnd->indexed_za.group_size != 0
      && opnd->indexed_za.group_size != group_size)
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
1674
1675 /* Given a load/store operation, calculate the size of transferred data via a
1676 cumulative sum of qualifier sizes preceding the address operand in the
1677 OPNDS operand list argument. */
1678 int
1679 calc_ldst_datasize (const aarch64_opnd_info *opnds)
1680 {
1681 unsigned num_bytes = 0; /* total number of bytes transferred. */
1682 enum aarch64_operand_class opnd_class;
1683 enum aarch64_opnd type;
1684
1685 for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1686 {
1687 type = opnds[i].type;
1688 opnd_class = aarch64_operands[type].op_class;
1689 if (opnd_class == AARCH64_OPND_CLASS_ADDRESS)
1690 break;
1691 num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier);
1692 }
1693 return num_bytes;
1694 }
1695
1696
1697 /* General constraint checking based on operand code.
1698
1699 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1700 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1701
1702 This function has to be called after the qualifiers for all operands
1703 have been resolved.
1704
1705 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1706 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1707 of error message during the disassembling where error message is not
1708 wanted. We avoid the dynamic construction of strings of error messages
1709 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1710 use a combination of error code, static string and some integer data to
1711 represent an error. */
1712
1713 static int
1714 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1715 enum aarch64_opnd type,
1716 const aarch64_opcode *opcode,
1717 aarch64_operand_error *mismatch_detail)
1718 {
1719 unsigned num, modifiers, shift;
1720 unsigned char size;
1721 int64_t imm, min_value, max_value;
1722 uint64_t uvalue, mask;
1723 const aarch64_opnd_info *opnd = opnds + idx;
1724 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1725 int i;
1726
1727 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1728
1729 switch (aarch64_operands[type].op_class)
1730 {
1731 case AARCH64_OPND_CLASS_INT_REG:
1732 /* Check for pair of xzr registers. */
1733 if (type == AARCH64_OPND_PAIRREG_OR_XZR
1734 && opnds[idx - 1].reg.regno == 0x1f)
1735 {
1736 if (opnds[idx].reg.regno != 0x1f)
1737 {
1738 set_syntax_error (mismatch_detail, idx - 1,
1739 _("second reg in pair should be xzr if first is"
1740 " xzr"));
1741 return 0;
1742 }
1743 }
1744 /* Check pair reg constraints for instructions taking a pair of
1745 consecutively-numbered general-purpose registers. */
1746 else if (type == AARCH64_OPND_PAIRREG
1747 || type == AARCH64_OPND_PAIRREG_OR_XZR)
1748 {
1749 assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
1750 if (opnds[idx - 1].reg.regno % 2 != 0)
1751 {
1752 set_syntax_error (mismatch_detail, idx - 1,
1753 _("reg pair must start from even reg"));
1754 return 0;
1755 }
1756 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1757 {
1758 set_syntax_error (mismatch_detail, idx,
1759 _("reg pair must be contiguous"));
1760 return 0;
1761 }
1762 break;
1763 }
1764
1765 /* <Xt> may be optional in some IC and TLBI instructions. */
1766 if (type == AARCH64_OPND_Rt_SYS)
1767 {
1768 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1769 == AARCH64_OPND_CLASS_SYSTEM));
1770 if (opnds[1].present
1771 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1772 {
1773 set_other_error (mismatch_detail, idx, _("extraneous register"));
1774 return 0;
1775 }
1776 if (!opnds[1].present
1777 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1778 {
1779 set_other_error (mismatch_detail, idx, _("missing register"));
1780 return 0;
1781 }
1782 }
1783 switch (qualifier)
1784 {
1785 case AARCH64_OPND_QLF_WSP:
1786 case AARCH64_OPND_QLF_SP:
1787 if (!aarch64_stack_pointer_p (opnd))
1788 {
1789 set_other_error (mismatch_detail, idx,
1790 _("stack pointer register expected"));
1791 return 0;
1792 }
1793 break;
1794 default:
1795 break;
1796 }
1797 break;
1798
1799 case AARCH64_OPND_CLASS_SVE_REG:
1800 switch (type)
1801 {
1802 case AARCH64_OPND_SVE_Zm3_INDEX:
1803 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1804 case AARCH64_OPND_SVE_Zm3_19_INDEX:
1805 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1806 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1807 case AARCH64_OPND_SVE_Zm4_INDEX:
1808 size = get_operand_fields_width (get_operand_from_code (type));
1809 shift = get_operand_specific_data (&aarch64_operands[type]);
1810 if (!check_reglane (opnd, mismatch_detail, idx,
1811 "z", 0, (1 << shift) - 1,
1812 0, (1u << (size - shift)) - 1))
1813 return 0;
1814 break;
1815
1816 case AARCH64_OPND_SVE_Zn_INDEX:
1817 size = aarch64_get_qualifier_esize (opnd->qualifier);
1818 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1819 0, 64 / size - 1))
1820 return 0;
1821 break;
1822
1823 case AARCH64_OPND_SVE_Zm_imm4:
1824 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 15))
1825 return 0;
1826 break;
1827
1828 case AARCH64_OPND_SVE_Zn_5_INDEX:
1829 size = aarch64_get_qualifier_esize (opnd->qualifier);
1830 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1831 0, 16 / size - 1))
1832 return 0;
1833 break;
1834
1835 case AARCH64_OPND_SME_PNn3_INDEX1:
1836 case AARCH64_OPND_SME_PNn3_INDEX2:
1837 size = get_operand_field_width (get_operand_from_code (type), 1);
1838 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1839 0, (1 << size) - 1))
1840 return 0;
1841 break;
1842
1843 case AARCH64_OPND_SME_Zn_INDEX1_16:
1844 case AARCH64_OPND_SME_Zn_INDEX2_15:
1845 case AARCH64_OPND_SME_Zn_INDEX2_16:
1846 case AARCH64_OPND_SME_Zn_INDEX3_14:
1847 case AARCH64_OPND_SME_Zn_INDEX3_15:
1848 case AARCH64_OPND_SME_Zn_INDEX4_14:
1849 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1850 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1851 0, (1 << size) - 1))
1852 return 0;
1853 break;
1854
1855 case AARCH64_OPND_SME_Zm_INDEX1:
1856 case AARCH64_OPND_SME_Zm_INDEX2:
1857 case AARCH64_OPND_SME_Zm_INDEX3_1:
1858 case AARCH64_OPND_SME_Zm_INDEX3_2:
1859 case AARCH64_OPND_SME_Zm_INDEX3_10:
1860 case AARCH64_OPND_SME_Zm_INDEX4_1:
1861 case AARCH64_OPND_SME_Zm_INDEX4_10:
1862 size = get_operand_fields_width (get_operand_from_code (type)) - 4;
1863 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
1864 0, (1 << size) - 1))
1865 return 0;
1866 break;
1867
1868 case AARCH64_OPND_SME_Zm:
1869 if (opnd->reg.regno > 15)
1870 {
1871 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1872 return 0;
1873 }
1874 break;
1875
1876 case AARCH64_OPND_SME_PnT_Wm_imm:
1877 size = aarch64_get_qualifier_esize (opnd->qualifier);
1878 max_value = 16 / size - 1;
1879 if (!check_za_access (opnd, mismatch_detail, idx,
1880 12, max_value, 1, 0))
1881 return 0;
1882 break;
1883
1884 default:
1885 break;
1886 }
1887 break;
1888
1889 case AARCH64_OPND_CLASS_SVE_REGLIST:
1890 switch (type)
1891 {
1892 case AARCH64_OPND_SME_Pdx2:
1893 case AARCH64_OPND_SME_Zdnx2:
1894 case AARCH64_OPND_SME_Zdnx4:
1895 case AARCH64_OPND_SME_Zmx2:
1896 case AARCH64_OPND_SME_Zmx4:
1897 case AARCH64_OPND_SME_Znx2:
1898 case AARCH64_OPND_SME_Znx4:
1899 case AARCH64_OPND_SME_Zt2:
1900 case AARCH64_OPND_SME_Zt3:
1901 case AARCH64_OPND_SME_Zt4:
1902 num = get_operand_specific_data (&aarch64_operands[type]);
1903 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1904 return 0;
1905 if ((opnd->reglist.first_regno % num) != 0)
1906 {
1907 set_other_error (mismatch_detail, idx,
1908 _("start register out of range"));
1909 return 0;
1910 }
1911 break;
1912
1913 case AARCH64_OPND_SME_Ztx2_STRIDED:
1914 case AARCH64_OPND_SME_Ztx4_STRIDED:
1915 /* 2-register lists have a stride of 8 and 4-register lists
1916 have a stride of 4. */
1917 num = get_operand_specific_data (&aarch64_operands[type]);
1918 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1919 return 0;
1920 num = 16 | (opnd->reglist.stride - 1);
1921 if ((opnd->reglist.first_regno & ~num) != 0)
1922 {
1923 set_other_error (mismatch_detail, idx,
1924 _("start register out of range"));
1925 return 0;
1926 }
1927 break;
1928
1929 case AARCH64_OPND_SME_PdxN:
1930 case AARCH64_OPND_SVE_ZnxN:
1931 case AARCH64_OPND_SVE_ZtxN:
1932 num = get_opcode_dependent_value (opcode);
1933 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1934 return 0;
1935 break;
1936
1937 default:
1938 abort ();
1939 }
1940 break;
1941
1942 case AARCH64_OPND_CLASS_ZA_ACCESS:
1943 switch (type)
1944 {
1945 case AARCH64_OPND_SME_ZA_HV_idx_src:
1946 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1947 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1948 size = aarch64_get_qualifier_esize (opnd->qualifier);
1949 max_value = 16 / size - 1;
1950 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1951 get_opcode_dependent_value (opcode)))
1952 return 0;
1953 break;
1954
1955 case AARCH64_OPND_SME_ZA_array_off4:
1956 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1957 get_opcode_dependent_value (opcode)))
1958 return 0;
1959 break;
1960
1961 case AARCH64_OPND_SME_ZA_array_off3_0:
1962 case AARCH64_OPND_SME_ZA_array_off3_5:
1963 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1964 get_opcode_dependent_value (opcode)))
1965 return 0;
1966 break;
1967
1968 case AARCH64_OPND_SME_ZA_array_off1x4:
1969 if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
1970 get_opcode_dependent_value (opcode)))
1971 return 0;
1972 break;
1973
1974 case AARCH64_OPND_SME_ZA_array_off2x2:
1975 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
1976 get_opcode_dependent_value (opcode)))
1977 return 0;
1978 break;
1979
1980 case AARCH64_OPND_SME_ZA_array_off2x4:
1981 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
1982 get_opcode_dependent_value (opcode)))
1983 return 0;
1984 break;
1985
1986 case AARCH64_OPND_SME_ZA_array_off3x2:
1987 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
1988 get_opcode_dependent_value (opcode)))
1989 return 0;
1990 break;
1991
1992 case AARCH64_OPND_SME_ZA_array_vrsb_1:
1993 if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
1994 get_opcode_dependent_value (opcode)))
1995 return 0;
1996 break;
1997
1998 case AARCH64_OPND_SME_ZA_array_vrsh_1:
1999 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
2000 get_opcode_dependent_value (opcode)))
2001 return 0;
2002 break;
2003
2004 case AARCH64_OPND_SME_ZA_array_vrss_1:
2005 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
2006 get_opcode_dependent_value (opcode)))
2007 return 0;
2008 break;
2009
2010 case AARCH64_OPND_SME_ZA_array_vrsd_1:
2011 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
2012 get_opcode_dependent_value (opcode)))
2013 return 0;
2014 break;
2015
2016 case AARCH64_OPND_SME_ZA_array_vrsb_2:
2017 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
2018 get_opcode_dependent_value (opcode)))
2019 return 0;
2020 break;
2021
2022 case AARCH64_OPND_SME_ZA_array_vrsh_2:
2023 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
2024 get_opcode_dependent_value (opcode)))
2025 return 0;
2026 break;
2027
2028 case AARCH64_OPND_SME_ZA_array_vrss_2:
2029 case AARCH64_OPND_SME_ZA_array_vrsd_2:
2030 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
2031 get_opcode_dependent_value (opcode)))
2032 return 0;
2033 break;
2034
2035 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
2036 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
2037 size = aarch64_get_qualifier_esize (opnd->qualifier);
2038 num = get_opcode_dependent_value (opcode);
2039 max_value = 16 / num / size;
2040 if (max_value > 0)
2041 max_value -= 1;
2042 if (!check_za_access (opnd, mismatch_detail, idx,
2043 12, max_value, num, 0))
2044 return 0;
2045 break;
2046
2047 default:
2048 abort ();
2049 }
2050 break;
2051
2052 case AARCH64_OPND_CLASS_PRED_REG:
2053 switch (type)
2054 {
2055 case AARCH64_OPND_SME_PNd3:
2056 case AARCH64_OPND_SME_PNg3:
2057 if (opnd->reg.regno < 8)
2058 {
2059 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
2060 return 0;
2061 }
2062 break;
2063
2064 default:
2065 if (opnd->reg.regno >= 8
2066 && get_operand_fields_width (get_operand_from_code (type)) == 3)
2067 {
2068 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
2069 return 0;
2070 }
2071 break;
2072 }
2073 break;
2074
2075 case AARCH64_OPND_CLASS_COND:
2076 if (type == AARCH64_OPND_COND1
2077 && (opnds[idx].cond->value & 0xe) == 0xe)
2078 {
2079 /* Not allow AL or NV. */
2080 set_syntax_error (mismatch_detail, idx, NULL);
2081 }
2082 break;
2083
2084 case AARCH64_OPND_CLASS_ADDRESS:
2085 /* Check writeback. */
2086 switch (opcode->iclass)
2087 {
2088 case ldst_pos:
2089 case ldst_unscaled:
2090 case ldstnapair_offs:
2091 case ldstpair_off:
2092 case ldst_unpriv:
2093 if (opnd->addr.writeback == 1)
2094 {
2095 set_syntax_error (mismatch_detail, idx,
2096 _("unexpected address writeback"));
2097 return 0;
2098 }
2099 break;
2100 case ldst_imm10:
2101 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
2102 {
2103 set_syntax_error (mismatch_detail, idx,
2104 _("unexpected address writeback"));
2105 return 0;
2106 }
2107 break;
2108 case ldst_imm9:
2109 case ldstpair_indexed:
2110 case asisdlsep:
2111 case asisdlsop:
2112 if (opnd->addr.writeback == 0)
2113 {
2114 set_syntax_error (mismatch_detail, idx,
2115 _("address writeback expected"));
2116 return 0;
2117 }
2118 break;
2119 case rcpc3:
2120 if (opnd->addr.writeback)
2121 if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB
2122 && !opnd->addr.preind)
2123 || (type == AARCH64_OPND_RCPC3_ADDR_POSTIND
2124 && !opnd->addr.postind))
2125 {
2126 set_syntax_error (mismatch_detail, idx,
2127 _("unexpected address writeback"));
2128 return 0;
2129 }
2130
2131 break;
2132 default:
2133 assert (opnd->addr.writeback == 0);
2134 break;
2135 }
2136 switch (type)
2137 {
2138 case AARCH64_OPND_ADDR_SIMM7:
2139 /* Scaled signed 7 bits immediate offset. */
2140 /* Get the size of the data element that is accessed, which may be
2141 different from that of the source register size,
2142 e.g. in strb/ldrb. */
2143 size = aarch64_get_qualifier_esize (opnd->qualifier);
2144 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
2145 {
2146 set_offset_out_of_range_error (mismatch_detail, idx,
2147 -64 * size, 63 * size);
2148 return 0;
2149 }
2150 if (!value_aligned_p (opnd->addr.offset.imm, size))
2151 {
2152 set_unaligned_error (mismatch_detail, idx, size);
2153 return 0;
2154 }
2155 break;
2156 case AARCH64_OPND_ADDR_OFFSET:
2157 case AARCH64_OPND_ADDR_SIMM9:
2158 /* Unscaled signed 9 bits immediate offset. */
2159 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2160 {
2161 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
2162 return 0;
2163 }
2164 break;
2165
2166 case AARCH64_OPND_ADDR_SIMM9_2:
2167 /* Unscaled signed 9 bits immediate offset, which has to be negative
2168 or unaligned. */
2169 size = aarch64_get_qualifier_esize (qualifier);
2170 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
2171 && !value_aligned_p (opnd->addr.offset.imm, size))
2172 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
2173 return 1;
2174 set_other_error (mismatch_detail, idx,
2175 _("negative or unaligned offset expected"));
2176 return 0;
2177
2178 case AARCH64_OPND_ADDR_SIMM10:
2179 /* Scaled signed 10 bits immediate offset. */
2180 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
2181 {
2182 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2183 return 0;
2184 }
2185 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2186 {
2187 set_unaligned_error (mismatch_detail, idx, 8);
2188 return 0;
2189 }
2190 break;
2191
2192 case AARCH64_OPND_ADDR_SIMM11:
2193 /* Signed 11 bits immediate offset (multiple of 16). */
2194 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2195 {
2196 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2197 return 0;
2198 }
2199
2200 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2201 {
2202 set_unaligned_error (mismatch_detail, idx, 16);
2203 return 0;
2204 }
2205 break;
2206
2207 case AARCH64_OPND_ADDR_SIMM13:
2208 /* Signed 13 bits immediate offset (multiple of 16). */
2209 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2210 {
2211 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2212 return 0;
2213 }
2214
2215 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2216 {
2217 set_unaligned_error (mismatch_detail, idx, 16);
2218 return 0;
2219 }
2220 break;
2221
2222 case AARCH64_OPND_SIMD_ADDR_POST:
2223 /* AdvSIMD load/store multiple structures, post-index. */
2224 assert (idx == 1);
2225 if (opnd->addr.offset.is_reg)
2226 {
2227 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2228 return 1;
2229 else
2230 {
2231 set_other_error (mismatch_detail, idx,
2232 _("invalid register offset"));
2233 return 0;
2234 }
2235 }
2236 else
2237 {
2238 const aarch64_opnd_info *prev = &opnds[idx-1];
2239 unsigned num_bytes; /* total number of bytes transferred. */
2240 /* The opcode dependent area stores the number of elements in
2241 each structure to be loaded/stored. */
2242 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2243 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
 2244 		/* Special handling of loading single structure to all lanes.  */
2245 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2246 * aarch64_get_qualifier_esize (prev->qualifier);
2247 else
2248 num_bytes = prev->reglist.num_regs
2249 * aarch64_get_qualifier_esize (prev->qualifier)
2250 * aarch64_get_qualifier_nelem (prev->qualifier);
2251 if ((int) num_bytes != opnd->addr.offset.imm)
2252 {
2253 set_other_error (mismatch_detail, idx,
2254 _("invalid post-increment amount"));
2255 return 0;
2256 }
2257 }
2258 break;
2259
2260 case AARCH64_OPND_ADDR_REGOFF:
2261 /* Get the size of the data element that is accessed, which may be
2262 different from that of the source register size,
2263 e.g. in strb/ldrb. */
2264 size = aarch64_get_qualifier_esize (opnd->qualifier);
2265 /* It is either no shift or shift by the binary logarithm of SIZE. */
2266 if (opnd->shifter.amount != 0
2267 && opnd->shifter.amount != (int)get_logsz (size))
2268 {
2269 set_other_error (mismatch_detail, idx,
2270 _("invalid shift amount"));
2271 return 0;
2272 }
2273 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2274 operators. */
2275 switch (opnd->shifter.kind)
2276 {
2277 case AARCH64_MOD_UXTW:
2278 case AARCH64_MOD_LSL:
2279 case AARCH64_MOD_SXTW:
2280 case AARCH64_MOD_SXTX: break;
2281 default:
2282 set_other_error (mismatch_detail, idx,
2283 _("invalid extend/shift operator"));
2284 return 0;
2285 }
2286 break;
2287
2288 case AARCH64_OPND_ADDR_UIMM12:
2289 imm = opnd->addr.offset.imm;
2290 /* Get the size of the data element that is accessed, which may be
2291 different from that of the source register size,
2292 e.g. in strb/ldrb. */
2293 size = aarch64_get_qualifier_esize (qualifier);
2294 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2295 {
2296 set_offset_out_of_range_error (mismatch_detail, idx,
2297 0, 4095 * size);
2298 return 0;
2299 }
2300 if (!value_aligned_p (opnd->addr.offset.imm, size))
2301 {
2302 set_unaligned_error (mismatch_detail, idx, size);
2303 return 0;
2304 }
2305 break;
2306
2307 case AARCH64_OPND_ADDR_PCREL14:
2308 case AARCH64_OPND_ADDR_PCREL19:
2309 case AARCH64_OPND_ADDR_PCREL21:
2310 case AARCH64_OPND_ADDR_PCREL26:
2311 imm = opnd->imm.value;
2312 if (operand_need_shift_by_two (get_operand_from_code (type)))
2313 {
 2314 	      /* The offset value in a PC-relative branch instruction is always
 2315 		 4-byte aligned and is encoded without the lowest 2 bits.  */
2316 if (!value_aligned_p (imm, 4))
2317 {
2318 set_unaligned_error (mismatch_detail, idx, 4);
2319 return 0;
2320 }
2321 /* Right shift by 2 so that we can carry out the following check
2322 canonically. */
2323 imm >>= 2;
2324 }
2325 size = get_operand_fields_width (get_operand_from_code (type));
2326 if (!value_fit_signed_field_p (imm, size))
2327 {
2328 set_other_error (mismatch_detail, idx,
2329 _("immediate out of range"));
2330 return 0;
2331 }
2332 break;
2333
2334 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2335 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2336 {
2337 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2338 return 0;
2339 }
2340 break;
2341
2342 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2343 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2344 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2345 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2346 min_value = -8;
2347 max_value = 7;
2348 sve_imm_offset_vl:
2349 assert (!opnd->addr.offset.is_reg);
2350 assert (opnd->addr.preind);
2351 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2352 min_value *= num;
2353 max_value *= num;
2354 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2355 || (opnd->shifter.operator_present
2356 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2357 {
2358 set_other_error (mismatch_detail, idx,
2359 _("invalid addressing mode"));
2360 return 0;
2361 }
2362 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2363 {
2364 set_offset_out_of_range_error (mismatch_detail, idx,
2365 min_value, max_value);
2366 return 0;
2367 }
2368 if (!value_aligned_p (opnd->addr.offset.imm, num))
2369 {
2370 set_unaligned_error (mismatch_detail, idx, num);
2371 return 0;
2372 }
2373 break;
2374
2375 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2376 min_value = -32;
2377 max_value = 31;
2378 goto sve_imm_offset_vl;
2379
2380 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2381 min_value = -256;
2382 max_value = 255;
2383 goto sve_imm_offset_vl;
2384
2385 case AARCH64_OPND_SVE_ADDR_RI_U6:
2386 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2387 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2388 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2389 min_value = 0;
2390 max_value = 63;
2391 sve_imm_offset:
2392 assert (!opnd->addr.offset.is_reg);
2393 assert (opnd->addr.preind);
2394 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2395 min_value *= num;
2396 max_value *= num;
2397 if (opnd->shifter.operator_present
2398 || opnd->shifter.amount_present)
2399 {
2400 set_other_error (mismatch_detail, idx,
2401 _("invalid addressing mode"));
2402 return 0;
2403 }
2404 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2405 {
2406 set_offset_out_of_range_error (mismatch_detail, idx,
2407 min_value, max_value);
2408 return 0;
2409 }
2410 if (!value_aligned_p (opnd->addr.offset.imm, num))
2411 {
2412 set_unaligned_error (mismatch_detail, idx, num);
2413 return 0;
2414 }
2415 break;
2416
2417 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2418 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2419 min_value = -8;
2420 max_value = 7;
2421 goto sve_imm_offset;
2422
2423 case AARCH64_OPND_SVE_ADDR_ZX:
2424 /* Everything is already ensured by parse_operands or
2425 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2426 argument type). */
2427 assert (opnd->addr.offset.is_reg);
2428 assert (opnd->addr.preind);
2429 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2430 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2431 assert (opnd->shifter.operator_present == 0);
2432 break;
2433
2434 case AARCH64_OPND_SVE_ADDR_R:
2435 case AARCH64_OPND_SVE_ADDR_RR:
2436 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2437 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2438 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2439 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2440 case AARCH64_OPND_SVE_ADDR_RX:
2441 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2442 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2443 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2444 case AARCH64_OPND_SVE_ADDR_RZ:
2445 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2446 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2447 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2448 modifiers = 1 << AARCH64_MOD_LSL;
2449 sve_rr_operand:
2450 assert (opnd->addr.offset.is_reg);
2451 assert (opnd->addr.preind);
2452 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2453 && opnd->addr.offset.regno == 31)
2454 {
2455 set_other_error (mismatch_detail, idx,
2456 _("index register xzr is not allowed"));
2457 return 0;
2458 }
2459 if (((1 << opnd->shifter.kind) & modifiers) == 0
2460 || (opnd->shifter.amount
2461 != get_operand_specific_data (&aarch64_operands[type])))
2462 {
2463 set_other_error (mismatch_detail, idx,
2464 _("invalid addressing mode"));
2465 return 0;
2466 }
2467 break;
2468
2469 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2470 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2471 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2472 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2473 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2474 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2475 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2476 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2477 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2478 goto sve_rr_operand;
2479
2480 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2481 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2482 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2483 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2484 min_value = 0;
2485 max_value = 31;
2486 goto sve_imm_offset;
2487
2488 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2489 modifiers = 1 << AARCH64_MOD_LSL;
2490 sve_zz_operand:
2491 assert (opnd->addr.offset.is_reg);
2492 assert (opnd->addr.preind);
2493 if (((1 << opnd->shifter.kind) & modifiers) == 0
2494 || opnd->shifter.amount < 0
2495 || opnd->shifter.amount > 3)
2496 {
2497 set_other_error (mismatch_detail, idx,
2498 _("invalid addressing mode"));
2499 return 0;
2500 }
2501 break;
2502
2503 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2504 modifiers = (1 << AARCH64_MOD_SXTW);
2505 goto sve_zz_operand;
2506
2507 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2508 modifiers = 1 << AARCH64_MOD_UXTW;
2509 goto sve_zz_operand;
2510
2511 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
2512 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
2513 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
2514 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
2515 {
2516 int num_bytes = calc_ldst_datasize (opnds);
2517 int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
2518 || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB)
2519 ? opnd->addr.offset.imm * -1
2520 : opnd->addr.offset.imm;
2521 if ((int) num_bytes != abs_offset
2522 && opnd->addr.offset.imm != 0)
2523 {
2524 set_other_error (mismatch_detail, idx,
2525 _("invalid increment amount"));
2526 return 0;
2527 }
2528 }
2529 break;
2530
2531 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
2532 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2533 {
2534 set_imm_out_of_range_error (mismatch_detail, idx, -256, 255);
2535 return 0;
2536 }
2537
2538 default:
2539 break;
2540 }
2541 break;
2542
2543 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2544 if (type == AARCH64_OPND_LEt)
2545 {
2546 /* Get the upper bound for the element index. */
2547 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2548 if (!value_in_range_p (opnd->reglist.index, 0, num))
2549 {
2550 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2551 return 0;
2552 }
2553 }
2554 /* The opcode dependent area stores the number of elements in
2555 each structure to be loaded/stored. */
2556 num = get_opcode_dependent_value (opcode);
2557 switch (type)
2558 {
2559 case AARCH64_OPND_LVt:
2560 assert (num >= 1 && num <= 4);
2561 /* Unless LD1/ST1, the number of registers should be equal to that
2562 of the structure elements. */
2563 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2564 return 0;
2565 break;
2566 case AARCH64_OPND_LVt_AL:
2567 case AARCH64_OPND_LEt:
2568 assert (num >= 1 && num <= 4);
2569 /* The number of registers should be equal to that of the structure
2570 elements. */
2571 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2572 return 0;
2573 break;
2574 default:
2575 break;
2576 }
2577 if (opnd->reglist.stride != 1)
2578 {
2579 set_reg_list_stride_error (mismatch_detail, idx, 1);
2580 return 0;
2581 }
2582 break;
2583
2584 case AARCH64_OPND_CLASS_IMMEDIATE:
2585 /* Constraint check on immediate operand. */
2586 imm = opnd->imm.value;
2587 /* E.g. imm_0_31 constrains value to be 0..31. */
2588 if (qualifier_value_in_range_constraint_p (qualifier)
2589 && !value_in_range_p (imm, get_lower_bound (qualifier),
2590 get_upper_bound (qualifier)))
2591 {
2592 set_imm_out_of_range_error (mismatch_detail, idx,
2593 get_lower_bound (qualifier),
2594 get_upper_bound (qualifier));
2595 return 0;
2596 }
2597
2598 switch (type)
2599 {
2600 case AARCH64_OPND_AIMM:
2601 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2602 {
2603 set_other_error (mismatch_detail, idx,
2604 _("invalid shift operator"));
2605 return 0;
2606 }
2607 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2608 {
2609 set_other_error (mismatch_detail, idx,
2610 _("shift amount must be 0 or 12"));
2611 return 0;
2612 }
2613 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2614 {
2615 set_other_error (mismatch_detail, idx,
2616 _("immediate out of range"));
2617 return 0;
2618 }
2619 break;
2620
2621 case AARCH64_OPND_HALF:
2622 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2623 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2624 {
2625 set_other_error (mismatch_detail, idx,
2626 _("invalid shift operator"));
2627 return 0;
2628 }
2629 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2630 if (!value_aligned_p (opnd->shifter.amount, 16))
2631 {
2632 set_other_error (mismatch_detail, idx,
2633 _("shift amount must be a multiple of 16"));
2634 return 0;
2635 }
2636 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2637 {
2638 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2639 0, size * 8 - 16);
2640 return 0;
2641 }
2642 if (opnd->imm.value < 0)
2643 {
2644 set_other_error (mismatch_detail, idx,
2645 _("negative immediate value not allowed"));
2646 return 0;
2647 }
2648 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2649 {
2650 set_other_error (mismatch_detail, idx,
2651 _("immediate out of range"));
2652 return 0;
2653 }
2654 break;
2655
2656 case AARCH64_OPND_IMM_MOV:
2657 {
2658 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2659 imm = opnd->imm.value;
2660 assert (idx == 1);
2661 switch (opcode->op)
2662 {
2663 case OP_MOV_IMM_WIDEN:
2664 imm = ~imm;
2665 /* Fall through. */
2666 case OP_MOV_IMM_WIDE:
2667 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2668 {
2669 set_other_error (mismatch_detail, idx,
2670 _("immediate out of range"));
2671 return 0;
2672 }
2673 break;
2674 case OP_MOV_IMM_LOG:
2675 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2676 {
2677 set_other_error (mismatch_detail, idx,
2678 _("immediate out of range"));
2679 return 0;
2680 }
2681 break;
2682 default:
2683 assert (0);
2684 return 0;
2685 }
2686 }
2687 break;
2688
2689 case AARCH64_OPND_NZCV:
2690 case AARCH64_OPND_CCMP_IMM:
2691 case AARCH64_OPND_EXCEPTION:
2692 case AARCH64_OPND_UNDEFINED:
2693 case AARCH64_OPND_TME_UIMM16:
2694 case AARCH64_OPND_UIMM4:
2695 case AARCH64_OPND_UIMM4_ADDG:
2696 case AARCH64_OPND_UIMM7:
2697 case AARCH64_OPND_UIMM3_OP1:
2698 case AARCH64_OPND_UIMM3_OP2:
2699 case AARCH64_OPND_SVE_UIMM3:
2700 case AARCH64_OPND_SVE_UIMM7:
2701 case AARCH64_OPND_SVE_UIMM8:
2702 case AARCH64_OPND_SVE_UIMM8_53:
2703 case AARCH64_OPND_CSSC_UIMM8:
2704 size = get_operand_fields_width (get_operand_from_code (type));
2705 assert (size < 32);
2706 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2707 {
2708 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2709 (1u << size) - 1);
2710 return 0;
2711 }
2712 break;
2713
2714 case AARCH64_OPND_UIMM10:
2715 /* Scaled unsigned 10 bits immediate offset. */
2716 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2717 {
2718 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2719 return 0;
2720 }
2721
2722 if (!value_aligned_p (opnd->imm.value, 16))
2723 {
2724 set_unaligned_error (mismatch_detail, idx, 16);
2725 return 0;
2726 }
2727 break;
2728
2729 case AARCH64_OPND_SIMM5:
2730 case AARCH64_OPND_SVE_SIMM5:
2731 case AARCH64_OPND_SVE_SIMM5B:
2732 case AARCH64_OPND_SVE_SIMM6:
2733 case AARCH64_OPND_SVE_SIMM8:
2734 case AARCH64_OPND_CSSC_SIMM8:
2735 size = get_operand_fields_width (get_operand_from_code (type));
2736 assert (size < 32);
2737 if (!value_fit_signed_field_p (opnd->imm.value, size))
2738 {
2739 set_imm_out_of_range_error (mismatch_detail, idx,
2740 -(1 << (size - 1)),
2741 (1 << (size - 1)) - 1);
2742 return 0;
2743 }
2744 break;
2745
2746 case AARCH64_OPND_WIDTH:
2747 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2748 && opnds[0].type == AARCH64_OPND_Rd);
2749 size = get_upper_bound (qualifier);
2750 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2751 /* lsb+width <= reg.size */
2752 {
2753 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2754 size - opnds[idx-1].imm.value);
2755 return 0;
2756 }
2757 break;
2758
2759 case AARCH64_OPND_LIMM:
2760 case AARCH64_OPND_SVE_LIMM:
2761 {
2762 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2763 uint64_t uimm = opnd->imm.value;
2764 if (opcode->op == OP_BIC)
2765 uimm = ~uimm;
2766 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2767 {
2768 set_other_error (mismatch_detail, idx,
2769 _("immediate out of range"));
2770 return 0;
2771 }
2772 }
2773 break;
2774
2775 case AARCH64_OPND_IMM0:
2776 case AARCH64_OPND_FPIMM0:
2777 if (opnd->imm.value != 0)
2778 {
2779 set_other_error (mismatch_detail, idx,
2780 _("immediate zero expected"));
2781 return 0;
2782 }
2783 break;
2784
2785 case AARCH64_OPND_IMM_ROT1:
2786 case AARCH64_OPND_IMM_ROT2:
2787 case AARCH64_OPND_SVE_IMM_ROT2:
2788 if (opnd->imm.value != 0
2789 && opnd->imm.value != 90
2790 && opnd->imm.value != 180
2791 && opnd->imm.value != 270)
2792 {
2793 set_other_error (mismatch_detail, idx,
2794 _("rotate expected to be 0, 90, 180 or 270"));
2795 return 0;
2796 }
2797 break;
2798
2799 case AARCH64_OPND_IMM_ROT3:
2800 case AARCH64_OPND_SVE_IMM_ROT1:
2801 case AARCH64_OPND_SVE_IMM_ROT3:
2802 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2803 {
2804 set_other_error (mismatch_detail, idx,
2805 _("rotate expected to be 90 or 270"));
2806 return 0;
2807 }
2808 break;
2809
2810 case AARCH64_OPND_SHLL_IMM:
2811 assert (idx == 2);
2812 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2813 if (opnd->imm.value != size)
2814 {
2815 set_other_error (mismatch_detail, idx,
2816 _("invalid shift amount"));
2817 return 0;
2818 }
2819 break;
2820
2821 case AARCH64_OPND_IMM_VLSL:
2822 size = aarch64_get_qualifier_esize (qualifier);
2823 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2824 {
2825 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2826 size * 8 - 1);
2827 return 0;
2828 }
2829 break;
2830
2831 case AARCH64_OPND_IMM_VLSR:
2832 size = aarch64_get_qualifier_esize (qualifier);
2833 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2834 {
2835 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2836 return 0;
2837 }
2838 break;
2839
2840 case AARCH64_OPND_SIMD_IMM:
2841 case AARCH64_OPND_SIMD_IMM_SFT:
2842 /* Qualifier check. */
2843 switch (qualifier)
2844 {
2845 case AARCH64_OPND_QLF_LSL:
2846 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2847 {
2848 set_other_error (mismatch_detail, idx,
2849 _("invalid shift operator"));
2850 return 0;
2851 }
2852 break;
2853 case AARCH64_OPND_QLF_MSL:
2854 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2855 {
2856 set_other_error (mismatch_detail, idx,
2857 _("invalid shift operator"));
2858 return 0;
2859 }
2860 break;
2861 case AARCH64_OPND_QLF_NIL:
2862 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2863 {
2864 set_other_error (mismatch_detail, idx,
2865 _("shift is not permitted"));
2866 return 0;
2867 }
2868 break;
2869 default:
2870 assert (0);
2871 return 0;
2872 }
2873 /* Is the immediate valid? */
2874 assert (idx == 1);
2875 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2876 {
2877 /* uimm8 or simm8 */
2878 if (!value_in_range_p (opnd->imm.value, -128, 255))
2879 {
2880 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2881 return 0;
2882 }
2883 }
2884 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2885 {
2886 /* uimm64 is not
2887 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2888 ffffffffgggggggghhhhhhhh'. */
2889 set_other_error (mismatch_detail, idx,
2890 _("invalid value for immediate"));
2891 return 0;
2892 }
2893 /* Is the shift amount valid? */
2894 switch (opnd->shifter.kind)
2895 {
2896 case AARCH64_MOD_LSL:
2897 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2898 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2899 {
2900 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2901 (size - 1) * 8);
2902 return 0;
2903 }
2904 if (!value_aligned_p (opnd->shifter.amount, 8))
2905 {
2906 set_unaligned_error (mismatch_detail, idx, 8);
2907 return 0;
2908 }
2909 break;
2910 case AARCH64_MOD_MSL:
 2911 	      /* Only 8 and 16 are valid shift amounts.  */
2912 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2913 {
2914 set_other_error (mismatch_detail, idx,
2915 _("shift amount must be 0 or 16"));
2916 return 0;
2917 }
2918 break;
2919 default:
2920 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2921 {
2922 set_other_error (mismatch_detail, idx,
2923 _("invalid shift operator"));
2924 return 0;
2925 }
2926 break;
2927 }
2928 break;
2929
2930 case AARCH64_OPND_FPIMM:
2931 case AARCH64_OPND_SIMD_FPIMM:
2932 case AARCH64_OPND_SVE_FPIMM8:
2933 if (opnd->imm.is_fp == 0)
2934 {
2935 set_other_error (mismatch_detail, idx,
2936 _("floating-point immediate expected"));
2937 return 0;
2938 }
2939 /* The value is expected to be an 8-bit floating-point constant with
2940 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2941 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2942 instruction). */
2943 if (!value_in_range_p (opnd->imm.value, 0, 255))
2944 {
2945 set_other_error (mismatch_detail, idx,
2946 _("immediate out of range"));
2947 return 0;
2948 }
2949 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2950 {
2951 set_other_error (mismatch_detail, idx,
2952 _("invalid shift operator"));
2953 return 0;
2954 }
2955 break;
2956
2957 case AARCH64_OPND_SVE_AIMM:
2958 min_value = 0;
2959 sve_aimm:
2960 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2961 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2962 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2963 uvalue = opnd->imm.value;
2964 shift = opnd->shifter.amount;
2965 if (size == 1)
2966 {
2967 if (shift != 0)
2968 {
2969 set_other_error (mismatch_detail, idx,
2970 _("no shift amount allowed for"
2971 " 8-bit constants"));
2972 return 0;
2973 }
2974 }
2975 else
2976 {
2977 if (shift != 0 && shift != 8)
2978 {
2979 set_other_error (mismatch_detail, idx,
2980 _("shift amount must be 0 or 8"));
2981 return 0;
2982 }
2983 if (shift == 0 && (uvalue & 0xff) == 0)
2984 {
2985 shift = 8;
2986 uvalue = (int64_t) uvalue / 256;
2987 }
2988 }
2989 mask >>= shift;
2990 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2991 {
2992 set_other_error (mismatch_detail, idx,
2993 _("immediate too big for element size"));
2994 return 0;
2995 }
2996 uvalue = (uvalue - min_value) & mask;
2997 if (uvalue > 0xff)
2998 {
2999 set_other_error (mismatch_detail, idx,
3000 _("invalid arithmetic immediate"));
3001 return 0;
3002 }
3003 break;
3004
3005 case AARCH64_OPND_SVE_ASIMM:
3006 min_value = -128;
3007 goto sve_aimm;
3008
3009 case AARCH64_OPND_SVE_I1_HALF_ONE:
3010 assert (opnd->imm.is_fp);
3011 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
3012 {
3013 set_other_error (mismatch_detail, idx,
3014 _("floating-point value must be 0.5 or 1.0"));
3015 return 0;
3016 }
3017 break;
3018
3019 case AARCH64_OPND_SVE_I1_HALF_TWO:
3020 assert (opnd->imm.is_fp);
3021 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
3022 {
3023 set_other_error (mismatch_detail, idx,
3024 _("floating-point value must be 0.5 or 2.0"));
3025 return 0;
3026 }
3027 break;
3028
3029 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3030 assert (opnd->imm.is_fp);
3031 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
3032 {
3033 set_other_error (mismatch_detail, idx,
3034 _("floating-point value must be 0.0 or 1.0"));
3035 return 0;
3036 }
3037 break;
3038
3039 case AARCH64_OPND_SVE_INV_LIMM:
3040 {
3041 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3042 uint64_t uimm = ~opnd->imm.value;
3043 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3044 {
3045 set_other_error (mismatch_detail, idx,
3046 _("immediate out of range"));
3047 return 0;
3048 }
3049 }
3050 break;
3051
3052 case AARCH64_OPND_SVE_LIMM_MOV:
3053 {
3054 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3055 uint64_t uimm = opnd->imm.value;
3056 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3057 {
3058 set_other_error (mismatch_detail, idx,
3059 _("immediate out of range"));
3060 return 0;
3061 }
3062 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
3063 {
3064 set_other_error (mismatch_detail, idx,
3065 _("invalid replicated MOV immediate"));
3066 return 0;
3067 }
3068 }
3069 break;
3070
3071 case AARCH64_OPND_SVE_PATTERN_SCALED:
3072 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
3073 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
3074 {
3075 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
3076 return 0;
3077 }
3078 break;
3079
3080 case AARCH64_OPND_SVE_SHLIMM_PRED:
3081 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3082 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3083 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
3084 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
3085 {
3086 set_imm_out_of_range_error (mismatch_detail, idx,
3087 0, 8 * size - 1);
3088 return 0;
3089 }
3090 break;
3091
3092 case AARCH64_OPND_SME_SHRIMM4:
3093 size = 1 << get_operand_fields_width (get_operand_from_code (type));
3094 if (!value_in_range_p (opnd->imm.value, 1, size))
3095 {
3096 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
3097 return 0;
3098 }
3099 break;
3100
3101 case AARCH64_OPND_SME_SHRIMM5:
3102 case AARCH64_OPND_SVE_SHRIMM_PRED:
3103 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3104 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3105 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
3106 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
3107 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
3108 {
3109 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
3110 return 0;
3111 }
3112 break;
3113
3114 case AARCH64_OPND_SME_ZT0_INDEX:
3115 if (!value_in_range_p (opnd->imm.value, 0, 56))
3116 {
3117 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
3118 return 0;
3119 }
3120 if (opnd->imm.value % 8 != 0)
3121 {
3122 set_other_error (mismatch_detail, idx,
3123 _("byte index must be a multiple of 8"));
3124 return 0;
3125 }
3126 break;
3127
3128 default:
3129 break;
3130 }
3131 break;
3132
3133 case AARCH64_OPND_CLASS_SYSTEM:
3134 switch (type)
3135 {
3136 case AARCH64_OPND_PSTATEFIELD:
3137 for (i = 0; aarch64_pstatefields[i].name; ++i)
3138 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3139 break;
3140 assert (aarch64_pstatefields[i].name);
3141 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
3142 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
3143 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
3144 {
3145 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
3146 return 0;
3147 }
3148 break;
3149 case AARCH64_OPND_PRFOP:
3150 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3151 {
3152 set_other_error (mismatch_detail, idx,
3153 _("the register-index form of PRFM does"
3154 " not accept opcodes in the range 24-31"));
3155 return 0;
3156 }
3157 break;
3158 default:
3159 break;
3160 }
3161 break;
3162
3163 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3164 /* Get the upper bound for the element index. */
3165 if (opcode->op == OP_FCMLA_ELEM)
3166 /* FCMLA index range depends on the vector size of other operands
3167 and is halfed because complex numbers take two elements. */
3168 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3169 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3170 else
3171 num = 16;
3172 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3173 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3174
3175 /* Index out-of-range. */
3176 if (!value_in_range_p (opnd->reglane.index, 0, num))
3177 {
3178 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3179 return 0;
3180 }
3181 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3182 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3183 number is encoded in "size:M:Rm":
3184 size <Vm>
3185 00 RESERVED
3186 01 0:Rm
3187 10 M:Rm
3188 11 RESERVED */
3189 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
3190 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3191 {
3192 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3193 return 0;
3194 }
3195 break;
3196
3197 case AARCH64_OPND_CLASS_MODIFIED_REG:
3198 assert (idx == 1 || idx == 2);
3199 switch (type)
3200 {
3201 case AARCH64_OPND_Rm_EXT:
3202 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3203 && opnd->shifter.kind != AARCH64_MOD_LSL)
3204 {
3205 set_other_error (mismatch_detail, idx,
3206 _("extend operator expected"));
3207 return 0;
3208 }
3209 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3210 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3211 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3212 case. */
3213 if (!aarch64_stack_pointer_p (opnds + 0)
3214 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3215 {
3216 if (!opnd->shifter.operator_present)
3217 {
3218 set_other_error (mismatch_detail, idx,
3219 _("missing extend operator"));
3220 return 0;
3221 }
3222 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3223 {
3224 set_other_error (mismatch_detail, idx,
3225 _("'LSL' operator not allowed"));
3226 return 0;
3227 }
3228 }
3229 assert (opnd->shifter.operator_present /* Default to LSL. */
3230 || opnd->shifter.kind == AARCH64_MOD_LSL);
3231 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3232 {
3233 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3234 return 0;
3235 }
3236 /* In the 64-bit form, the final register operand is written as Wm
3237 for all but the (possibly omitted) UXTX/LSL and SXTX
3238 operators.
3239 N.B. GAS allows X register to be used with any operator as a
3240 programming convenience. */
3241 if (qualifier == AARCH64_OPND_QLF_X
3242 && opnd->shifter.kind != AARCH64_MOD_LSL
3243 && opnd->shifter.kind != AARCH64_MOD_UXTX
3244 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3245 {
3246 set_other_error (mismatch_detail, idx, _("W register expected"));
3247 return 0;
3248 }
3249 break;
3250
3251 case AARCH64_OPND_Rm_SFT:
3252 /* ROR is not available to the shifted register operand in
3253 arithmetic instructions. */
3254 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3255 {
3256 set_other_error (mismatch_detail, idx,
3257 _("shift operator expected"));
3258 return 0;
3259 }
3260 if (opnd->shifter.kind == AARCH64_MOD_ROR
3261 && opcode->iclass != log_shift)
3262 {
3263 set_other_error (mismatch_detail, idx,
3264 _("'ROR' operator not allowed"));
3265 return 0;
3266 }
3267 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3268 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3269 {
3270 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3271 return 0;
3272 }
3273 break;
3274
3275 default:
3276 break;
3277 }
3278 break;
3279
3280 default:
3281 break;
3282 }
3283
3284 return 1;
3285 }
3286
3287 /* Main entrypoint for the operand constraint checking.
3288
3289 Return 1 if operands of *INST meet the constraint applied by the operand
3290 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3291 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3292 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3293 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3294 error kind when it is notified that an instruction does not pass the check).
3295
3296 Un-determined operand qualifiers may get established during the process. */
3297
int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
	{
	/* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.  */
	case sme_ldr:
	case sme_str:
	  assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
	  assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
	  if (inst->operands[0].indexed_za.index.imm
	      != inst->operands[1].addr.offset.imm)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
		  mismatch_detail->index = i;
		}
	      return 0;
	    }
	  break;

	default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class
	      = aarch64_get_operand_class (inst->operands[0].type);
	    assert (aarch64_get_operand_class (inst->operands[i].type)
		    == op_class);
	    /* For register lists, every list property (start register,
	       length, and stride) must match; otherwise just compare
	       register numbers.  */
	    if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return 0;
	      }
	    break;
	  }
	}
    }

  /* Match operands' qualifier.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by operand_general_constraint_met_p,
     which has to be called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
3418
3419 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3420 Also updates the TYPE of each INST->OPERANDS with the corresponding
3421 value of OPCODE->OPERANDS.
3422
3423 Note that some operand qualifiers may need to be manually cleared by
3424 the caller before it further calls the aarch64_opcode_encode; by
3425 doing this, it helps the qualifier matching facilities work
3426 properly. */
3427
3428 const aarch64_opcode*
3429 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3430 {
3431 int i;
3432 const aarch64_opcode *old = inst->opcode;
3433
3434 inst->opcode = opcode;
3435
3436 /* Update the operand types. */
3437 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3438 {
3439 inst->operands[i].type = opcode->operands[i];
3440 if (opcode->operands[i] == AARCH64_OPND_NIL)
3441 break;
3442 }
3443
3444 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3445
3446 return old;
3447 }
3448
3449 int
3450 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3451 {
3452 int i;
3453 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3454 if (operands[i] == operand)
3455 return i;
3456 else if (operands[i] == AARCH64_OPND_NIL)
3457 break;
3458 return -1;
3459 }
3460
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names, indexed as int_reg[has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3492
3493 /* Return the integer register name.
3494 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3495
3496 static inline const char *
3497 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3498 {
3499 const int has_zr = sp_reg_p ? 0 : 1;
3500 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3501 return int_reg[has_zr][is_64][regno];
3502 }
3503
3504 /* Like get_int_reg_name, but IS_64 is always 1. */
3505
3506 static inline const char *
3507 get_64bit_int_reg_name (int regno, int sp_reg_p)
3508 {
3509 const int has_zr = sp_reg_p ? 0 : 1;
3510 return int_reg[has_zr][1][regno];
3511 }
3512
3513 /* Get the name of the integer offset register in OPND, using the shift type
3514 to decide whether it's a word or doubleword. */
3515
3516 static inline const char *
3517 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3518 {
3519 switch (opnd->shifter.kind)
3520 {
3521 case AARCH64_MOD_UXTW:
3522 case AARCH64_MOD_SXTW:
3523 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3524
3525 case AARCH64_MOD_LSL:
3526 case AARCH64_MOD_SXTX:
3527 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3528
3529 default:
3530 abort ();
3531 }
3532 }
3533
3534 /* Get the name of the SVE vector offset register in OPND, using the operand
3535 qualifier to decide whether the suffix should be .S or .D. */
3536
3537 static inline const char *
3538 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3539 {
3540 assert (qualifier == AARCH64_OPND_QLF_S_S
3541 || qualifier == AARCH64_OPND_QLF_S_D);
3542 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3543 }
3544
3545 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3546
/* Bit-pattern <-> value punning for a double-precision constant.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Bit-pattern <-> value punning for a single-precision constant.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision constants are expanded to single precision before use
   (see expand_fp_imm), so this type is 32 bits wide like single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
3564
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  Expand it to the IEEE bit
   pattern of a single-precision value (SIZE == 4) or a double-precision
   value (SIZE == 8).  A half-precision constant (SIZE == 2) is expanded
   to single precision.  The expanded bit pattern is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 0x01;		/* imm8<7>  */
  uint32_t frac7 = imm8 & 0x7f;			/* imm8<6:0>  */
  uint32_t bit6 = frac7 >> 6;			/* imm8<6>  */
  uint32_t rep4 = bit6 ? 0x0f : 0x00;		/* Replicate(imm8<6>,4)  */
  uint64_t result;

  if (size == 8)
    {
      /* Build the high word: sign : NOT(imm8<6>) : Replicate(imm8<6>,7)
	 : imm8<5:0> : Zeros(16), then shift it into the top half of the
	 64-bit result.  */
      uint32_t high = (sign << 31)
	| ((bit6 ^ 1) << 30)
	| (rep4 << 26)
	| (bit6 << 25) | (bit6 << 24) | (bit6 << 23)
	| (frac7 << 16);
      result = (uint64_t) high << 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single-precision pattern: sign : NOT(imm8<6>)
	 : Replicate(imm8<6>,4) : imm8<5:0> : Zeros(19).  */
      result = ((uint64_t) sign << 31)
	| ((uint64_t) (bit6 ^ 1) << 30)
	| ((uint64_t) rep4 << 26)
	| ((uint64_t) frac7 << 19);
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
3608
3609 /* Return a string based on FMT with the register style applied. */
3610
3611 static const char *
3612 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3613 {
3614 const char *txt;
3615 va_list ap;
3616
3617 va_start (ap, fmt);
3618 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3619 va_end (ap);
3620
3621 return txt;
3622 }
3623
3624 /* Return a string based on FMT with the immediate style applied. */
3625
3626 static const char *
3627 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3628 {
3629 const char *txt;
3630 va_list ap;
3631
3632 va_start (ap, fmt);
3633 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3634 va_end (ap);
3635
3636 return txt;
3637 }
3638
3639 /* Return a string based on FMT with the sub-mnemonic style applied. */
3640
3641 static const char *
3642 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3643 {
3644 const char *txt;
3645 va_list ap;
3646
3647 va_start (ap, fmt);
3648 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3649 va_end (ap);
3650
3651 return txt;
3652 }
3653
3654 /* Return a string based on FMT with the address style applied. */
3655
3656 static const char *
3657 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3658 {
3659 const char *txt;
3660 va_list ap;
3661
3662 va_start (ap, fmt);
3663 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3664 va_end (ap);
3665
3666 return txt;
3667 }
3668
3669 /* Produce the string representation of the register list operand *OPND
3670 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3671 the register name that comes before the register number, such as "v". */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers ("p" prefix) number 0-15; other banks 0-31.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around within the bank.  */
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];			/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there is more
     than one register in the list, and the register numbers are
     monotonically increasing in increments of one (the SME Zt2/Zt3/Zt4
     lists are always written out in full).  */
  if (stride == 1 && num_regs > 1
      && ((opnd->type != AARCH64_OPND_SME_Zt2)
	  && (opnd->type != AARCH64_OPND_SME_Zt3)
	  && (opnd->type != AARCH64_OPND_SME_Zt4)))
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Explicit list: compute each member, wrapping within the bank.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3743
3744 /* Print the register+immediate address in OPND to BUF, which has SIZE
3745 characters. BASE is the name of the base register. */
3746
static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base,
				struct aarch64_styler *styler)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
	{
	  /* Pre-indexed, e.g. [Xn, #8]!.  For ADDR_SIMM10 a zero
	     immediate is omitted, giving [Xn]!.  */
	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s]!", style_reg (styler, base));
	  else
	    snprintf (buf, size, "[%s, %s]!",
		      style_reg (styler, base),
		      style_imm (styler, "#%d", opnd->addr.offset.imm));
	}
      else
	/* Post-indexed, e.g. [Xn], #8.  */
	snprintf (buf, size, "[%s], %s",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
    }
  else
    {
      if (opnd->shifter.operator_present)
	{
	  /* SVE vector-length-scaled form, e.g. [Xn, #1, mul vl].  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, %s, %s]",
		    style_reg (styler, base),
		    style_imm (styler, "#%d", opnd->addr.offset.imm),
		    style_sub_mnem (styler, "mul vl"));
	}
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, %s]",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
      else
	/* A zero offset is omitted entirely.  */
	snprintf (buf, size, "[%s]", style_reg (styler, base));
    }
}
3787
3788 /* Produce the string representation of the register offset address operand
3789 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3790 the names of the base and offset registers. */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset,
			       struct aarch64_styler *styler)
{
  char tb[32];			/* Temporary buffer for the extend/shift.  */
  bool print_extend_p = true;
  bool print_amount_p = true;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = false;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = false;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s %s",
		  style_sub_mnem (styler, shift_name),
		  style_imm (styler, "#%" PRIi64,
			     /* PR 21096: The %100 is to silence a warning about possible truncation.  */
			     (opnd->shifter.amount % 100)));
      else
	snprintf (tb, sizeof (tb), ", %s",
		  style_sub_mnem (styler, shift_name));
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
	    style_reg (styler, offset), tb);
}
3833
3834 /* Print ZA tiles from imm8 in ZERO instruction.
3835
3836 The preferred disassembly of this instruction uses the shortest list of tile
3837 names that represent the encoded immediate mask.
3838
3839 For example:
3840 * An all-ones immediate is disassembled as {ZA}.
3841 * An all-zeros immediate is disassembled as an empty list { }.
3842 */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names paired with the imm8 mask bits they cover, ordered so that
     tiles covering more bits come first; the matching loop below is
     greedy, which yields the shortest list of names.
     NOTE(review): the trailing " "/0x00 entry appears unreachable — a
     zero MASK makes the loop break on its first iteration; confirm the
     intended output for an all-zeros immediate.  */
  const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
                        "za1.s", "za2.s", "za3.s", "za0.d",
                        "za1.d", "za2.d", "za3.d", "za4.d",
                        "za5.d", "za6.d", "za7.d", " " };
  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
                        0x22, 0x44, 0x88, 0x01,
                        0x02, 0x04, 0x08, 0x10,
                        0x20, 0x40, 0x80, 0x00 };
  int i, k;
  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);

  k = snprintf (buf, size, "{");
  for (i = 0; i < ZAN_SIZE; i++)
    {
      /* Emit a tile name whenever all of its mask bits are still set,
	 then clear those bits so narrower tiles are not re-counted.  */
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  mask &= ~zan_v[i];
	  /* k > 1 means something was already emitted after "{".  */
	  if (k > 1)
	    k += snprintf (buf + k, size - k, ", ");

	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + k, size - k, "}");
}
3874
3875 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3876 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3877 PC, PCREL_P and ADDRESS are used to pass in and return information about
3878 the PC-relative address calculation, where the PC value is passed in
3879 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3880 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3881 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3882
3883 The function serves both the disassembler and the assembler diagnostics
3884 issuer, which is the reason why it lives in this file. */
3885
3886 void
3887 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3888 const aarch64_opcode *opcode,
3889 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3890 bfd_vma *address, char** notes,
3891 char *comment, size_t comment_size,
3892 aarch64_feature_set features,
3893 struct aarch64_styler *styler)
3894 {
3895 unsigned int i, num_conds;
3896 const char *name = NULL;
3897 const aarch64_opnd_info *opnd = opnds + idx;
3898 enum aarch64_modifier_kind kind;
3899 uint64_t addr, enum_value;
3900
3901 if (comment != NULL)
3902 {
3903 assert (comment_size > 0);
3904 comment[0] = '\0';
3905 }
3906 else
3907 assert (comment_size == 0);
3908
3909 buf[0] = '\0';
3910 if (pcrel_p)
3911 *pcrel_p = 0;
3912
3913 switch (opnd->type)
3914 {
3915 case AARCH64_OPND_Rd:
3916 case AARCH64_OPND_Rn:
3917 case AARCH64_OPND_Rm:
3918 case AARCH64_OPND_Rt:
3919 case AARCH64_OPND_Rt2:
3920 case AARCH64_OPND_Rs:
3921 case AARCH64_OPND_Ra:
3922 case AARCH64_OPND_Rt_LS64:
3923 case AARCH64_OPND_Rt_SYS:
3924 case AARCH64_OPND_PAIRREG:
3925 case AARCH64_OPND_PAIRREG_OR_XZR:
3926 case AARCH64_OPND_SVE_Rm:
3927 case AARCH64_OPND_LSE128_Rt:
3928 case AARCH64_OPND_LSE128_Rt2:
3929 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3930 the <ic_op>, therefore we use opnd->present to override the
3931 generic optional-ness information. */
3932 if (opnd->type == AARCH64_OPND_Rt_SYS)
3933 {
3934 if (!opnd->present)
3935 break;
3936 }
3937 /* Omit the operand, e.g. RET. */
3938 else if (optional_operand_p (opcode, idx)
3939 && (opnd->reg.regno
3940 == get_optional_operand_default_value (opcode)))
3941 break;
3942 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3943 || opnd->qualifier == AARCH64_OPND_QLF_X);
3944 snprintf (buf, size, "%s",
3945 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3946 opnd->qualifier, 0)));
3947 break;
3948
3949 case AARCH64_OPND_Rd_SP:
3950 case AARCH64_OPND_Rn_SP:
3951 case AARCH64_OPND_Rt_SP:
3952 case AARCH64_OPND_SVE_Rn_SP:
3953 case AARCH64_OPND_Rm_SP:
3954 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3955 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3956 || opnd->qualifier == AARCH64_OPND_QLF_X
3957 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3958 snprintf (buf, size, "%s",
3959 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3960 opnd->qualifier, 1)));
3961 break;
3962
3963 case AARCH64_OPND_Rm_EXT:
3964 kind = opnd->shifter.kind;
3965 assert (idx == 1 || idx == 2);
3966 if ((aarch64_stack_pointer_p (opnds)
3967 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3968 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3969 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3970 && kind == AARCH64_MOD_UXTW)
3971 || (opnd->qualifier == AARCH64_OPND_QLF_X
3972 && kind == AARCH64_MOD_UXTX)))
3973 {
3974 /* 'LSL' is the preferred form in this case. */
3975 kind = AARCH64_MOD_LSL;
3976 if (opnd->shifter.amount == 0)
3977 {
3978 /* Shifter omitted. */
3979 snprintf (buf, size, "%s",
3980 style_reg (styler,
3981 get_int_reg_name (opnd->reg.regno,
3982 opnd->qualifier, 0)));
3983 break;
3984 }
3985 }
3986 if (opnd->shifter.amount)
3987 snprintf (buf, size, "%s, %s %s",
3988 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3989 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3990 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3991 else
3992 snprintf (buf, size, "%s, %s",
3993 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3994 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3995 break;
3996
3997 case AARCH64_OPND_Rm_SFT:
3998 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3999 || opnd->qualifier == AARCH64_OPND_QLF_X);
4000 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
4001 snprintf (buf, size, "%s",
4002 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4003 opnd->qualifier, 0)));
4004 else
4005 snprintf (buf, size, "%s, %s %s",
4006 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4007 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4008 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4009 break;
4010
4011 case AARCH64_OPND_Fd:
4012 case AARCH64_OPND_Fn:
4013 case AARCH64_OPND_Fm:
4014 case AARCH64_OPND_Fa:
4015 case AARCH64_OPND_Ft:
4016 case AARCH64_OPND_Ft2:
4017 case AARCH64_OPND_Sd:
4018 case AARCH64_OPND_Sn:
4019 case AARCH64_OPND_Sm:
4020 case AARCH64_OPND_SVE_VZn:
4021 case AARCH64_OPND_SVE_Vd:
4022 case AARCH64_OPND_SVE_Vm:
4023 case AARCH64_OPND_SVE_Vn:
4024 snprintf (buf, size, "%s",
4025 style_reg (styler, "%s%d",
4026 aarch64_get_qualifier_name (opnd->qualifier),
4027 opnd->reg.regno));
4028 break;
4029
4030 case AARCH64_OPND_Va:
4031 case AARCH64_OPND_Vd:
4032 case AARCH64_OPND_Vn:
4033 case AARCH64_OPND_Vm:
4034 snprintf (buf, size, "%s",
4035 style_reg (styler, "v%d.%s", opnd->reg.regno,
4036 aarch64_get_qualifier_name (opnd->qualifier)));
4037 break;
4038
4039 case AARCH64_OPND_Ed:
4040 case AARCH64_OPND_En:
4041 case AARCH64_OPND_Em:
4042 case AARCH64_OPND_Em16:
4043 case AARCH64_OPND_SM3_IMM2:
4044 snprintf (buf, size, "%s[%s]",
4045 style_reg (styler, "v%d.%s", opnd->reglane.regno,
4046 aarch64_get_qualifier_name (opnd->qualifier)),
4047 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4048 break;
4049
4050 case AARCH64_OPND_VdD1:
4051 case AARCH64_OPND_VnD1:
4052 snprintf (buf, size, "%s[%s]",
4053 style_reg (styler, "v%d.d", opnd->reg.regno),
4054 style_imm (styler, "1"));
4055 break;
4056
4057 case AARCH64_OPND_LVn:
4058 case AARCH64_OPND_LVt:
4059 case AARCH64_OPND_LVt_AL:
4060 case AARCH64_OPND_LEt:
4061 print_register_list (buf, size, opnd, "v", styler);
4062 break;
4063
4064 case AARCH64_OPND_SVE_Pd:
4065 case AARCH64_OPND_SVE_Pg3:
4066 case AARCH64_OPND_SVE_Pg4_5:
4067 case AARCH64_OPND_SVE_Pg4_10:
4068 case AARCH64_OPND_SVE_Pg4_16:
4069 case AARCH64_OPND_SVE_Pm:
4070 case AARCH64_OPND_SVE_Pn:
4071 case AARCH64_OPND_SVE_Pt:
4072 case AARCH64_OPND_SME_Pm:
4073 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4074 snprintf (buf, size, "%s",
4075 style_reg (styler, "p%d", opnd->reg.regno));
4076 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4077 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4078 snprintf (buf, size, "%s",
4079 style_reg (styler, "p%d/%s", opnd->reg.regno,
4080 aarch64_get_qualifier_name (opnd->qualifier)));
4081 else
4082 snprintf (buf, size, "%s",
4083 style_reg (styler, "p%d.%s", opnd->reg.regno,
4084 aarch64_get_qualifier_name (opnd->qualifier)));
4085 break;
4086
4087 case AARCH64_OPND_SVE_PNd:
4088 case AARCH64_OPND_SVE_PNg4_10:
4089 case AARCH64_OPND_SVE_PNn:
4090 case AARCH64_OPND_SVE_PNt:
4091 case AARCH64_OPND_SME_PNd3:
4092 case AARCH64_OPND_SME_PNg3:
4093 case AARCH64_OPND_SME_PNn:
4094 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4095 snprintf (buf, size, "%s",
4096 style_reg (styler, "pn%d", opnd->reg.regno));
4097 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4098 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4099 snprintf (buf, size, "%s",
4100 style_reg (styler, "pn%d/%s", opnd->reg.regno,
4101 aarch64_get_qualifier_name (opnd->qualifier)));
4102 else
4103 snprintf (buf, size, "%s",
4104 style_reg (styler, "pn%d.%s", opnd->reg.regno,
4105 aarch64_get_qualifier_name (opnd->qualifier)));
4106 break;
4107
4108 case AARCH64_OPND_SME_Pdx2:
4109 case AARCH64_OPND_SME_PdxN:
4110 print_register_list (buf, size, opnd, "p", styler);
4111 break;
4112
4113 case AARCH64_OPND_SME_PNn3_INDEX1:
4114 case AARCH64_OPND_SME_PNn3_INDEX2:
4115 snprintf (buf, size, "%s[%s]",
4116 style_reg (styler, "pn%d", opnd->reglane.regno),
4117 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4118 break;
4119
4120 case AARCH64_OPND_SVE_Za_5:
4121 case AARCH64_OPND_SVE_Za_16:
4122 case AARCH64_OPND_SVE_Zd:
4123 case AARCH64_OPND_SVE_Zm_5:
4124 case AARCH64_OPND_SVE_Zm_16:
4125 case AARCH64_OPND_SVE_Zn:
4126 case AARCH64_OPND_SVE_Zt:
4127 case AARCH64_OPND_SME_Zm:
4128 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4129 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
4130 else
4131 snprintf (buf, size, "%s",
4132 style_reg (styler, "z%d.%s", opnd->reg.regno,
4133 aarch64_get_qualifier_name (opnd->qualifier)));
4134 break;
4135
4136 case AARCH64_OPND_SVE_ZnxN:
4137 case AARCH64_OPND_SVE_ZtxN:
4138 case AARCH64_OPND_SME_Zdnx2:
4139 case AARCH64_OPND_SME_Zdnx4:
4140 case AARCH64_OPND_SME_Zmx2:
4141 case AARCH64_OPND_SME_Zmx4:
4142 case AARCH64_OPND_SME_Znx2:
4143 case AARCH64_OPND_SME_Znx4:
4144 case AARCH64_OPND_SME_Ztx2_STRIDED:
4145 case AARCH64_OPND_SME_Ztx4_STRIDED:
4146 case AARCH64_OPND_SME_Zt2:
4147 case AARCH64_OPND_SME_Zt3:
4148 case AARCH64_OPND_SME_Zt4:
4149 print_register_list (buf, size, opnd, "z", styler);
4150 break;
4151
4152 case AARCH64_OPND_SVE_Zm3_INDEX:
4153 case AARCH64_OPND_SVE_Zm3_22_INDEX:
4154 case AARCH64_OPND_SVE_Zm3_19_INDEX:
4155 case AARCH64_OPND_SVE_Zm3_11_INDEX:
4156 case AARCH64_OPND_SVE_Zm4_11_INDEX:
4157 case AARCH64_OPND_SVE_Zm4_INDEX:
4158 case AARCH64_OPND_SVE_Zn_INDEX:
4159 case AARCH64_OPND_SME_Zm_INDEX1:
4160 case AARCH64_OPND_SME_Zm_INDEX2:
4161 case AARCH64_OPND_SME_Zm_INDEX3_1:
4162 case AARCH64_OPND_SME_Zm_INDEX3_2:
4163 case AARCH64_OPND_SME_Zm_INDEX3_10:
4164 case AARCH64_OPND_SVE_Zn_5_INDEX:
4165 case AARCH64_OPND_SME_Zm_INDEX4_1:
4166 case AARCH64_OPND_SME_Zm_INDEX4_10:
4167 case AARCH64_OPND_SME_Zn_INDEX1_16:
4168 case AARCH64_OPND_SME_Zn_INDEX2_15:
4169 case AARCH64_OPND_SME_Zn_INDEX2_16:
4170 case AARCH64_OPND_SME_Zn_INDEX3_14:
4171 case AARCH64_OPND_SME_Zn_INDEX3_15:
4172 case AARCH64_OPND_SME_Zn_INDEX4_14:
4173 case AARCH64_OPND_SVE_Zm_imm4:
4174 snprintf (buf, size, "%s[%s]",
4175 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4176 ? style_reg (styler, "z%d", opnd->reglane.regno)
4177 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4178 aarch64_get_qualifier_name (opnd->qualifier))),
4179 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4180 break;
4181
4182 case AARCH64_OPND_SME_ZAda_2b:
4183 case AARCH64_OPND_SME_ZAda_3b:
4184 snprintf (buf, size, "%s",
4185 style_reg (styler, "za%d.%s", opnd->reg.regno,
4186 aarch64_get_qualifier_name (opnd->qualifier)));
4187 break;
4188
4189 case AARCH64_OPND_SME_ZA_HV_idx_src:
4190 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4191 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4192 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4193 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4194 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4195 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4196 style_reg (styler, "za%d%c.%s",
4197 opnd->indexed_za.regno,
4198 opnd->indexed_za.v == 1 ? 'v' : 'h',
4199 aarch64_get_qualifier_name (opnd->qualifier)),
4200 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4201 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4202 opnd->indexed_za.index.countm1 ? ":" : "",
4203 (opnd->indexed_za.index.countm1
4204 ? style_imm (styler, "%d",
4205 opnd->indexed_za.index.imm
4206 + opnd->indexed_za.index.countm1)
4207 : ""),
4208 opnd->indexed_za.group_size ? ", " : "",
4209 opnd->indexed_za.group_size == 2
4210 ? style_sub_mnem (styler, "vgx2")
4211 : opnd->indexed_za.group_size == 4
4212 ? style_sub_mnem (styler, "vgx4") : "",
4213 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4214 break;
4215
4216 case AARCH64_OPND_SME_list_of_64bit_tiles:
4217 print_sme_za_list (buf, size, opnd->reg.regno, styler);
4218 break;
4219
4220 case AARCH64_OPND_SME_ZA_array_off1x4:
4221 case AARCH64_OPND_SME_ZA_array_off2x2:
4222 case AARCH64_OPND_SME_ZA_array_off2x4:
4223 case AARCH64_OPND_SME_ZA_array_off3_0:
4224 case AARCH64_OPND_SME_ZA_array_off3_5:
4225 case AARCH64_OPND_SME_ZA_array_off3x2:
4226 case AARCH64_OPND_SME_ZA_array_off4:
4227 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4228 style_reg (styler, "za%s%s",
4229 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4230 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4231 ? ""
4232 : aarch64_get_qualifier_name (opnd->qualifier))),
4233 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4234 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4235 opnd->indexed_za.index.countm1 ? ":" : "",
4236 (opnd->indexed_za.index.countm1
4237 ? style_imm (styler, "%d",
4238 opnd->indexed_za.index.imm
4239 + opnd->indexed_za.index.countm1)
4240 : ""),
4241 opnd->indexed_za.group_size ? ", " : "",
4242 opnd->indexed_za.group_size == 2
4243 ? style_sub_mnem (styler, "vgx2")
4244 : opnd->indexed_za.group_size == 4
4245 ? style_sub_mnem (styler, "vgx4") : "");
4246 break;
4247
4248 case AARCH64_OPND_SME_ZA_array_vrsb_1:
4249 case AARCH64_OPND_SME_ZA_array_vrsh_1:
4250 case AARCH64_OPND_SME_ZA_array_vrss_1:
4251 case AARCH64_OPND_SME_ZA_array_vrsd_1:
4252 case AARCH64_OPND_SME_ZA_array_vrsb_2:
4253 case AARCH64_OPND_SME_ZA_array_vrsh_2:
4254 case AARCH64_OPND_SME_ZA_array_vrss_2:
4255 case AARCH64_OPND_SME_ZA_array_vrsd_2:
4256 snprintf (buf, size, "%s [%s, %s%s%s]",
4257 style_reg (styler, "za%d%c%s%s",
4258 opnd->indexed_za.regno,
4259 opnd->indexed_za.v ? 'v': 'h',
4260 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4261 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4262 ? ""
4263 : aarch64_get_qualifier_name (opnd->qualifier))),
4264 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4265 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4266 opnd->indexed_za.index.countm1 ? ":" : "",
4267 opnd->indexed_za.index.countm1 ? style_imm (styler, "%d",
4268 opnd->indexed_za.index.imm
4269 + opnd->indexed_za.index.countm1):"");
4270 break;
4271
4272 case AARCH64_OPND_SME_SM_ZA:
4273 snprintf (buf, size, "%s",
4274 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4275 break;
4276
4277 case AARCH64_OPND_SME_PnT_Wm_imm:
4278 snprintf (buf, size, "%s[%s, %s]",
4279 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4280 aarch64_get_qualifier_name (opnd->qualifier)),
4281 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4282 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4283 break;
4284
4285 case AARCH64_OPND_SME_VLxN_10:
4286 case AARCH64_OPND_SME_VLxN_13:
4287 enum_value = opnd->imm.value;
4288 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4289 snprintf (buf, size, "%s",
4290 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4291 break;
4292
4293 case AARCH64_OPND_CRn:
4294 case AARCH64_OPND_CRm:
4295 snprintf (buf, size, "%s",
4296 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4297 break;
4298
4299 case AARCH64_OPND_IDX:
4300 case AARCH64_OPND_MASK:
4301 case AARCH64_OPND_IMM:
4302 case AARCH64_OPND_IMM_2:
4303 case AARCH64_OPND_WIDTH:
4304 case AARCH64_OPND_UIMM3_OP1:
4305 case AARCH64_OPND_UIMM3_OP2:
4306 case AARCH64_OPND_BIT_NUM:
4307 case AARCH64_OPND_IMM_VLSL:
4308 case AARCH64_OPND_IMM_VLSR:
4309 case AARCH64_OPND_SHLL_IMM:
4310 case AARCH64_OPND_IMM0:
4311 case AARCH64_OPND_IMMR:
4312 case AARCH64_OPND_IMMS:
4313 case AARCH64_OPND_UNDEFINED:
4314 case AARCH64_OPND_FBITS:
4315 case AARCH64_OPND_TME_UIMM16:
4316 case AARCH64_OPND_SIMM5:
4317 case AARCH64_OPND_SME_SHRIMM4:
4318 case AARCH64_OPND_SME_SHRIMM5:
4319 case AARCH64_OPND_SVE_SHLIMM_PRED:
4320 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4321 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4322 case AARCH64_OPND_SVE_SHRIMM_PRED:
4323 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4324 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4325 case AARCH64_OPND_SVE_SIMM5:
4326 case AARCH64_OPND_SVE_SIMM5B:
4327 case AARCH64_OPND_SVE_SIMM6:
4328 case AARCH64_OPND_SVE_SIMM8:
4329 case AARCH64_OPND_SVE_UIMM3:
4330 case AARCH64_OPND_SVE_UIMM7:
4331 case AARCH64_OPND_SVE_UIMM8:
4332 case AARCH64_OPND_SVE_UIMM8_53:
4333 case AARCH64_OPND_IMM_ROT1:
4334 case AARCH64_OPND_IMM_ROT2:
4335 case AARCH64_OPND_IMM_ROT3:
4336 case AARCH64_OPND_SVE_IMM_ROT1:
4337 case AARCH64_OPND_SVE_IMM_ROT2:
4338 case AARCH64_OPND_SVE_IMM_ROT3:
4339 case AARCH64_OPND_CSSC_SIMM8:
4340 case AARCH64_OPND_CSSC_UIMM8:
4341 snprintf (buf, size, "%s",
4342 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4343 break;
4344
4345 case AARCH64_OPND_SVE_I1_HALF_ONE:
4346 case AARCH64_OPND_SVE_I1_HALF_TWO:
4347 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4348 {
4349 single_conv_t c;
4350 c.i = opnd->imm.value;
4351 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4352 break;
4353 }
4354
4355 case AARCH64_OPND_SVE_PATTERN:
4356 if (optional_operand_p (opcode, idx)
4357 && opnd->imm.value == get_optional_operand_default_value (opcode))
4358 break;
4359 enum_value = opnd->imm.value;
4360 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4361 if (aarch64_sve_pattern_array[enum_value])
4362 snprintf (buf, size, "%s",
4363 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4364 else
4365 snprintf (buf, size, "%s",
4366 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4367 break;
4368
4369 case AARCH64_OPND_SVE_PATTERN_SCALED:
4370 if (optional_operand_p (opcode, idx)
4371 && !opnd->shifter.operator_present
4372 && opnd->imm.value == get_optional_operand_default_value (opcode))
4373 break;
4374 enum_value = opnd->imm.value;
4375 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4376 if (aarch64_sve_pattern_array[opnd->imm.value])
4377 snprintf (buf, size, "%s",
4378 style_reg (styler,
4379 aarch64_sve_pattern_array[opnd->imm.value]));
4380 else
4381 snprintf (buf, size, "%s",
4382 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4383 if (opnd->shifter.operator_present)
4384 {
4385 size_t len = strlen (buf);
4386 const char *shift_name
4387 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4388 snprintf (buf + len, size - len, ", %s %s",
4389 style_sub_mnem (styler, shift_name),
4390 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4391 }
4392 break;
4393
4394 case AARCH64_OPND_SVE_PRFOP:
4395 enum_value = opnd->imm.value;
4396 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4397 if (aarch64_sve_prfop_array[enum_value])
4398 snprintf (buf, size, "%s",
4399 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4400 else
4401 snprintf (buf, size, "%s",
4402 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4403 break;
4404
4405 case AARCH64_OPND_IMM_MOV:
4406 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4407 {
4408 case 4: /* e.g. MOV Wd, #<imm32>. */
4409 {
4410 int imm32 = opnd->imm.value;
4411 snprintf (buf, size, "%s",
4412 style_imm (styler, "#0x%-20x", imm32));
4413 snprintf (comment, comment_size, "#%d", imm32);
4414 }
4415 break;
4416 case 8: /* e.g. MOV Xd, #<imm64>. */
4417 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4418 opnd->imm.value));
4419 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4420 break;
4421 default:
4422 snprintf (buf, size, "<invalid>");
4423 break;
4424 }
4425 break;
4426
4427 case AARCH64_OPND_FPIMM0:
4428 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4429 break;
4430
4431 case AARCH64_OPND_LIMM:
4432 case AARCH64_OPND_AIMM:
4433 case AARCH64_OPND_HALF:
4434 case AARCH64_OPND_SVE_INV_LIMM:
4435 case AARCH64_OPND_SVE_LIMM:
4436 case AARCH64_OPND_SVE_LIMM_MOV:
4437 if (opnd->shifter.amount)
4438 snprintf (buf, size, "%s, %s %s",
4439 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4440 style_sub_mnem (styler, "lsl"),
4441 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4442 else
4443 snprintf (buf, size, "%s",
4444 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4445 break;
4446
4447 case AARCH64_OPND_SIMD_IMM:
4448 case AARCH64_OPND_SIMD_IMM_SFT:
4449 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4450 || opnd->shifter.kind == AARCH64_MOD_NONE)
4451 snprintf (buf, size, "%s",
4452 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4453 else
4454 snprintf (buf, size, "%s, %s %s",
4455 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4456 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4457 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4458 break;
4459
4460 case AARCH64_OPND_SVE_AIMM:
4461 case AARCH64_OPND_SVE_ASIMM:
4462 if (opnd->shifter.amount)
4463 snprintf (buf, size, "%s, %s %s",
4464 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4465 style_sub_mnem (styler, "lsl"),
4466 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4467 else
4468 snprintf (buf, size, "%s",
4469 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4470 break;
4471
4472 case AARCH64_OPND_FPIMM:
4473 case AARCH64_OPND_SIMD_FPIMM:
4474 case AARCH64_OPND_SVE_FPIMM8:
4475 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4476 {
4477 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4478 {
4479 half_conv_t c;
4480 c.i = expand_fp_imm (2, opnd->imm.value);
4481 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4482 }
4483 break;
4484 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4485 {
4486 single_conv_t c;
4487 c.i = expand_fp_imm (4, opnd->imm.value);
4488 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4489 }
4490 break;
4491 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4492 {
4493 double_conv_t c;
4494 c.i = expand_fp_imm (8, opnd->imm.value);
4495 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4496 }
4497 break;
4498 default:
4499 snprintf (buf, size, "<invalid>");
4500 break;
4501 }
4502 break;
4503
4504 case AARCH64_OPND_CCMP_IMM:
4505 case AARCH64_OPND_NZCV:
4506 case AARCH64_OPND_EXCEPTION:
4507 case AARCH64_OPND_UIMM4:
4508 case AARCH64_OPND_UIMM4_ADDG:
4509 case AARCH64_OPND_UIMM7:
4510 case AARCH64_OPND_UIMM10:
4511 if (optional_operand_p (opcode, idx)
4512 && (opnd->imm.value ==
4513 (int64_t) get_optional_operand_default_value (opcode)))
4514 /* Omit the operand, e.g. DCPS1. */
4515 break;
4516 snprintf (buf, size, "%s",
4517 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4518 break;
4519
4520 case AARCH64_OPND_COND:
4521 case AARCH64_OPND_COND1:
4522 snprintf (buf, size, "%s",
4523 style_sub_mnem (styler, opnd->cond->names[0]));
4524 num_conds = ARRAY_SIZE (opnd->cond->names);
4525 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4526 {
4527 size_t len = comment != NULL ? strlen (comment) : 0;
4528 if (i == 1)
4529 snprintf (comment + len, comment_size - len, "%s = %s",
4530 opnd->cond->names[0], opnd->cond->names[i]);
4531 else
4532 snprintf (comment + len, comment_size - len, ", %s",
4533 opnd->cond->names[i]);
4534 }
4535 break;
4536
4537 case AARCH64_OPND_ADDR_ADRP:
4538 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4539 + opnd->imm.value;
4540 if (pcrel_p)
4541 *pcrel_p = 1;
4542 if (address)
4543 *address = addr;
4544 /* This is not necessary during the disassembling, as print_address_func
4545 in the disassemble_info will take care of the printing. But some
4546 other callers may be still interested in getting the string in *STR,
4547 so here we do snprintf regardless. */
4548 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4549 break;
4550
4551 case AARCH64_OPND_ADDR_PCREL14:
4552 case AARCH64_OPND_ADDR_PCREL19:
4553 case AARCH64_OPND_ADDR_PCREL21:
4554 case AARCH64_OPND_ADDR_PCREL26:
4555 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4556 if (pcrel_p)
4557 *pcrel_p = 1;
4558 if (address)
4559 *address = addr;
4560 /* This is not necessary during the disassembling, as print_address_func
4561 in the disassemble_info will take care of the printing. But some
4562 other callers may be still interested in getting the string in *STR,
4563 so here we do snprintf regardless. */
4564 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4565 break;
4566
4567 case AARCH64_OPND_ADDR_SIMPLE:
4568 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4569 case AARCH64_OPND_SIMD_ADDR_POST:
4570 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4571 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4572 {
4573 if (opnd->addr.offset.is_reg)
4574 snprintf (buf, size, "[%s], %s",
4575 style_reg (styler, name),
4576 style_reg (styler, "x%d", opnd->addr.offset.regno));
4577 else
4578 snprintf (buf, size, "[%s], %s",
4579 style_reg (styler, name),
4580 style_imm (styler, "#%d", opnd->addr.offset.imm));
4581 }
4582 else
4583 snprintf (buf, size, "[%s]", style_reg (styler, name));
4584 break;
4585
4586 case AARCH64_OPND_ADDR_REGOFF:
4587 case AARCH64_OPND_SVE_ADDR_R:
4588 case AARCH64_OPND_SVE_ADDR_RR:
4589 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4590 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4591 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4592 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4593 case AARCH64_OPND_SVE_ADDR_RX:
4594 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4595 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4596 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4597 print_register_offset_address
4598 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4599 get_offset_int_reg_name (opnd), styler);
4600 break;
4601
4602 case AARCH64_OPND_SVE_ADDR_ZX:
4603 print_register_offset_address
4604 (buf, size, opnd,
4605 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4606 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4607 break;
4608
4609 case AARCH64_OPND_SVE_ADDR_RZ:
4610 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4611 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4612 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4613 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4614 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4615 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4616 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4617 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4618 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4619 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4620 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4621 print_register_offset_address
4622 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4623 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4624 styler);
4625 break;
4626
4627 case AARCH64_OPND_ADDR_SIMM7:
4628 case AARCH64_OPND_ADDR_SIMM9:
4629 case AARCH64_OPND_ADDR_SIMM9_2:
4630 case AARCH64_OPND_ADDR_SIMM10:
4631 case AARCH64_OPND_ADDR_SIMM11:
4632 case AARCH64_OPND_ADDR_SIMM13:
4633 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
4634 case AARCH64_OPND_ADDR_OFFSET:
4635 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
4636 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
4637 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
4638 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
4639 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4640 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4641 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4642 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4643 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4644 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4645 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4646 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4647 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4648 case AARCH64_OPND_SVE_ADDR_RI_U6:
4649 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4650 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4651 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4652 print_immediate_offset_address
4653 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4654 styler);
4655 break;
4656
4657 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4658 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4659 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4660 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4661 print_immediate_offset_address
4662 (buf, size, opnd,
4663 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4664 styler);
4665 break;
4666
4667 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4668 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4669 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4670 print_register_offset_address
4671 (buf, size, opnd,
4672 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4673 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4674 styler);
4675 break;
4676
4677 case AARCH64_OPND_ADDR_UIMM12:
4678 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4679 if (opnd->addr.offset.imm)
4680 snprintf (buf, size, "[%s, %s]",
4681 style_reg (styler, name),
4682 style_imm (styler, "#%d", opnd->addr.offset.imm));
4683 else
4684 snprintf (buf, size, "[%s]", style_reg (styler, name));
4685 break;
4686
4687 case AARCH64_OPND_SYSREG:
4688 case AARCH64_OPND_SYSREG128:
4689 for (i = 0; aarch64_sys_regs[i].name; ++i)
4690 {
4691 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4692
4693 bool exact_match
4694 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4695 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4696 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4697
4698 /* Try and find an exact match, But if that fails, return the first
4699 partial match that was found. */
4700 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4701 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4702 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4703 && (name == NULL || exact_match))
4704 {
4705 name = aarch64_sys_regs[i].name;
4706 if (exact_match)
4707 {
4708 if (notes)
4709 *notes = NULL;
4710 break;
4711 }
4712
4713 /* If we didn't match exactly, that means the presense of a flag
4714 indicates what we didn't want for this instruction. e.g. If
4715 F_REG_READ is there, that means we were looking for a write
4716 register. See aarch64_ext_sysreg. */
4717 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4718 *notes = _("reading from a write-only register");
4719 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4720 *notes = _("writing to a read-only register");
4721 }
4722 }
4723
4724 if (name)
4725 snprintf (buf, size, "%s", style_reg (styler, name));
4726 else
4727 {
4728 /* Implementation defined system register. */
4729 unsigned int value = opnd->sysreg.value;
4730 snprintf (buf, size, "%s",
4731 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4732 (value >> 14) & 0x3, (value >> 11) & 0x7,
4733 (value >> 7) & 0xf, (value >> 3) & 0xf,
4734 value & 0x7));
4735 }
4736 break;
4737
4738 case AARCH64_OPND_PSTATEFIELD:
4739 for (i = 0; aarch64_pstatefields[i].name; ++i)
4740 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4741 {
4742 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4743 SVCRZA and SVCRSMZA. */
4744 uint32_t flags = aarch64_pstatefields[i].flags;
4745 if (flags & F_REG_IN_CRM
4746 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4747 != PSTATE_DECODE_CRM (flags)))
4748 continue;
4749 break;
4750 }
4751 assert (aarch64_pstatefields[i].name);
4752 snprintf (buf, size, "%s",
4753 style_reg (styler, aarch64_pstatefields[i].name));
4754 break;
4755
4756 case AARCH64_OPND_SYSREG_AT:
4757 case AARCH64_OPND_SYSREG_DC:
4758 case AARCH64_OPND_SYSREG_IC:
4759 case AARCH64_OPND_SYSREG_TLBI:
4760 case AARCH64_OPND_SYSREG_TLBIP:
4761 case AARCH64_OPND_SYSREG_SR:
4762 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4763 break;
4764
4765 case AARCH64_OPND_BARRIER:
4766 case AARCH64_OPND_BARRIER_DSB_NXS:
4767 {
4768 if (opnd->barrier->name[0] == '#')
4769 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4770 else
4771 snprintf (buf, size, "%s",
4772 style_sub_mnem (styler, opnd->barrier->name));
4773 }
4774 break;
4775
4776 case AARCH64_OPND_BARRIER_ISB:
4777 /* Operand can be omitted, e.g. in DCPS1. */
4778 if (! optional_operand_p (opcode, idx)
4779 || (opnd->barrier->value
4780 != get_optional_operand_default_value (opcode)))
4781 snprintf (buf, size, "%s",
4782 style_imm (styler, "#0x%x", opnd->barrier->value));
4783 break;
4784
4785 case AARCH64_OPND_PRFOP:
4786 if (opnd->prfop->name != NULL)
4787 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4788 else
4789 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4790 opnd->prfop->value));
4791 break;
4792
4793 case AARCH64_OPND_RPRFMOP:
4794 enum_value = opnd->imm.value;
4795 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
4796 && aarch64_rprfmop_array[enum_value])
4797 snprintf (buf, size, "%s",
4798 style_reg (styler, aarch64_rprfmop_array[enum_value]));
4799 else
4800 snprintf (buf, size, "%s",
4801 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4802 break;
4803
4804 case AARCH64_OPND_BARRIER_PSB:
4805 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4806 break;
4807
4808 case AARCH64_OPND_X16:
4809 snprintf (buf, size, "%s", style_reg (styler, "x16"));
4810 break;
4811
4812 case AARCH64_OPND_SME_ZT0:
4813 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4814 break;
4815
4816 case AARCH64_OPND_SME_ZT0_INDEX:
4817 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4818 style_imm (styler, "%d", (int) opnd->imm.value));
4819 break;
4820
4821 case AARCH64_OPND_SME_ZT0_LIST:
4822 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4823 break;
4824
4825 case AARCH64_OPND_BARRIER_GCSB:
4826 snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
4827 break;
4828
4829 case AARCH64_OPND_BTI_TARGET:
4830 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4831 snprintf (buf, size, "%s",
4832 style_sub_mnem (styler, opnd->hint_option->name));
4833 break;
4834
4835 case AARCH64_OPND_MOPS_ADDR_Rd:
4836 case AARCH64_OPND_MOPS_ADDR_Rs:
4837 snprintf (buf, size, "[%s]!",
4838 style_reg (styler,
4839 get_int_reg_name (opnd->reg.regno,
4840 AARCH64_OPND_QLF_X, 0)));
4841 break;
4842
4843 case AARCH64_OPND_MOPS_WB_Rn:
4844 snprintf (buf, size, "%s!",
4845 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4846 AARCH64_OPND_QLF_X, 0)));
4847 break;
4848
4849 default:
4850 snprintf (buf, size, "<invalid>");
4851 break;
4852 }
4853 }
4854
/* Pack the MSR/MRS-style encoding fields (op0, op1, CRn, CRm, op2) into
   a single integer.  The fields are first placed at bit positions 19,
   16, 12, 8 and 5 and the result is then shifted right by 5, giving a
   14-bit value with op2 in the low three bits.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4862
/* Shorthand names for the CRn/CRm register numbers used in the CPENC
   and CPENS invocations in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
4879
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is
   read-write.  The table body is generated from aarch64-sys-regs.def
   via the SYSREG X-macro.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  /* Null-name sentinel terminating the table.  */
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
4893
4894 bool
4895 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4896 {
4897 return (reg_flags & F_DEPRECATED) != 0;
4898 }
4899
4900 bool
4901 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
4902 {
4903 return (reg_flags & F_REG_128) != 0;
4904 }
4905
4906 bool
4907 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
4908 {
4909 return (reg_flags & F_REG_ALIAS) != 0;
4910 }
4911
/* PSTATE fields accessible via MSR (immediate).

   The CPENC use below would be fairly misleading: the values here are
   not in CPENC form.  They are in op2:op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the
   fields in a loop.  So if you CPENC them, only the first value will be
   set and the rest are masked out to 0.  As an example, op2 = 3,
   op1 = 2: CPENC would produce a value of 0b110000000001000000
   (0x30040) while what you want is 0b011010 (0x1a).

   For SVCRSM, SVCRZA and SVCRSMZA the name is additionally encoded
   partially in CRm[3:1] via PSTATE_ENCODE_CRM_AND_IMM.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  /* Null-name sentinel terminating the table.  */
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
4938
4939 bool
4940 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4941 const aarch64_sys_reg *reg)
4942 {
4943 if (!(reg->flags & F_ARCHEXT))
4944 return true;
4945
4946 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4947 }
4948
/* Operand values for the IC (instruction cache maintenance) system
   instruction.  F_HASXT marks operations that take an Xt register
   operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES },
    { "iallu",   CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES },
    /* Null-name sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
4956
/* Operand values for the DC (data cache maintenance) system
   instruction.  F_HASXT marks operations that take an Xt register
   operand; F_ARCHEXT marks operations gated on the listed
   architecture extension.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT, AARCH64_NO_FEATURES },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT, AARCH64_NO_FEATURES },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (CVADP) },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES },
    /* Null-name sentinel terminating the table.  */
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
4991
/* AT (address translation) operations.  Same layout as the IC table.
   Null-name sentinel terminates.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
    { "s1e1a",      CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e2a",      CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { "s1e3a",      CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5013
/* TLBI (TLB invalidation) operations.  Most operations come in a pair:
   the plain form and an "nxs" (no-XS-wait) form introduced by FEAT_XS,
   which differs only in CRn (C9 instead of C8).  TLBI_XS_OP emits both
   entries of such a pair; it is redefined below for the group of
   operations that themselves require Armv8.4-A.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "rpaos",     CPENS (6, C8, C4, 3),  F_HASXT, AARCH64_NO_FEATURES },
    { "rpalos",    CPENS (6, C8, C4, 7),  F_HASXT, AARCH64_NO_FEATURES },
    { "paallos",   CPENS (6, C8, C1, 4),  0, AARCH64_NO_FEATURES },
    { "paall",     CPENS (6, C8, C7, 4),  0, AARCH64_NO_FEATURES },

/* Emit OP plus its "nxs" variant; the nxs form ORs in CRn=C9 and needs
   the XS feature.  */
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1",   CPENS (0, C8, C7, 0), 0)
    TLBI_XS_OP ( "vae1",      CPENS (0, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1",    CPENS (0, C8, C7, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1",     CPENS (0, C8, C7, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), 0)
    TLBI_XS_OP ( "vae1is",    CPENS (0, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "aside1is",  CPENS (0, C8, C3, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2",      CPENS (4, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae2is",    CPENS (4, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0)
    TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), 0)
    TLBI_XS_OP ( "vae3",      CPENS (6, C8, C7, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vae3is",    CPENS (6, C8, C3, 1), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "alle2",     CPENS (4, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle2is",   CPENS (4, C8, C3, 0), 0)
    TLBI_XS_OP ( "alle1",     CPENS (4, C8, C7, 4), 0)
    TLBI_XS_OP ( "alle1is",   CPENS (4, C8, C3, 4), 0)
    TLBI_XS_OP ( "alle3",     CPENS (6, C8, C7, 0), 0)
    TLBI_XS_OP ( "alle3is",   CPENS (6, C8, C3, 0), 0)
    TLBI_XS_OP ( "vale1is",   CPENS (0, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2is",   CPENS (4, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3is",   CPENS (6, C8, C3, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale1",     CPENS (0, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale2",     CPENS (4, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vale3",     CPENS (6, C8, C7, 5), F_HASXT | F_REG_128)
    TLBI_XS_OP ( "vaale1",    CPENS (0, C8, C7, 7), F_HASXT | F_REG_128)

/* The remaining pairs are outer-shareable ("os") and range ("r...")
   forms whose base operation itself requires Armv8.4-A.  */
#undef TLBI_XS_OP
#define TLBI_XS_OP(OP, CODE, FLAGS) \
    { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \
    { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },

    TLBI_XS_OP ( "vmalle1os",    CPENS (0, C8, C1, 0), 0 )
    TLBI_XS_OP ( "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "aside1os",     CPENS (0, C8, C1, 2), F_HASXT )
    TLBI_XS_OP ( "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), 0 )
    TLBI_XS_OP ( "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "alle2os",      CPENS (4, C8, C1, 0), 0 )
    TLBI_XS_OP ( "alle1os",      CPENS (4, C8, C1, 4), 0 )
    TLBI_XS_OP ( "alle3os",      CPENS (6, C8, C1, 0), 0 )

    TLBI_XS_OP ( "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 )
    TLBI_XS_OP ( "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 )

#undef TLBI_XS_OP

    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5115
/* SYS alias (prediction restriction) operations.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */
    { 0,       CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
};
5125
5126 bool
5127 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5128 {
5129 return (sys_ins_reg->flags & F_HASXT) != 0;
5130 }
5131
5132 extern bool
5133 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5134 const char *reg_name,
5135 uint32_t reg_flags,
5136 const aarch64_feature_set *reg_features)
5137 {
5138 /* Armv8-R has no EL3. */
5139 if (AARCH64_CPU_HAS_FEATURE (features, V8R))
5140 {
5141 const char *suffix = strrchr (reg_name, '_');
5142 if (suffix && !strcmp (suffix, "_el3"))
5143 return false;
5144 }
5145
5146 if (!(reg_flags & F_ARCHEXT))
5147 return true;
5148
5149 return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features);
5150 }
5151
/* The C0..C15 CRn/CRm shorthands are only needed by the system register
   tables above; undefine them so they do not leak any further.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the (inclusive) bit-field HI..LO of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5171
5172 static enum err_type
5173 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5174 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5175 bool encoding ATTRIBUTE_UNUSED,
5176 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5177 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5178 {
5179 int t = BITS (insn, 4, 0);
5180 int n = BITS (insn, 9, 5);
5181 int t2 = BITS (insn, 14, 10);
5182
5183 if (BIT (insn, 23))
5184 {
5185 /* Write back enabled. */
5186 if ((t == n || t2 == n) && n != 31)
5187 return ERR_UND;
5188 }
5189
5190 if (BIT (insn, 22))
5191 {
5192 /* Load */
5193 if (t == t2)
5194 return ERR_UND;
5195 }
5196
5197 return ERR_OK;
5198 }
5199
5200 /* Verifier for vector by element 3 operands functions where the
5201 conditions `if sz:L == 11 then UNDEFINED` holds. */
5202
5203 static enum err_type
5204 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5205 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5206 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5207 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5208 {
5209 const aarch64_insn undef_pattern = 0x3;
5210 aarch64_insn value;
5211
5212 assert (inst->opcode);
5213 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5214 value = encoding ? inst->value : insn;
5215 assert (value);
5216
5217 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5218 return ERR_UND;
5219
5220 return ERR_OK;
5221 }
5222
5223 /* Check an instruction that takes three register operands and that
5224 requires the register numbers to be distinct from one another. */
5225
5226 static enum err_type
5227 verify_three_different_regs (const struct aarch64_inst *inst,
5228 const aarch64_insn insn ATTRIBUTE_UNUSED,
5229 bfd_vma pc ATTRIBUTE_UNUSED,
5230 bool encoding ATTRIBUTE_UNUSED,
5231 aarch64_operand_error *mismatch_detail
5232 ATTRIBUTE_UNUSED,
5233 aarch64_instr_sequence *insn_sequence
5234 ATTRIBUTE_UNUSED)
5235 {
5236 int rd, rs, rn;
5237
5238 rd = inst->operands[0].reg.regno;
5239 rs = inst->operands[1].reg.regno;
5240 rn = inst->operands[2].reg.regno;
5241 if (rd == rs || rd == rn || rs == rn)
5242 {
5243 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5244 mismatch_detail->error
5245 = _("the three register operands must be distinct from one another");
5246 mismatch_detail->index = -1;
5247 return ERR_UND;
5248 }
5249
5250 return ERR_OK;
5251 }
5252
5253 /* Add INST to the end of INSN_SEQUENCE. */
5254
5255 static void
5256 add_insn_to_sequence (const struct aarch64_inst *inst,
5257 aarch64_instr_sequence *insn_sequence)
5258 {
5259 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5260 }
5261
5262 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5263 If INST is NULL the given insn_sequence is cleared and the sequence is left
5264 uninitialized. */
5265
5266 void
5267 init_insn_sequence (const struct aarch64_inst *inst,
5268 aarch64_instr_sequence *insn_sequence)
5269 {
5270 int num_req_entries = 0;
5271
5272 if (insn_sequence->instr)
5273 {
5274 XDELETE (insn_sequence->instr);
5275 insn_sequence->instr = NULL;
5276 }
5277
5278 /* Handle all the cases here. May need to think of something smarter than
5279 a giant if/else chain if this grows. At that time, a lookup table may be
5280 best. */
5281 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5282 num_req_entries = 1;
5283 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5284 num_req_entries = 2;
5285
5286 insn_sequence->num_added_insns = 0;
5287 insn_sequence->num_allocated_insns = num_req_entries;
5288
5289 if (num_req_entries != 0)
5290 {
5291 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5292 add_insn_to_sequence (inst, insn_sequence);
5293 }
5294 }
5295
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* PREV_INSN is the last instruction buffered in the sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The P/M/E members of a MOPS group occupy consecutive entries in the
     opcode table, so the expected successor of PREV_INSN's opcode is the
     entry immediately after it (i.e. OPCODE - 1 must equal it for INST
     to be that successor).  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-initial member must directly follow its predecessor in the
	 same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5372
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  /* Fast path: nothing to check and no sequence in progress.
     NOTE(review): insn_sequence is dereferenced here before the
     assert (insn_sequence) below — the assert would be more useful
     placed first; confirm callers never pass NULL.  */
  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while one is already open is itself a
	 (non-fatal) violation.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* When disassembling, PC == 0 means we are at the start of a section.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      /* Drop the open sequence unless INST is a MOPS M part, which may
	 still be followed by a valid E part.  */
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan INST's operands, counting uses of the movprfx destination
	     register, tracking the widest element size seen, and noting
	     any predicate operand.  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
5663
5664
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t simm = (int64_t) uvalue;
  /* Mask of the bits above the ESIZE-byte element; the double shift
     avoids an out-of-range shift when ESIZE is 8.  */
  uint64_t outside = ((uint64_t) -1 << (esize * 4)) << (esize * 4);

  /* The bits outside the element must be a zero or sign extension.  */
  bool zero_extended = (uvalue & ~outside) == uvalue;
  bool sign_extended = (uvalue | outside) == uvalue;
  if (!zero_extended && !sign_extended)
    return false;

  /* Fold the value down through each element size it replicates at;
     if it replicates all the way to bytes, DUP can always do it.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      simm = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  simm = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP takes an 8-bit signed immediate, optionally shifted left by 8.  */
  if ((simm & 0xff) == 0)
    simm /= 256;
  return simm < -128 || simm >= 128;
}
5691
5692 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5693 supports the instruction described by INST. */
5694
5695 bool
5696 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5697 aarch64_inst *inst)
5698 {
5699 if (!inst->opcode->avariant
5700 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5701 return false;
5702
5703 if (inst->opcode->iclass == sme_fp_sd
5704 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5705 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5706 return false;
5707
5708 if (inst->opcode->iclass == sme_int_sd
5709 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5710 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
5711 return false;
5712
5713 return true;
5714 }
5715
5716 /* Include the opcode description table as well as the operand description
5717 table. */
5718 #define VERIFIER(x) verify_##x
5719 #include "aarch64-tbl.h"
5720