/* ACLE support for AArch64 SVE (function shapes)
   Copyright (C) 2018-2020 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "memmodel.h"
#include "insn-codes.h"
#include "optabs.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"

/* In the comments below, _t0 represents the first type suffix and _t1
   represents the second.  Square brackets enclose characters that are
   present in only the full name, not the overloaded name.  Governing
   predicate arguments and predicate suffixes are not shown, since they
   depend on the predication type, which is a separate piece of
   information from the shape.

   Non-overloaded functions may have additional suffixes beyond the
   ones shown, if those suffixes don't affect the types in the type
   signature.  E.g. the predicate form of svtrn1 has a _b<bits> suffix,
   but this does not affect the prototype, which is always
   "svbool_t(svbool_t, svbool_t)".  */
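
/* Purely as an illustration of the convention above: for a shape whose
   prototype is written "sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)", the
   full name of a hypothetical _f32 form would be svfoo_f32 while the
   overloaded name would be plain svfoo.  */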

namespace aarch64_sve {

/* Return a representation of "const T *".  */
static tree
build_const_pointer (tree t)
{
  return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
}

/* If INSTANCE has a governing predicate, add it to the list of argument
   types in ARGUMENT_TYPES.  RETURN_TYPE is the type returned by the
   function.  */
static void
apply_predication (const function_instance &instance, tree return_type,
                   vec<tree> &argument_types)
{
  if (instance.pred != PRED_none)
    {
      argument_types.quick_insert (0, get_svbool_t ());
      /* For unary merge operations, the first argument is a vector with
         the same type as the result.  For unary_convert_narrowt it also
         provides the "bottom" half of active elements, and is present
         for all types of predication.  */
      if ((argument_types.length () == 2 && instance.pred == PRED_m)
          || instance.shape == shapes::unary_convert_narrowt)
        argument_types.quick_insert (0, return_type);
    }
}
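
/* For example (purely illustrative): applying PRED_m predication to a
   unary operation whose unpredicated signature is "sv<t0>_t (sv<t0>_t)"
   gives "sv<t0>_t (sv<t0>_t, svbool_t, sv<t0>_t)", i.e. a vector of
   inactive-element values followed by the governing predicate and the
   original argument.  */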

/* Parse and move past an element type in FORMAT and return it as a type
   suffix.  The format is:

   [01]    - the element type in type suffix 0 or 1 of INSTANCE
   f<bits> - a floating-point type with the given number of bits
   f[01]   - a floating-point type with the same width as type suffix 0 or 1
   B       - bfloat16_t
   h<elt>  - a half-sized version of <elt>
   p       - a predicate (represented as TYPE_SUFFIX_b)
   q<elt>  - a quarter-sized version of <elt>
   s<bits> - a signed type with the given number of bits
   s[01]   - a signed type with the same width as type suffix 0 or 1
   u<bits> - an unsigned type with the given number of bits
   u[01]   - an unsigned type with the same width as type suffix 0 or 1
   w<elt>  - a 64-bit version of <elt> if <elt> is integral, otherwise <elt>

   where <elt> is another element type.  */
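/* Some worked examples of the element-type format, purely for illustration:

     "s32" - _s32, regardless of INSTANCE
     "u0"  - an unsigned type of the same width as type suffix 0,
             e.g. _u32 if type suffix 0 is _s32
     "hu0" - a half-sized unsigned type, e.g. _u16 if type suffix 0 is _s32
     "w0"  - e.g. _s64 if type suffix 0 is _s8, but _f32 if it is _f32.  */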
static type_suffix_index
parse_element_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == 'f' || ch == 's' || ch == 'u')
    {
      type_class_index tclass = (ch == 'f' ? TYPE_float
                                 : ch == 's' ? TYPE_signed
                                 : TYPE_unsigned);
      char *end;
      unsigned int bits = strtol (format, &end, 10);
      format = end;
      if (bits == 0 || bits == 1)
        bits = instance.type_suffix (bits).element_bits;
      return find_type_suffix (tclass, bits);
    }

  if (ch == 'w')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      if (type_suffixes[suffix].integer_p)
        return find_type_suffix (type_suffixes[suffix].tclass, 64);
      return suffix;
    }

  if (ch == 'p')
    return TYPE_SUFFIX_b;

  if (ch == 'B')
    return TYPE_SUFFIX_bf16;

  if (ch == 'q')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return find_type_suffix (type_suffixes[suffix].tclass,
                               type_suffixes[suffix].element_bits / 4);
    }

  if (ch == 'h')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      /* Widening and narrowing doesn't change the type for predicates;
         everything's still an svbool_t.  */
      if (suffix == TYPE_SUFFIX_b)
        return suffix;
      return find_type_suffix (type_suffixes[suffix].tclass,
                               type_suffixes[suffix].element_bits / 2);
    }

  if (ch == '0' || ch == '1')
    return instance.type_suffix_ids[ch - '0'];

  gcc_unreachable ();
}

/* Read and return a type from FORMAT for function INSTANCE.  Advance
   FORMAT beyond the type string.  The format is:

   _       - void
   al      - array pointer for loads
   ap      - array pointer for prefetches
   as      - array pointer for stores
   b       - base vector type (from a _<m0>base suffix)
   d       - displacement vector type (from a _<m1>index or _<m1>offset suffix)
   e<name> - an enum with the given name
   s<elt>  - a scalar type with the given element suffix
   t<elt>  - a vector or tuple type with given element suffix [*1]
   v<elt>  - a vector with the given element suffix

   where <elt> has the format described above parse_element_type

   [*1] the vectors_per_tuple function indicates whether the type should
        be a tuple, and if so, how many vectors it should contain.  */
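/* For illustration, some example type strings and what they resolve to:

     "_"    - void
     "v0"   - e.g. svuint16_t if type suffix 0 is _u16
     "vh0"  - e.g. svint16_t if type suffix 0 is _s32
     "su64" - uint64_t
     "al"   - e.g. "const int32_t *" for a non-extending _s32 load
     "t0"   - e.g. svint8x3_t for an _s8 function whose vectors_per_tuple
              is 3.  */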
static tree
parse_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == '_')
    return void_type_node;

  if (ch == 'a')
    {
      ch = *format++;
      if (ch == 'l')
        return build_const_pointer (instance.memory_scalar_type ());
      if (ch == 'p')
        return const_ptr_type_node;
      if (ch == 's')
        return build_pointer_type (instance.memory_scalar_type ());
      gcc_unreachable ();
    }

  if (ch == 'b')
    return instance.base_vector_type ();

  if (ch == 'd')
    return instance.displacement_vector_type ();

  if (ch == 'e')
    {
      if (strncmp (format, "pattern", 7) == 0)
        {
          format += 7;
          return acle_svpattern;
        }
      if (strncmp (format, "prfop", 5) == 0)
        {
          format += 5;
          return acle_svprfop;
        }
      gcc_unreachable ();
    }

  if (ch == 's')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return scalar_types[type_suffixes[suffix].vector_type];
    }

  if (ch == 't')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      vector_type_index vector_type = type_suffixes[suffix].vector_type;
      unsigned int num_vectors = instance.vectors_per_tuple ();
      return acle_vector_types[num_vectors - 1][vector_type];
    }

  if (ch == 'v')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return acle_vector_types[0][type_suffixes[suffix].vector_type];
    }

  gcc_unreachable ();
}

/* Read and move past any argument count at FORMAT for the function
   signature of INSTANCE.  The counts are:

   *q: one argument per element in a 128-bit quadword (as for svdupq)
   *t: one argument per vector in a tuple (as for svcreate)

   Otherwise the count is 1.  */
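/* For example, with an _s8 type suffix a "*q" count repeats the preceding
   argument type 16 times (one per byte of a 128-bit quadword, matching the
   16 scalar arguments of svdupq_s8), while "*t" repeats it once per vector
   in the tuple (4 times for an svcreate4 function).  */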
static unsigned int
parse_count (const function_instance &instance, const char *&format)
{
  if (format[0] == '*' && format[1] == 'q')
    {
      format += 2;
      return instance.elements_per_vq (0);
    }
  if (format[0] == '*' && format[1] == 't')
    {
      format += 2;
      return instance.vectors_per_tuple ();
    }
  return 1;
}

/* Read a type signature for INSTANCE from FORMAT.  Add the argument types
   to ARGUMENT_TYPES and return the return type.

   The format is a comma-separated list of types (as for parse_type),
   with the first type being the return type and the rest being the
   argument types.  Each argument type can be followed by an optional
   count (as for parse_count).  */
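/* As an illustrative example (not a signature taken from a particular
   shape): "v0,al,ss64" with type suffix 0 equal to _u16 would give a
   return type of svuint16_t and argument types "const uint16_t *" and
   int64_t, assuming the memory element type matches the vector element
   type, as it does for non-extending loads.  */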
static tree
parse_signature (const function_instance &instance, const char *format,
                 vec<tree> &argument_types)
{
  tree return_type = parse_type (instance, format);
  while (format[0] == ',')
    {
      format += 1;
      tree argument_type = parse_type (instance, format);
      unsigned int count = parse_count (instance, format);
      for (unsigned int i = 0; i < count; ++i)
        argument_types.quick_push (argument_type);
    }
  gcc_assert (format[0] == 0);
  return return_type;
}

/* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
   the type suffixes at index TI and the predication suffix at index PI.
   The other arguments are as for build_all.  */
static void
build_one (function_builder &b, const char *signature,
           const function_group_info &group, mode_suffix_index mode_suffix_id,
           unsigned int ti, unsigned int pi, bool force_direct_overloads)
{
  /* Byte forms of svdupq take 16 arguments.  */
  auto_vec<tree, 16> argument_types;
  function_instance instance (group.base_name, *group.base, *group.shape,
                              mode_suffix_id, group.types[ti],
                              group.preds[pi]);
  tree return_type = parse_signature (instance, signature, argument_types);
  apply_predication (instance, return_type, argument_types);
  b.add_unique_function (instance, return_type, argument_types,
                         group.required_extensions, force_direct_overloads);
}

/* GROUP describes some sort of gather or scatter operation.  There are
   two cases:

   - If the function has any type suffixes (as for loads and stores), the
     first function type suffix specifies either a 32-bit or a 64-bit type,
     which in turn selects either MODE32 or MODE64 as the addressing mode.
     Add a function instance for every type and predicate combination
     in GROUP for which the associated addressing mode is not MODE_none.

   - If the function has no type suffixes (as for prefetches), add one
     MODE32 form and one MODE64 form for each predication type.

   The other arguments are as for build_all.  */
static void
build_32_64 (function_builder &b, const char *signature,
             const function_group_info &group, mode_suffix_index mode32,
             mode_suffix_index mode64, bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    if (group.types[0][0] == NUM_TYPE_SUFFIXES)
      {
        gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
        build_one (b, signature, group, mode32, 0, pi,
                   force_direct_overloads);
        build_one (b, signature, group, mode64, 0, pi,
                   force_direct_overloads);
      }
    else
      for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
        {
          unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
          gcc_assert (bits == 32 || bits == 64);
          mode_suffix_index mode = bits == 32 ? mode32 : mode64;
          if (mode != MODE_none)
            build_one (b, signature, group, mode, ti, pi,
                       force_direct_overloads);
        }
}

/* For every type and predicate combination in GROUP, add one function
   that takes a scalar (pointer) base and a signed vector array index,
   and another that instead takes an unsigned vector array index.
   The vector array index has the same element size as the first
   function type suffix.  SIGNATURE is as for build_all.  */
static void
build_sv_index (function_builder &b, const char *signature,
                const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32index, MODE_s64index);
  build_32_64 (b, signature, group, MODE_u32index, MODE_u64index);
}

/* Like build_sv_index, but only handle 64-bit types.  */
static void
build_sv_index64 (function_builder &b, const char *signature,
                  const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64index);
  build_32_64 (b, signature, group, MODE_none, MODE_u64index);
}

/* Like build_sv_index, but taking vector byte offsets instead of vector
   array indices.  */
static void
build_sv_offset (function_builder &b, const char *signature,
                 const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32offset, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* Like build_sv_offset, but exclude offsets that must be interpreted
   as signed (i.e. s32offset).  */
static void
build_sv_uint_offset (function_builder &b, const char *signature,
                      const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* For every type and predicate combination in GROUP, add a function
   that takes a vector base address and no displacement.  The vector
   base has the same element size as the first type suffix.

   The other arguments are as for build_all.  */
static void
build_v_base (function_builder &b, const char *signature,
              const function_group_info &group,
              bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base, MODE_u64base,
               force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar array
   index.  */
static void
build_vs_index (function_builder &b, const char *signature,
                const function_group_info &group,
                bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_index, MODE_u64base_index,
               force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar byte
   offset.  */
static void
build_vs_offset (function_builder &b, const char *signature,
                 const function_group_info &group,
                 bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_offset, MODE_u64base_offset,
               force_direct_overloads);
}

/* Add a function instance for every type and predicate combination
   in GROUP.  Take the function base name from GROUP and the mode suffix
   from MODE_SUFFIX_ID.  Use SIGNATURE to construct the function signature
   without a governing predicate, then use apply_predication to add in the
   predicate.  FORCE_DIRECT_OVERLOADS is true if there is a one-to-one
   mapping between "short" and "full" names, and if standard overload
   resolution therefore isn't necessary.  */
static void
build_all (function_builder &b, const char *signature,
           const function_group_info &group, mode_suffix_index mode_suffix_id,
           bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    for (unsigned int ti = 0;
         ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
      build_one (b, signature, group, mode_suffix_id, ti, pi,
                 force_direct_overloads);
}
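
/* For example (purely as an illustration of how the helpers compose),
   a call such as:

     build_all (b, "v0,v0,v0", group, MODE_none);

   adds one function per type/predication combination in GROUP, each with
   the unpredicated signature "sv<t0>_t (sv<t0>_t, sv<t0>_t)"; the
   governing predicate and any merge argument are then added by
   apply_predication.  */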

/* TYPE is the largest type suffix associated with the arguments of R,
   but the result is twice as wide.  Return the associated type suffix
   if it exists, otherwise report an appropriate error and return
   NUM_TYPE_SUFFIXES.  */
static type_suffix_index
long_type_suffix (function_resolver &r, type_suffix_index type)
{
  unsigned int element_bits = type_suffixes[type].element_bits;
  if (type_suffixes[type].integer_p && element_bits < 64)
    return find_type_suffix (type_suffixes[type].tclass, element_bits * 2);

  r.report_no_such_form (type);
  return NUM_TYPE_SUFFIXES;
}
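
/* For example, long_type_suffix maps _s16 to _s32 and _u8 to _u16, while
   64-bit and floating-point suffixes have no wider form and therefore
   produce a "no such form" error.  */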

/* Declare the function shape NAME, pointing it to an instance
   of class <NAME>_def.  */
#define SHAPE(NAME) \
  static CONSTEXPR const NAME##_def NAME##_obj; \
  namespace shapes { const function_shape *const NAME = &NAME##_obj; }
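
/* For instance, SHAPE (binary) below expands to:

     static CONSTEXPR const binary_def binary_obj;
     namespace shapes { const function_shape *const binary = &binary_obj; }  */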

/* Base class for functions that are not overloaded.  */
struct nonoverloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int) const OVERRIDE
  {
    return true;
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    gcc_unreachable ();
  }
};

/* Base class for overloaded functions.  Bit N of EXPLICIT_MASK is true
   if type suffix N appears in the overloaded name.  */
template<unsigned int EXPLICIT_MASK>
struct overloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int i) const OVERRIDE
  {
    return (EXPLICIT_MASK >> i) & 1;
  }
};
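
/* Thus overloaded_base<0> describes shapes in which no type suffix appears
   in the overloaded name (the common plain "svfoo" case), while
   overloaded_base<1> describes shapes such as load_ext_gather_base below,
   in which type suffix 0 is explicit even in the overloaded name.  */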

/* Base class for adr_index and adr_offset.  */
struct adr_base : public overloaded_base<0>
{
  /* The function takes two arguments: a vector base and a vector displacement
     (either an index or an offset).  Resolve based on them both.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
        || (mode = r.resolve_adr_address (0)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode);
  };
};

/* Base class for narrowing bottom binary functions that take an
   immediate second operand.  The result is half the size of input
   and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }
};

/* The top equivalent of binary_imm_narrowb_base.  It takes three arguments,
   with the first being the values of the even elements, which are typically
   the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE)
        || !r.require_integer_immediate (i + 2))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for long (i.e. narrow op narrow -> wide) binary functions
   that take an immediate second operand.  The type suffix specifies
   the wider type.  */
struct binary_imm_long_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,vh0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_integer_immediate (i + 1)
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }
};

/* Base class for inc_dec and inc_dec_pat.  */
struct inc_dec_base : public overloaded_base<0>
{
  CONSTEXPR inc_dec_base (bool pat_p) : m_pat_p (pat_p) {}

  /* Resolve based on the first argument only, which must be either a
     scalar or a vector.  If it's a scalar, it must be a 32-bit or
     64-bit integer.  */
  tree
  resolve (function_resolver &r) const
  {
    unsigned int i, nargs;
    if (!r.check_gp_argument (m_pat_p ? 3 : 2, i, nargs)
        || !r.require_vector_or_scalar_type (i))
      return error_mark_node;

    mode_suffix_index mode;
    type_suffix_index type;
    if (r.scalar_argument_p (i))
      {
        mode = MODE_n;
        type = r.infer_integer_scalar_type (i);
      }
    else
      {
        mode = MODE_none;
        type = r.infer_vector_type (i);
      }
    if (type == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    for (++i; i < nargs; ++i)
      if (!r.require_integer_immediate (i))
        return error_mark_node;

    return r.resolve_to (mode, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_range (m_pat_p ? 2 : 1, 1, 16);
  }

  bool m_pat_p;
};

/* Base class for load and load_replicate.  */
struct load_contiguous_base : public overloaded_base<0>
{
  /* Resolve a call based purely on a pointer argument.  The other arguments
     are a governing predicate and (for MODE_vnum) a vnum offset.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool vnum_p = r.mode_suffix_id == MODE_vnum;
    gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);

    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (vnum_p ? 2 : 1, i, nargs)
        || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
        || (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for gather loads that take a scalar base and a vector
   displacement (either an offset or an index).  */
struct load_gather_sv_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_pointer_type (i, true)) == NUM_TYPE_SUFFIXES
        || (mode = r.resolve_sv_displacement (i + 1, type, true),
            mode == MODE_none))
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for load_ext_gather_index and load_ext_gather_offset,
   which differ only in the units of the displacement.  */
struct load_ext_gather_base : public overloaded_base<1>
{
  /* Resolve a gather load that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The function has an explicit type suffix that determines the type
     of the loaded data.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    /* No resolution is needed for a vector base with no displacement;
       there's a one-to-one mapping between short and long names.  */
    gcc_assert (r.displacement_units () != UNITS_none);

    type_suffix_index type = r.type_suffix_ids[0];

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
        || (mode = r.resolve_gather_address (i, type, true)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};
/* sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0:quarter>_t,
                        sv<t0:quarter>_t)  (for integer t0)
   sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)  (for floating-point t0)

   The functions act like the equivalent of "ternary_qq" for integer elements
   and normal vector-only ternary functions for floating-point elements.  */
struct mmla_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* svmmla is distributed over several extensions.  Allow the common
       denominator to define the overloaded svmmla function without
       defining any specific versions.  */
    if (group.types[0][0] != NUM_TYPE_SUFFIXES)
      {
        if (type_suffixes[group.types[0][0]].float_p)
          build_all (b, "v0,v0,v0,v0", group, MODE_none);
        else
          build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
      }
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* Make sure that the function exists now, since not all forms
       follow a set pattern after this point.  */
    tree res = r.resolve_to (r.mode_suffix_id, type);
    if (res == error_mark_node)
      return res;

    bool float_p = type_suffixes[type].float_p;
    unsigned int modifier = float_p ? r.SAME_SIZE : r.QUARTER_SIZE;
    if (!r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                        modifier)
        || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
                                           modifier))
      return error_mark_node;

    return res;
  }
};
SHAPE (mmla)

/* Base class for prefetch_gather_index and prefetch_gather_offset,
   which differ only in the units of the displacement.  */
struct prefetch_gather_base : public overloaded_base<0>
{
  /* Resolve a gather prefetch that takes one of:

     - a scalar pointer base (const void *) and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The prefetch operation is the final argument.  This is purely a
     mode-based resolution; there are no type suffixes.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
        || (mode = r.resolve_gather_address (i, NUM_TYPE_SUFFIXES,
                                             false)) == MODE_none
        || !r.require_integer_immediate (nargs - 1))
      return error_mark_node;

    return r.resolve_to (mode);
  }
};

/* Wraps BASE to provide a narrowing shift right function.  Argument N
   is an immediate shift amount in the range [1, sizeof(<t0>_t) * 4].  */
template<typename BASE, unsigned int N>
struct shift_right_imm_narrow_wrapper : public BASE
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits / 2;
    return c.require_immediate_range (N, 1, bits);
  }
};

/* Base class for store_scatter_index and store_scatter_offset,
   which differ only in the units of the displacement.  */
struct store_scatter_base : public overloaded_base<0>
{
  /* Resolve a scatter store that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The stored data is the final argument, and it determines the
     type suffix.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
        || (type = r.infer_sd_vector_type (nargs - 1)) == NUM_TYPE_SUFFIXES
        || (mode = r.resolve_gather_address (i, type, false)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for ternary operations in which the final argument is an
   immediate shift amount.  The derived class should check the range.  */
struct ternary_shift_imm_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }
};
/* Base class for ternary operations in which the first argument has the
   same element type as the result, and in which the second and third
   arguments have an element type that is derived from the first.

   MODIFIER is the number of element bits in the second and third
   arguments, or a function_resolver modifier that says how this
   precision is derived from the first argument's elements.

   TYPE_CLASS2 and TYPE_CLASS3 are the type classes of the second and
   third arguments, or function_resolver::SAME_TYPE_CLASS if the type
   class is the same as the first argument.  */
template<unsigned int MODIFIER,
         type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
         type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_opt_n_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
                                           MODIFIER))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i, type, TYPE_CLASS3, MODIFIER);
  }
};

/* Like ternary_resize2_opt_n_base, but for functions that don't take
   a final scalar argument.  */
template<unsigned int MODIFIER,
         type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
         type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
                                           MODIFIER)
        || !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
                                           MODIFIER))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Like ternary_resize2_opt_n_base, but for functions that take a final
   lane argument.  */
template<unsigned int MODIFIER,
         type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
         type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_lane_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (4, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
                                           MODIFIER)
        || !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
                                           MODIFIER)
        || !r.require_integer_immediate (i + 3))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* A specialization of ternary_resize2_lane_base for bfloat16 elements,
   indexed in groups of N elements.  */
template<unsigned int N>
struct ternary_bfloat_lane_base
  : public ternary_resize2_lane_base<16, TYPE_bfloat, TYPE_bfloat>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vB,vB,su64", group, MODE_none);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3, N);
  }
};

/* A specialization of ternary_resize2_lane_base for quarter-sized
   elements.  */
template<type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
         type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_qq_lane_base
  : public ternary_resize2_lane_base<function_resolver::QUARTER_SIZE,
                                     TYPE_CLASS2, TYPE_CLASS3>
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3, 4);
  }
};

/* Base class for narrowing bottom unary functions.  The result is half
   the size of input and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (CLASS, r.HALF_SIZE);
  }
};

/* The top equivalent of unary_narrowb_base.  All forms take the values
   of the even elements as an extra argument, before any governing predicate.
   These even elements are typically the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* sv<m0>_t svfoo[_m0base]_[m1]index(sv<m0>_t, sv<m1>_t)

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_index_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_all (b, "b,b,d", group, MODE_u32base_s32index);
    build_all (b, "b,b,d", group, MODE_u32base_u32index);
    build_all (b, "b,b,d", group, MODE_u64base_s64index);
    build_all (b, "b,b,d", group, MODE_u64base_u64index);
  }
};
SHAPE (adr_index)
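
/* As a concrete illustration of the pattern above: with <m0> = u32base and
   <m1> = s32, the full name has the form svfoo_u32base_s32index, the
   overloaded name is svfoo_index, and the prototype is
   "svuint32_t (svuint32_t, svint32_t)".  */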

/* sv<m0>_t svfoo[_m0base]_[m1]offset(sv<m0>_t, sv<m1>_t).

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_offset_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_all (b, "b,b,d", group, MODE_u32base_s32offset);
    build_all (b, "b,b,d", group, MODE_u32base_u32offset);
    build_all (b, "b,b,d", group, MODE_u64base_s64offset);
    build_all (b, "b,b,d", group, MODE_u64base_u64offset);
  }
};
SHAPE (adr_offset)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)

   i.e. a binary operation with uniform types, but with no scalar form.  */
struct binary_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:int>_t).

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always a signed integer.  */
struct binary_int_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vs0", group, MODE_none);
    build_all (b, "v0,v0,ss0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_signed);
  }
};
SHAPE (binary_int_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 16 / sizeof (<t0>_t) - 1].  */
struct binary_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_lane)
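
/* svmul_lane is one example of this shape: for _f32 it is
   svfloat32_t svmul_lane[_f32](svfloat32_t, svfloat32_t, uint64_t),
   with the lane index required to be in the range [0, 3].  */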

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t, uint64_t).

   where the final argument is an integer constant expression in the
   range [0, 32 / sizeof (<t0>_t) - 1].  */
struct binary_long_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_matching_vector_type (i + 1, type)
        || !r.require_integer_immediate (i + 2)
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_long_lane)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, <t0:half>_t).  */
struct binary_long_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0", group, MODE_none);
    build_all (b, "v0,vh0,sh0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
                                      r.SAME_SIZE, result_type);
  }
};
SHAPE (binary_long_opt_n)
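
/* For example, with <t0> = s16 the two overloads above become
   svint16_t svfoo[_s16](svint8_t, svint8_t) and
   svint16_t svfoo[_n_s16](svint8_t, int8_t), as used by the SVE2
   widening arithmetic functions.  */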

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t).

   i.e. a binary operation in which the final argument is always a scalar
   rather than a vector.  */
struct binary_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_scalar_type (i + 1, r.SAME_TYPE_CLASS))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. a version of binary_opt_n in which the output elements are half the
   width of the input elements.  */
struct binary_narrowb_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_narrowb_opt_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, <t0>_t)

   This is the "top" counterpart to binary_narrowb_opt_n.  */
struct binary_narrowt_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, r.SAME_TYPE_CLASS,
                                           r.HALF_SIZE))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i + 1, type);
  }
};
SHAPE (binary_narrowt_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. the standard shape for binary operations that operate on
   uniform types.  */
struct binary_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
    /* _b functions do not have an _n form, but are classified as
       binary_opt_n so that they can be overloaded with vector
       functions.  */
    if (group.types[0][0] == TYPE_SUFFIX_b)
      gcc_assert (group.types[0][1] == NUM_TYPE_SUFFIXES);
    else
      build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_opt_n)
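
/* This is the shape of simple arithmetic functions such as svadd and svmul;
   with <t0> = u8, for instance, the two overloads are
   svuint8_t svfoo[_u8](svuint8_t, svuint8_t) and
   svuint8_t svfoo[_n_u8](svuint8_t, uint8_t).  */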

/* svbool_t svfoo(svbool_t, svbool_t).  */
struct binary_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,v0,v0", group, MODE_none);
  }
};
SHAPE (binary_pred)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be 90 or 270.  */
struct binary_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_either_or (2, 90, 270);
  }
};
SHAPE (binary_rotate)
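
/* svcadd is a typical example of this shape:
   svfloat32_t svcadd[_f32](svfloat32_t, svfloat32_t, uint64_t imm_rotation),
   where imm_rotation must be 90 or 270.  */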

/* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t)

   i.e. a binary function that takes two scalars and returns a vector.
   An explicit type suffix is required.  */
struct binary_scalar_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,s0,s0", group, MODE_none);
  }
};
SHAPE (binary_scalar)

/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t).

   i.e. a version of "binary" that returns unsigned integers.  */
struct binary_to_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vu0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary_to_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)

   i.e. a version of "binary" in which the final argument is always an
   unsigned integer.  */
struct binary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of binary_n in which the final argument is always an
   unsigned integer.  */
struct binary_uint_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_scalar_type (i + 1, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always an unsigned integer.  */
struct binary_uint_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;
1458 1.1 mrg
1459 1.1 mrg return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned);
1460 1.1 mrg }
1461 1.1 mrg };
1462 1.1 mrg SHAPE (binary_uint_opt_n)
1463 1.1 mrg
1464 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t).
1465 1.1 mrg
1466 1.1 mrg i.e. a version of binary_n in which the final argument is always
1467 1.1 mrg a 64-bit unsigned integer. */
1468 1.1 mrg struct binary_uint64_n_def : public overloaded_base<0>
1469 1.1 mrg {
1470 1.1 mrg void
1471 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1472 1.1 mrg {
1473 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1474 1.1 mrg build_all (b, "v0,v0,su64", group, MODE_none);
1475 1.1 mrg }
1476 1.1 mrg
1477 1.1 mrg tree
1478 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1479 1.1 mrg {
1480 1.1 mrg unsigned int i, nargs;
1481 1.1 mrg type_suffix_index type;
1482 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1483 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1484 1.1 mrg || !r.require_scalar_type (i + 1, "uint64_t"))
1485 1.1 mrg return error_mark_node;
1486 1.1 mrg
1487 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1488 1.1 mrg }
1489 1.1 mrg };
1490 1.1 mrg SHAPE (binary_uint64_n)
1491 1.1 mrg
1492 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svuint64_t)
1493 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
1494 1.1 mrg
1495 1.1 mrg i.e. a version of the standard binary shape binary_opt_n in which
1496 1.1 mrg the final argument is always a uint64_t. */
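/* A plausible example is svlsl_wide, whose ACLE prototypes for _s32
   (predication shown explicitly) would be:

     svint32_t svlsl_wide[_s32]_m(svbool_t, svint32_t, svuint64_t)
     svint32_t svlsl_wide[_n_s32]_m(svbool_t, svint32_t, uint64_t)  */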
1497 1.1 mrg struct binary_uint64_opt_n_def : public overloaded_base<0>
1498 1.1 mrg {
1499 1.1 mrg void
1500 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1501 1.1 mrg {
1502 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1503 1.1 mrg build_all (b, "v0,v0,vu64", group, MODE_none);
1504 1.1 mrg build_all (b, "v0,v0,su64", group, MODE_n);
1505 1.1 mrg }
1506 1.1 mrg
1507 1.1 mrg tree
1508 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1509 1.1 mrg {
1510 1.1 mrg unsigned int i, nargs;
1511 1.1 mrg type_suffix_index type;
1512 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1513 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1514 1.1 mrg return error_mark_node;
1515 1.1 mrg
1516 1.1 mrg return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned, 64);
1517 1.1 mrg }
1518 1.1 mrg };
1519 1.1 mrg SHAPE (binary_uint64_opt_n)
1520 1.1 mrg
1521 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t). */
1522 1.1 mrg struct binary_wide_def : public overloaded_base<0>
1523 1.1 mrg {
1524 1.1 mrg void
1525 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1526 1.1 mrg {
1527 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1528 1.1 mrg build_all (b, "v0,v0,vh0", group, MODE_none);
1529 1.1 mrg }
1530 1.1 mrg
1531 1.1 mrg tree
1532 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1533 1.1 mrg {
1534 1.1 mrg unsigned int i, nargs;
1535 1.1 mrg type_suffix_index type;
1536 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1537 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1538 1.1 mrg || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
1539 1.1 mrg r.HALF_SIZE))
1540 1.1 mrg return error_mark_node;
1541 1.1 mrg
1542 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1543 1.1 mrg }
1544 1.1 mrg };
1545 1.1 mrg SHAPE (binary_wide)
1546 1.1 mrg
1547 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t)
1548 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:half>_t). */
1549 1.1 mrg struct binary_wide_opt_n_def : public overloaded_base<0>
1550 1.1 mrg {
1551 1.1 mrg void
1552 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1553 1.1 mrg {
1554 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1555 1.1 mrg build_all (b, "v0,v0,vh0", group, MODE_none);
1556 1.1 mrg build_all (b, "v0,v0,sh0", group, MODE_n);
1557 1.1 mrg }
1558 1.1 mrg
1559 1.1 mrg tree
1560 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1561 1.1 mrg {
1562 1.1 mrg unsigned int i, nargs;
1563 1.1 mrg type_suffix_index type;
1564 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1565 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1566 1.1 mrg return error_mark_node;
1567 1.1 mrg
1568 1.1 mrg return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
1569 1.1 mrg r.HALF_SIZE);
1570 1.1 mrg }
1571 1.1 mrg };
1572 1.1 mrg SHAPE (binary_wide_opt_n)
1573 1.1 mrg
1574 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1575 1.1 mrg <t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t). */
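/* For example, svclastb follows this pattern; for _s32:

     svint32_t svclastb[_s32](svbool_t, svint32_t, svint32_t)
     int32_t svclastb[_n_s32](svbool_t, int32_t, svint32_t)  */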
1576 1.1 mrg struct clast_def : public overloaded_base<0>
1577 1.1 mrg {
1578 1.1 mrg void
1579 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1580 1.1 mrg {
1581 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1582 1.1 mrg build_all (b, "v0,v0,v0", group, MODE_none);
1583 1.1 mrg build_all (b, "s0,s0,v0", group, MODE_n);
1584 1.1 mrg }
1585 1.1 mrg
1586 1.1 mrg tree
1587 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1588 1.1 mrg {
1589 1.1 mrg unsigned int i, nargs;
1590 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1591 1.1 mrg || !r.require_vector_or_scalar_type (i))
1592 1.1 mrg return error_mark_node;
1593 1.1 mrg
1594 1.1 mrg if (r.scalar_argument_p (i))
1595 1.1 mrg {
1596 1.1 mrg type_suffix_index type;
1597 1.1 mrg if (!r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
1598 1.1 mrg || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
1599 1.1 mrg return error_mark_node;
1600 1.1 mrg return r.resolve_to (MODE_n, type);
1601 1.1 mrg }
1602 1.1 mrg else
1603 1.1 mrg {
1604 1.1 mrg type_suffix_index type;
1605 1.1 mrg if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1606 1.1 mrg || !r.require_matching_vector_type (i + 1, type))
1607 1.1 mrg return error_mark_node;
1608 1.1 mrg return r.resolve_to (MODE_none, type);
1609 1.1 mrg }
1610 1.1 mrg }
1611 1.1 mrg };
1612 1.1 mrg SHAPE (clast)
1613 1.1 mrg
1614 1.1 mrg /* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t). */
1615 1.1 mrg struct compare_def : public overloaded_base<0>
1616 1.1 mrg {
1617 1.1 mrg void
1618 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1619 1.1 mrg {
1620 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1621 1.1 mrg build_all (b, "vp,v0,v0", group, MODE_none);
1622 1.1 mrg }
1623 1.1 mrg
1624 1.1 mrg tree
1625 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1626 1.1 mrg {
1627 1.1 mrg return r.resolve_uniform (2);
1628 1.1 mrg }
1629 1.1 mrg };
1630 1.1 mrg SHAPE (compare)
1631 1.1 mrg
1632 1.1 mrg /* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1633 1.1 mrg svbool_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
1634 1.1 mrg
1635 1.1 mrg i.e. a comparison between two vectors, or between a vector and a scalar. */
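/* For example, svcmplt appears to use this shape; for _s32:

     svbool_t svcmplt[_s32](svbool_t, svint32_t, svint32_t)
     svbool_t svcmplt[_n_s32](svbool_t, svint32_t, int32_t)  */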
1636 1.1 mrg struct compare_opt_n_def : public overloaded_base<0>
1637 1.1 mrg {
1638 1.1 mrg void
1639 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1640 1.1 mrg {
1641 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1642 1.1 mrg build_all (b, "vp,v0,v0", group, MODE_none);
1643 1.1 mrg build_all (b, "vp,v0,s0", group, MODE_n);
1644 1.1 mrg }
1645 1.1 mrg
1646 1.1 mrg tree
1647 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1648 1.1 mrg {
1649 1.1 mrg return r.resolve_uniform_opt_n (2);
1650 1.1 mrg }
1651 1.1 mrg };
1652 1.1 mrg SHAPE (compare_opt_n)
1653 1.1 mrg
1654 1.1 mrg /* svbool_t svfoo[_t0](const <t0>_t *, const <t0>_t *). */
1655 1.1 mrg struct compare_ptr_def : public overloaded_base<0>
1656 1.1 mrg {
1657 1.1 mrg void
1658 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1659 1.1 mrg {
1660 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1661 1.1 mrg build_all (b, "vp,al,al", group, MODE_none);
1662 1.1 mrg }
1663 1.1 mrg
1664 1.1 mrg tree
1665 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1666 1.1 mrg {
1667 1.1 mrg unsigned int i, nargs;
1668 1.1 mrg type_suffix_index type;
1669 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1670 1.1 mrg || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
1671 1.1 mrg || !r.require_matching_pointer_type (i + 1, i, type))
1672 1.1 mrg return error_mark_node;
1673 1.1 mrg
1674 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1675 1.1 mrg }
1676 1.1 mrg };
1677 1.1 mrg SHAPE (compare_ptr)
1678 1.1 mrg
1679 1.1 mrg /* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)
1680 1.1 mrg
1681 1.1 mrg where _t0 is a _b<bits> suffix that describes the predicate result.
1682 1.1 mrg There is no direct relationship between the element sizes of _t0
1683 1.1 mrg and _t1. */
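/* A likely example is svwhilelt: svwhilelt_b32[_s64](int64_t, int64_t)
   returns an svbool_t whose _b32 suffix is independent of the _s64
   suffix of the scalar arguments.  */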
1684 1.1 mrg struct compare_scalar_def : public overloaded_base<1>
1685 1.1 mrg {
1686 1.1 mrg void
1687 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1688 1.1 mrg {
1689 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1690 1.1 mrg build_all (b, "vp,s1,s1", group, MODE_none);
1691 1.1 mrg }
1692 1.1 mrg
1693 1.1 mrg tree
1694 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1695 1.1 mrg {
1696 1.1 mrg unsigned int i, nargs;
1697 1.1 mrg type_suffix_index type;
1698 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1699 1.1 mrg || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
1700 1.1 mrg || !r.require_matching_integer_scalar_type (i + 1, i, type))
1701 1.1 mrg return error_mark_node;
1702 1.1 mrg
1703 1.1 mrg return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
1704 1.1 mrg }
1705 1.1 mrg };
1706 1.1 mrg SHAPE (compare_scalar)
1707 1.1 mrg
1708 1.1 mrg /* svbool_t svfoo[_t0](sv<t0>_t, svint64_t) (for signed t0)
1709 1.1 mrg svbool_t svfoo[_n_t0](sv<t0>_t, int64_t) (for signed t0)
1710 1.1 mrg svbool_t svfoo[_t0](sv<t0>_t, svuint64_t) (for unsigned t0)
1711 1.1 mrg svbool_t svfoo[_n_t0](sv<t0>_t, uint64_t) (for unsigned t0)
1712 1.1 mrg
1713 1.1 mrg i.e. a comparison in which the second argument is 64 bits. */
1714 1.1 mrg struct compare_wide_opt_n_def : public overloaded_base<0>
1715 1.1 mrg {
1716 1.1 mrg void
1717 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1718 1.1 mrg {
1719 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1720 1.1 mrg build_all (b, "vp,v0,vw0", group, MODE_none);
1721 1.1 mrg build_all (b, "vp,v0,sw0", group, MODE_n);
1722 1.1 mrg }
1723 1.1 mrg
1724 1.1 mrg tree
1725 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1726 1.1 mrg {
1727 1.1 mrg unsigned int i, nargs;
1728 1.1 mrg type_suffix_index type;
1729 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1730 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1731 1.1 mrg return error_mark_node;
1732 1.1 mrg
1733 1.1 mrg return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS, 64);
1734 1.1 mrg }
1735 1.1 mrg };
1736 1.1 mrg SHAPE (compare_wide_opt_n)
1737 1.1 mrg
1738 1.1 mrg /* uint64_t svfoo(). */
1739 1.1 mrg struct count_inherent_def : public nonoverloaded_base
1740 1.1 mrg {
1741 1.1 mrg void
1742 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1743 1.1 mrg {
1744 1.1 mrg build_all (b, "su64", group, MODE_none);
1745 1.1 mrg }
1746 1.1 mrg };
1747 1.1 mrg SHAPE (count_inherent)
1748 1.1 mrg
1749 1.1 mrg /* uint64_t svfoo(enum svpattern). */
1750 1.1 mrg struct count_pat_def : public nonoverloaded_base
1751 1.1 mrg {
1752 1.1 mrg void
1753 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1754 1.1 mrg {
1755 1.1 mrg build_all (b, "su64,epattern", group, MODE_none);
1756 1.1 mrg }
1757 1.1 mrg };
1758 1.1 mrg SHAPE (count_pat)
1759 1.1 mrg
1760 1.1 mrg /* uint64_t svfoo(svbool_t). */
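/* For example, svcntp_b8(svbool_t, svbool_t) is thought to have this
   shape; its first (governing) predicate argument is hidden by the
   convention described at the top of the file.  */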
1761 1.1 mrg struct count_pred_def : public nonoverloaded_base
1762 1.1 mrg {
1763 1.1 mrg void
1764 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1765 1.1 mrg {
1766 1.1 mrg build_all (b, "su64,vp", group, MODE_none);
1767 1.1 mrg }
1768 1.1 mrg };
1769 1.1 mrg SHAPE (count_pred)
1770 1.1 mrg
1771 1.1 mrg /* uint64_t svfoo[_t0](sv<t0>_t). */
1772 1.1 mrg struct count_vector_def : public overloaded_base<0>
1773 1.1 mrg {
1774 1.1 mrg void
1775 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1776 1.1 mrg {
1777 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1778 1.1 mrg build_all (b, "su64,v0", group, MODE_none);
1779 1.1 mrg }
1780 1.1 mrg
1781 1.1 mrg tree
1782 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1783 1.1 mrg {
1784 1.1 mrg return r.resolve_uniform (1);
1785 1.1 mrg }
1786 1.1 mrg };
1787 1.1 mrg SHAPE (count_vector)
1788 1.1 mrg
1789 1.1 mrg /* sv<t0>xN_t svfoo[_t0](sv<t0>_t, ..., sv<t0>_t)
1790 1.1 mrg
1791 1.1 mrg where there are N arguments in total. */
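/* For example, svcreate2 matches this shape for N == 2:

     svint32x2_t svcreate2[_s32](svint32_t, svint32_t)  */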
1792 1.1 mrg struct create_def : public overloaded_base<0>
1793 1.1 mrg {
1794 1.1 mrg void
1795 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1796 1.1 mrg {
1797 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1798 1.1 mrg build_all (b, "t0,v0*t", group, MODE_none);
1799 1.1 mrg }
1800 1.1 mrg
1801 1.1 mrg tree
1802 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1803 1.1 mrg {
1804 1.1 mrg return r.resolve_uniform (r.vectors_per_tuple ());
1805 1.1 mrg }
1806 1.1 mrg };
1807 1.1 mrg SHAPE (create)
1808 1.1 mrg
1809 1.1 mrg /* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)
1810 1.1 mrg
1811 1.1 mrg where there are enough arguments to fill 128 bits of data (or to
1812 1.1 mrg control 128 bits of data in the case of predicates). */
1813 1.1 mrg struct dupq_def : public overloaded_base<1>
1814 1.1 mrg {
1815 1.1 mrg void
1816 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1817 1.1 mrg {
1818 1.1 mrg /* The "_n" suffix is optional; the full name has it, but the short
1819 1.1 mrg name doesn't. */
1820 1.1 mrg build_all (b, "v0,s0*q", group, MODE_n, true);
1821 1.1 mrg }
1822 1.1 mrg
1823 1.1 mrg tree
1824 1.1 mrg resolve (function_resolver &) const OVERRIDE
1825 1.1 mrg {
1826 1.1 mrg /* The short forms just make "_n" implicit, so no resolution is needed. */
1827 1.1 mrg gcc_unreachable ();
1828 1.1 mrg }
1829 1.1 mrg };
1830 1.1 mrg SHAPE (dupq)
1831 1.1 mrg
1832 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
1833 1.1 mrg
1834 1.1 mrg where the final argument is an integer constant expression that when
1835 1.1 mrg multiplied by the number of bytes in t0 is in the range [0, 255]. */
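/* For example, svext[_s32](svint32_t, svint32_t, uint64_t) presumably has
   this shape; with 4-byte elements the immediate must be in [0, 63],
   since 63 * 4 = 252 <= 255 but 64 * 4 = 256 > 255.  */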
1836 1.1 mrg struct ext_def : public overloaded_base<0>
1837 1.1 mrg {
1838 1.1 mrg void
1839 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1840 1.1 mrg {
1841 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1842 1.1 mrg build_all (b, "v0,v0,v0,su64", group, MODE_none);
1843 1.1 mrg }
1844 1.1 mrg
1845 1.1 mrg tree
1846 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1847 1.1 mrg {
1848 1.1 mrg return r.resolve_uniform (2, 1);
1849 1.1 mrg }
1850 1.1 mrg
1851 1.1 mrg bool
1852 1.1 mrg check (function_checker &c) const OVERRIDE
1853 1.1 mrg {
1854 1.1 mrg unsigned int bytes = c.type_suffix (0).element_bytes;
1855 1.1 mrg return c.require_immediate_range (2, 0, 256 / bytes - 1);
1856 1.1 mrg }
1857 1.1 mrg };
1858 1.1 mrg SHAPE (ext)
1859 1.1 mrg
1860 1.1 mrg /* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t). */
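/* For example, svadda is believed to have this shape; for _f32:

     float32_t svadda[_f32](svbool_t, float32_t, svfloat32_t)  */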
1861 1.1 mrg struct fold_left_def : public overloaded_base<0>
1862 1.1 mrg {
1863 1.1 mrg void
1864 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1865 1.1 mrg {
1866 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1867 1.1 mrg build_all (b, "s0,s0,v0", group, MODE_none);
1868 1.1 mrg }
1869 1.1 mrg
1870 1.1 mrg tree
1871 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1872 1.1 mrg {
1873 1.1 mrg unsigned int i, nargs;
1874 1.1 mrg type_suffix_index type;
1875 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1876 1.1 mrg || !r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
1877 1.1 mrg || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
1878 1.1 mrg return error_mark_node;
1879 1.1 mrg
1880 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1881 1.1 mrg }
1882 1.1 mrg };
1883 1.1 mrg SHAPE (fold_left)
1884 1.1 mrg
1885 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>xN_t, uint64_t)
1886 1.1 mrg
1887 1.1 mrg where the final argument is an integer constant expression in
1888 1.1 mrg the range [0, N - 1]. */
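/* For example, svget2 matches this shape for N == 2; for _s32:

     svint32_t svget2[_s32](svint32x2_t, uint64_t)

   with the index constrained to [0, 1].  */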
1889 1.1 mrg struct get_def : public overloaded_base<0>
1890 1.1 mrg {
1891 1.1 mrg void
1892 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1893 1.1 mrg {
1894 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1895 1.1 mrg build_all (b, "v0,t0,su64", group, MODE_none);
1896 1.1 mrg }
1897 1.1 mrg
1898 1.1 mrg tree
1899 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1900 1.1 mrg {
1901 1.1 mrg unsigned int i, nargs;
1902 1.1 mrg type_suffix_index type;
1903 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1904 1.1 mrg || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
1905 1.1 mrg || !r.require_integer_immediate (i + 1))
1906 1.1 mrg return error_mark_node;
1907 1.1 mrg
1908 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1909 1.1 mrg }
1910 1.1 mrg
1911 1.1 mrg bool
1912 1.1 mrg check (function_checker &c) const OVERRIDE
1913 1.1 mrg {
1914 1.1 mrg unsigned int nvectors = c.vectors_per_tuple ();
1915 1.1 mrg return c.require_immediate_range (1, 0, nvectors - 1);
1916 1.1 mrg }
1917 1.1 mrg };
1918 1.1 mrg SHAPE (get)
1919 1.1 mrg
1920 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
1921 1.1 mrg <t0>_t svfoo[_n_t0](<t0>_t, uint64_t)
1922 1.1 mrg
1923 1.1 mrg where the t0 in the vector form is a signed or unsigned integer
1924 1.1 mrg whose size is tied to the [bhwd] suffix of "svfoo". */
1925 1.1 mrg struct inc_dec_def : public inc_dec_base
1926 1.1 mrg {
1927 1.1 mrg CONSTEXPR inc_dec_def () : inc_dec_base (false) {}
1928 1.1 mrg
1929 1.1 mrg void
1930 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1931 1.1 mrg {
1932 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1933 1.1 mrg /* These functions are unusual in that the type suffixes for
1934 1.1 mrg the scalar and vector forms are not related. The vector
1935 1.1 mrg form always has exactly two potential suffixes while the
1936 1.1 mrg scalar form always has four. */
1937 1.1 mrg if (group.types[2][0] == NUM_TYPE_SUFFIXES)
1938 1.1 mrg build_all (b, "v0,v0,su64", group, MODE_none);
1939 1.1 mrg else
1940 1.1 mrg build_all (b, "s0,s0,su64", group, MODE_n);
1941 1.1 mrg }
1942 1.1 mrg };
1943 1.1 mrg SHAPE (inc_dec)
1944 1.1 mrg
1945 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, enum svpattern, uint64_t)
1946 1.1 mrg <t0>_t svfoo[_n_t0](<t0>_t, enum svpattern, uint64_t)
1947 1.1 mrg
1948 1.1 mrg where the t0 in the vector form is a signed or unsigned integer
1949 1.1 mrg whose size is tied to the [bhwd] suffix of "svfoo". */
1950 1.1 mrg struct inc_dec_pat_def : public inc_dec_base
1951 1.1 mrg {
1952 1.1 mrg CONSTEXPR inc_dec_pat_def () : inc_dec_base (true) {}
1953 1.1 mrg
1954 1.1 mrg void
1955 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1956 1.1 mrg {
1957 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1958 1.1 mrg /* These functions are unusual in that the type suffixes for
1959 1.1 mrg the scalar and vector forms are not related. The vector
1960 1.1 mrg form always has exactly two potential suffixes while the
1961 1.1 mrg scalar form always has four. */
1962 1.1 mrg if (group.types[2][0] == NUM_TYPE_SUFFIXES)
1963 1.1 mrg build_all (b, "v0,v0,epattern,su64", group, MODE_none);
1964 1.1 mrg else
1965 1.1 mrg build_all (b, "s0,s0,epattern,su64", group, MODE_n);
1966 1.1 mrg }
1967 1.1 mrg };
1968 1.1 mrg SHAPE (inc_dec_pat)
1969 1.1 mrg
1970 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbool_t). */
1971 1.1 mrg struct inc_dec_pred_def : public overloaded_base<0>
1972 1.1 mrg {
1973 1.1 mrg void
1974 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
1975 1.1 mrg {
1976 1.1 mrg b.add_overloaded_functions (group, MODE_none);
1977 1.1 mrg build_all (b, "v0,v0,vp", group, MODE_none);
1978 1.1 mrg }
1979 1.1 mrg
1980 1.1 mrg tree
1981 1.1 mrg resolve (function_resolver &r) const OVERRIDE
1982 1.1 mrg {
1983 1.1 mrg unsigned int i, nargs;
1984 1.1 mrg type_suffix_index type;
1985 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
1986 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1987 1.1 mrg || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
1988 1.1 mrg return error_mark_node;
1989 1.1 mrg
1990 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
1991 1.1 mrg }
1992 1.1 mrg };
1993 1.1 mrg SHAPE (inc_dec_pred)
1994 1.1 mrg
1995 1.1 mrg /* <t0>_t svfoo[_n_t0]_t1(<t0>_t, svbool_t)
1996 1.1 mrg
1997 1.1 mrg where _t1 is a _b<bits> suffix that describes the svbool_t argument. */
1998 1.1 mrg struct inc_dec_pred_scalar_def : public overloaded_base<2>
1999 1.1 mrg {
2000 1.1 mrg void
2001 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2002 1.1 mrg {
2003 1.1 mrg b.add_overloaded_functions (group, MODE_n);
2004 1.1 mrg build_all (b, "s0,s0,vp", group, MODE_n);
2005 1.1 mrg }
2006 1.1 mrg
2007 1.1 mrg tree
2008 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2009 1.1 mrg {
2010 1.1 mrg unsigned int i, nargs;
2011 1.1 mrg type_suffix_index type;
2012 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
2013 1.1 mrg || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
2014 1.1 mrg || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
2015 1.1 mrg return error_mark_node;
2016 1.1 mrg
2017 1.1 mrg return r.resolve_to (r.mode_suffix_id, type, r.type_suffix_ids[1]);
2018 1.1 mrg }
2019 1.1 mrg };
2020 1.1 mrg SHAPE (inc_dec_pred_scalar)
2021 1.1 mrg
2022 1.1 mrg /* sv<t0>[xN]_t svfoo_t0(). */
2023 1.1 mrg struct inherent_def : public nonoverloaded_base
2024 1.1 mrg {
2025 1.1 mrg void
2026 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2027 1.1 mrg {
2028 1.1 mrg build_all (b, "t0", group, MODE_none);
2029 1.1 mrg }
2030 1.1 mrg };
2031 1.1 mrg SHAPE (inherent)
2032 1.1 mrg
2033 1.1 mrg /* svbool_t svfoo[_b](). */
2034 1.1 mrg struct inherent_b_def : public overloaded_base<0>
2035 1.1 mrg {
2036 1.1 mrg void
2037 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2038 1.1 mrg {
2039 1.1 mrg /* The "_b" suffix is optional; the full name has it, but the short
2040 1.1 mrg name doesn't. */
2041 1.1 mrg build_all (b, "v0", group, MODE_none, true);
2042 1.1 mrg }
2043 1.1 mrg
2044 1.1 mrg tree
2045 1.1 mrg resolve (function_resolver &) const OVERRIDE
2046 1.1 mrg {
2047 1.1 mrg /* The short forms just make "_b" implicit, so no resolution is needed. */
2048 1.1 mrg gcc_unreachable ();
2049 1.1 mrg }
2050 1.1 mrg };
2051 1.1 mrg SHAPE (inherent_b)
2052 1.1 mrg
2053 1.1 mrg /* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
2054 1.1 mrg sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t). */
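/* For example, svld1 and its vnum form plausibly use this shape; for _s32:

     svint32_t svld1[_s32](svbool_t, const int32_t *)
     svint32_t svld1_vnum[_s32](svbool_t, const int32_t *, int64_t)  */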
2055 1.1 mrg struct load_def : public load_contiguous_base
2056 1.1 mrg {
2057 1.1 mrg void
2058 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2059 1.1 mrg {
2060 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2061 1.1 mrg b.add_overloaded_functions (group, MODE_vnum);
2062 1.1 mrg build_all (b, "t0,al", group, MODE_none);
2063 1.1 mrg build_all (b, "t0,al,ss64", group, MODE_vnum);
2064 1.1 mrg }
2065 1.1 mrg };
2066 1.1 mrg SHAPE (load)
2067 1.1 mrg
2068 1.1 mrg /* sv<t0>_t svfoo_t0(const <X>_t *)
2069 1.1 mrg sv<t0>_t svfoo_vnum_t0(const <X>_t *, int64_t)
2070 1.1 mrg
2071 1.1 mrg where <X> is determined by the function base name. */
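/* A likely instance is svld1sb_s16, where <X> is int8_t:

     svint16_t svld1sb_s16(svbool_t, const int8_t *)
     svint16_t svld1sb_vnum_s16(svbool_t, const int8_t *, int64_t)  */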
2072 1.1 mrg struct load_ext_def : public nonoverloaded_base
2073 1.1 mrg {
2074 1.1 mrg void
2075 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2076 1.1 mrg {
2077 1.1 mrg build_all (b, "t0,al", group, MODE_none);
2078 1.1 mrg build_all (b, "t0,al,ss64", group, MODE_vnum);
2079 1.1 mrg }
2080 1.1 mrg };
2081 1.1 mrg SHAPE (load_ext)
2082 1.1 mrg
2083 1.1 mrg /* sv<t0>_t svfoo_[s32]index_t0(const <X>_t *, svint32_t)
2084 1.1 mrg sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
2085 1.1 mrg sv<t0>_t svfoo_[u32]index_t0(const <X>_t *, svuint32_t)
2086 1.1 mrg sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
2087 1.1 mrg
2088 1.1 mrg sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
2089 1.1 mrg sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
2090 1.1 mrg
2091 1.1 mrg where <X> is determined by the function base name. */
2092 1.1 mrg struct load_ext_gather_index_def : public load_ext_gather_base
2093 1.1 mrg {
2094 1.1 mrg void
2095 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2096 1.1 mrg {
2097 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2098 1.1 mrg build_sv_index (b, "t0,al,d", group);
2099 1.1 mrg build_vs_index (b, "t0,b,ss64", group);
2100 1.1 mrg }
2101 1.1 mrg };
2102 1.1 mrg SHAPE (load_ext_gather_index)
2103 1.1 mrg
2104 1.1 mrg /* sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
2105 1.1 mrg sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
2106 1.1 mrg
2107 1.1 mrg sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
2108 1.1 mrg sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
2109 1.1 mrg
2110 1.1 mrg where <X> is determined by the function base name. This is
2111 1.1 mrg load_ext_gather_index that doesn't support 32-bit vector indices. */
2112 1.1 mrg struct load_ext_gather_index_restricted_def : public load_ext_gather_base
2113 1.1 mrg {
2114 1.1 mrg void
2115 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2116 1.1 mrg {
2117 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2118 1.1 mrg build_sv_index64 (b, "t0,al,d", group);
2119 1.1 mrg build_vs_index (b, "t0,b,ss64", group);
2120 1.1 mrg }
2121 1.1 mrg };
2122 1.1 mrg SHAPE (load_ext_gather_index_restricted)
2123 1.1 mrg
2124 1.1 mrg /* sv<t0>_t svfoo_[s32]offset_t0(const <X>_t *, svint32_t)
2125 1.1 mrg sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
2126 1.1 mrg sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
2127 1.1 mrg sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
2128 1.1 mrg
2129 1.1 mrg sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2130 1.1 mrg sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2131 1.1 mrg
2132 1.1 mrg sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2133 1.1 mrg sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
2134 1.1 mrg
2135 1.1 mrg where <X> is determined by the function base name. */
2136 1.1 mrg struct load_ext_gather_offset_def : public load_ext_gather_base
2137 1.1 mrg {
2138 1.1 mrg void
2139 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2140 1.1 mrg {
2141 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2142 1.1 mrg build_sv_offset (b, "t0,al,d", group);
2143 1.1 mrg build_v_base (b, "t0,b", group, true);
2144 1.1 mrg build_vs_offset (b, "t0,b,ss64", group);
2145 1.1 mrg }
2146 1.1 mrg };
2147 1.1 mrg SHAPE (load_ext_gather_offset)
2148 1.1 mrg
2149 1.1 mrg /* sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
2150 1.1 mrg sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
2151 1.1 mrg sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
2152 1.1 mrg
2153 1.1 mrg sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2154 1.1 mrg sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2155 1.1 mrg
2156 1.1 mrg sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2157 1.1 mrg sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
2158 1.1 mrg
2159 1.1 mrg where <X> is determined by the function base name. This is
2160 1.1 mrg load_ext_gather_offset without the s32 vector offset form. */
2161 1.1 mrg struct load_ext_gather_offset_restricted_def : public load_ext_gather_base
2162 1.1 mrg {
2163 1.1 mrg void
2164 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2165 1.1 mrg {
2166 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2167 1.1 mrg build_sv_uint_offset (b, "t0,al,d", group);
2168 1.1 mrg build_v_base (b, "t0,b", group, true);
2169 1.1 mrg build_vs_offset (b, "t0,b,ss64", group);
2170 1.1 mrg }
2171 1.1 mrg };
2172 1.1 mrg SHAPE (load_ext_gather_offset_restricted)
2173 1.1 mrg
2174 1.1 mrg /* sv<t0>_t svfoo_[s32]index[_t0](const <t0>_t *, svint32_t)
2175 1.1 mrg sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
2176 1.1 mrg sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
2177 1.1 mrg sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)
2178 1.1 mrg
2179 1.1 mrg sv<t0>_t svfoo_[s32]offset[_t0](const <t0>_t *, svint32_t)
2180 1.1 mrg sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
2181 1.1 mrg sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
2182 1.1 mrg sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t). */
2183 1.1 mrg struct load_gather_sv_def : public load_gather_sv_base
2184 1.1 mrg {
2185 1.1 mrg void
2186 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2187 1.1 mrg {
2188 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2189 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2190 1.1 mrg build_sv_index (b, "t0,al,d", group);
2191 1.1 mrg build_sv_offset (b, "t0,al,d", group);
2192 1.1 mrg }
2193 1.1 mrg };
2194 1.1 mrg SHAPE (load_gather_sv)
2195 1.1 mrg
2196 1.1 mrg /* sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
2197 1.1 mrg sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)
2198 1.1 mrg
2199 1.1 mrg sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
2200 1.1 mrg sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
2201 1.1 mrg sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t)
2202 1.1 mrg
2203 1.1 mrg This is load_gather_sv without the 32-bit vector index forms and
2204 1.1 mrg without the s32 vector offset form. */
2205 1.1 mrg struct load_gather_sv_restricted_def : public load_gather_sv_base
2206 1.1 mrg {
2207 1.1 mrg void
2208 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2209 1.1 mrg {
2210 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2211 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2212 1.1 mrg build_sv_index64 (b, "t0,al,d", group);
2213 1.1 mrg build_sv_uint_offset (b, "t0,al,d", group);
2214 1.1 mrg }
2215 1.1 mrg };
2216 1.1 mrg SHAPE (load_gather_sv_restricted)
2217 1.1 mrg
2218 1.1 mrg /* sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2219 1.1 mrg sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2220 1.1 mrg
2221 1.1 mrg sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
2222 1.1 mrg sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
2223 1.1 mrg
2224 1.1 mrg sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2225 1.1 mrg sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t). */
2226 1.1 mrg struct load_gather_vs_def : public overloaded_base<1>
2227 1.1 mrg {
2228 1.1 mrg void
2229 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2230 1.1 mrg {
2231 1.1 mrg /* The base vector mode is optional; the full name has it but the
2232 1.1 mrg short name doesn't. There is no ambiguity with SHAPE_load_gather_sv
2233 1.1 mrg because the latter uses an implicit type suffix. */
2234 1.1 mrg build_v_base (b, "t0,b", group, true);
2235 1.1 mrg build_vs_index (b, "t0,b,ss64", group, true);
2236 1.1 mrg build_vs_offset (b, "t0,b,ss64", group, true);
2237 1.1 mrg }
2238 1.1 mrg
2239 1.1 mrg tree
2240 1.1 mrg resolve (function_resolver &) const OVERRIDE
2241 1.1 mrg {
2242 1.1 mrg /* The short name just makes the base vector mode implicit;
2243 1.1 mrg no resolution is needed. */
2244 1.1 mrg gcc_unreachable ();
2245 1.1 mrg }
2246 1.1 mrg };
2247 1.1 mrg SHAPE (load_gather_vs)
2248 1.1 mrg
2249 1.1 mrg /* sv<t0>_t svfoo[_t0](const <t0>_t *)
2250 1.1 mrg
2251 1.1 mrg The only difference from "load" is that this shape has no vnum form. */
2252 1.1 mrg struct load_replicate_def : public load_contiguous_base
2253 1.1 mrg {
2254 1.1 mrg void
2255 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2256 1.1 mrg {
2257 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2258 1.1 mrg build_all (b, "t0,al", group, MODE_none);
2259 1.1 mrg }
2260 1.1 mrg };
2261 1.1 mrg SHAPE (load_replicate)
2262 1.1 mrg
2263 1.1 mrg /* svbool_t svfoo(enum svpattern). */
2264 1.1 mrg struct pattern_pred_def : public nonoverloaded_base
2265 1.1 mrg {
2266 1.1 mrg void
2267 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2268 1.1 mrg {
2269 1.1 mrg build_all (b, "vp,epattern", group, MODE_none);
2270 1.1 mrg }
2271 1.1 mrg };
2272 1.1 mrg SHAPE (pattern_pred)
2273 1.1 mrg
2274 1.1 mrg /* void svfoo(const void *, svprfop)
2275 1.1 mrg void svfoo_vnum(const void *, int64_t, svprfop). */
2276 1.1 mrg struct prefetch_def : public nonoverloaded_base
2277 1.1 mrg {
2278 1.1 mrg void
2279 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2280 1.1 mrg {
2281 1.1 mrg build_all (b, "_,ap,eprfop", group, MODE_none);
2282 1.1 mrg build_all (b, "_,ap,ss64,eprfop", group, MODE_vnum);
2283 1.1 mrg }
2284 1.1 mrg };
2285 1.1 mrg SHAPE (prefetch)
2286 1.1 mrg
2287 1.1 mrg /* void svfoo_[s32]index(const void *, svint32_t, svprfop)
2288 1.1 mrg void svfoo_[s64]index(const void *, svint64_t, svprfop)
2289 1.1 mrg void svfoo_[u32]index(const void *, svuint32_t, svprfop)
2290 1.1 mrg void svfoo_[u64]index(const void *, svuint64_t, svprfop)
2291 1.1 mrg
2292 1.1 mrg void svfoo[_u32base](svuint32_t, svprfop)
2293 1.1 mrg void svfoo[_u64base](svuint64_t, svprfop)
2294 1.1 mrg
2295 1.1 mrg void svfoo[_u32base]_index(svuint32_t, int64_t, svprfop)
2296 1.1 mrg void svfoo[_u64base]_index(svuint64_t, int64_t, svprfop). */
2297 1.1 mrg struct prefetch_gather_index_def : public prefetch_gather_base
2298 1.1 mrg {
2299 1.1 mrg void
2300 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2301 1.1 mrg {
2302 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2303 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2304 1.1 mrg build_sv_index (b, "_,ap,d,eprfop", group);
2305 1.1 mrg build_v_base (b, "_,b,eprfop", group);
2306 1.1 mrg build_vs_index (b, "_,b,ss64,eprfop", group);
2307 1.1 mrg }
2308 1.1 mrg };
2309 1.1 mrg SHAPE (prefetch_gather_index)
2310 1.1 mrg
2311 1.1 mrg /* void svfoo_[s32]offset(const void *, svint32_t, svprfop)
2312 1.1 mrg void svfoo_[s64]offset(const void *, svint64_t, svprfop)
2313 1.1 mrg void svfoo_[u32]offset(const void *, svuint32_t, svprfop)
2314 1.1 mrg void svfoo_[u64]offset(const void *, svuint64_t, svprfop)
2315 1.1 mrg
2316 1.1 mrg void svfoo[_u32base](svuint32_t, svprfop)
2317 1.1 mrg void svfoo[_u64base](svuint64_t, svprfop)
2318 1.1 mrg
2319 1.1 mrg void svfoo[_u32base]_offset(svuint32_t, int64_t, svprfop)
2320 1.1 mrg void svfoo[_u64base]_offset(svuint64_t, int64_t, svprfop). */
2321 1.1 mrg struct prefetch_gather_offset_def : public prefetch_gather_base
2322 1.1 mrg {
2323 1.1 mrg void
2324 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2325 1.1 mrg {
2326 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2327 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2328 1.1 mrg build_sv_offset (b, "_,ap,d,eprfop", group);
2329 1.1 mrg build_v_base (b, "_,b,eprfop", group);
2330 1.1 mrg build_vs_offset (b, "_,b,ss64,eprfop", group);
2331 1.1 mrg }
2332 1.1 mrg };
2333 1.1 mrg SHAPE (prefetch_gather_offset)
2334 1.1 mrg
2335 1.1 mrg /* bool svfoo(svbool_t). */
2336 1.1 mrg struct ptest_def : public nonoverloaded_base
2337 1.1 mrg {
2338 1.1 mrg void
2339 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2340 1.1 mrg {
2341 1.1 mrg build_all (b, "sp,vp", group, MODE_none);
2342 1.1 mrg }
2343 1.1 mrg };
2344 1.1 mrg SHAPE (ptest)
2345 1.1 mrg
2346 1.1 mrg /* svbool_t svfoo(). */
2347 1.1 mrg struct rdffr_def : public nonoverloaded_base
2348 1.1 mrg {
2349 1.1 mrg void
2350 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2351 1.1 mrg {
2352 1.1 mrg build_all (b, "vp", group, MODE_none);
2353 1.1 mrg }
2354 1.1 mrg };
2355 1.1 mrg SHAPE (rdffr)
2356 1.1 mrg
2357 1.1 mrg /* <t0>_t svfoo[_t0](sv<t0>_t). */
2358 1.1 mrg struct reduction_def : public overloaded_base<0>
2359 1.1 mrg {
2360 1.1 mrg void
2361 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2362 1.1 mrg {
2363 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2364 1.1 mrg build_all (b, "s0,v0", group, MODE_none);
2365 1.1 mrg }
2366 1.1 mrg
2367 1.1 mrg tree
2368 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2369 1.1 mrg {
2370 1.1 mrg return r.resolve_uniform (1);
2371 1.1 mrg }
2372 1.1 mrg };
2373 1.1 mrg SHAPE (reduction)
2374 1.1 mrg
2375 1.1 mrg /* int64_t svfoo[_t0](sv<t0>_t) (for signed t0)
2376 1.1 mrg uint64_t svfoo[_t0](sv<t0>_t) (for unsigned t0)
2377 1.1 mrg <t0>_t svfoo[_t0](sv<t0>_t) (for floating-point t0)
2378 1.1 mrg
2379 1.1 mrg i.e. a version of "reduction" in which the return type for integers
2380 1.1 mrg always has 64 bits. */
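/* For example, svaddv is thought to have this shape:

     int64_t svaddv[_s32](svbool_t, svint32_t)
     float32_t svaddv[_f32](svbool_t, svfloat32_t)  */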
2381 1.1 mrg struct reduction_wide_def : public overloaded_base<0>
2382 1.1 mrg {
2383 1.1 mrg void
2384 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2385 1.1 mrg {
2386 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2387 1.1 mrg build_all (b, "sw0,v0", group, MODE_none);
2388 1.1 mrg }
2389 1.1 mrg
2390 1.1 mrg tree
2391 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2392 1.1 mrg {
2393 1.1 mrg return r.resolve_uniform (1);
2394 1.1 mrg }
2395 1.1 mrg };
2396 1.1 mrg SHAPE (reduction_wide)
2397 1.1 mrg
2398 1.1 mrg /* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)
2399 1.1 mrg
2400 1.1 mrg where the second argument is an integer constant expression in the
2401 1.1 mrg range [0, N - 1]. */
2402 1.1 mrg struct set_def : public overloaded_base<0>
2403 1.1 mrg {
2404 1.1 mrg void
2405 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2406 1.1 mrg {
2407 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2408 1.1 mrg build_all (b, "t0,t0,su64,v0", group, MODE_none);
2409 1.1 mrg }
2410 1.1 mrg
2411 1.1 mrg tree
2412 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2413 1.1 mrg {
2414 1.1 mrg unsigned int i, nargs;
2415 1.1 mrg type_suffix_index type;
2416 1.1 mrg if (!r.check_gp_argument (3, i, nargs)
2417 1.1 mrg || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
2418 1.1 mrg || !r.require_integer_immediate (i + 1)
2419 1.1 mrg || !r.require_derived_vector_type (i + 2, i, type))
2420 1.1 mrg return error_mark_node;
2421 1.1 mrg
2422 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
2423 1.1 mrg }
2424 1.1 mrg
2425 1.1 mrg bool
2426 1.1 mrg check (function_checker &c) const OVERRIDE
2427 1.1 mrg {
2428 1.1 mrg unsigned int nvectors = c.vectors_per_tuple ();
2429 1.1 mrg return c.require_immediate_range (1, 0, nvectors - 1);
2430 1.1 mrg }
2431 1.1 mrg };
2432 1.1 mrg SHAPE (set)
2433 1.1 mrg
2434 1.1 mrg /* void svfoo(). */
2435 1.1 mrg struct setffr_def : public nonoverloaded_base
2436 1.1 mrg {
2437 1.1 mrg void
2438 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2439 1.1 mrg {
2440 1.1 mrg build_all (b, "_", group, MODE_none);
2441 1.1 mrg }
2442 1.1 mrg };
2443 1.1 mrg SHAPE (setffr)
2444 1.1 mrg
2445 1.1 mrg /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2446 1.1 mrg
2447 1.1 mrg where the final argument must be an integer constant expression in the
2448 1.1 mrg range [0, sizeof (<t0>_t) * 8 - 1]. */
2449 1.1 mrg struct shift_left_imm_def : public overloaded_base<0>
2450 1.1 mrg {
2451 1.1 mrg void
2452 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2453 1.1 mrg {
2454 1.1 mrg b.add_overloaded_functions (group, MODE_n);
2455 1.1 mrg build_all (b, "v0,v0,su64", group, MODE_n);
2456 1.1 mrg }
2457 1.1 mrg
2458 1.1 mrg tree
2459 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2460 1.1 mrg {
2461 1.1 mrg return r.resolve_uniform (1, 1);
2462 1.1 mrg }
2463 1.1 mrg
2464 1.1 mrg bool
2465 1.1 mrg check (function_checker &c) const OVERRIDE
2466 1.1 mrg {
2467 1.1 mrg unsigned int bits = c.type_suffix (0).element_bits;
2468 1.1 mrg return c.require_immediate_range (1, 0, bits - 1);
2469 1.1 mrg }
2470 1.1 mrg };
2471 1.1 mrg SHAPE (shift_left_imm)
2472 1.1 mrg
2473 1.1 mrg /* sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, uint64_t)
2474 1.1 mrg
2475 1.1 mrg where the final argument must be an integer constant expression in the
2476 1.1 mrg range [0, sizeof (<t0>_t) * 4 - 1]. */
2477 1.1 mrg struct shift_left_imm_long_def : public binary_imm_long_base
2478 1.1 mrg {
2479 1.1 mrg bool
2480 1.1 mrg check (function_checker &c) const OVERRIDE
2481 1.1 mrg {
2482 1.1 mrg unsigned int bits = c.type_suffix (0).element_bits / 2;
2483 1.1 mrg return c.require_immediate_range (1, 0, bits - 1);
2484 1.1 mrg }
2485 1.1 mrg };
2486 1.1 mrg SHAPE (shift_left_imm_long)
2487 1.1 mrg
2488 1.1 mrg /* sv<t0:uint>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2489 1.1 mrg
2490 1.1 mrg where the final argument must be an integer constant expression in the
2491 1.1 mrg range [0, sizeof (<t0>_t) * 8 - 1]. */
2492 1.1 mrg struct shift_left_imm_to_uint_def : public shift_left_imm_def
2493 1.1 mrg {
2494 1.1 mrg void
2495 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2496 1.1 mrg {
2497 1.1 mrg b.add_overloaded_functions (group, MODE_n);
2498 1.1 mrg build_all (b, "vu0,v0,su64", group, MODE_n);
2499 1.1 mrg }
2500 1.1 mrg };
2501 1.1 mrg SHAPE (shift_left_imm_to_uint)
2502 1.1 mrg
2503 1.1 mrg /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2504 1.1 mrg
2505 1.1 mrg where the final argument must be an integer constant expression in the
2506 1.1 mrg range [1, sizeof (<t0>_t) * 8]. */
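/* For example, svasrd presumably has this shape; for _s32 the immediate
   must be in [1, 32]:

     svint32_t svasrd[_n_s32]_m(svbool_t, svint32_t, uint64_t)  */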
2507 1.1 mrg struct shift_right_imm_def : public overloaded_base<0>
2508 1.1 mrg {
2509 1.1 mrg void
2510 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2511 1.1 mrg {
2512 1.1 mrg b.add_overloaded_functions (group, MODE_n);
2513 1.1 mrg build_all (b, "v0,v0,su64", group, MODE_n);
2514 1.1 mrg }
2515 1.1 mrg
2516 1.1 mrg tree
2517 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2518 1.1 mrg {
2519 1.1 mrg return r.resolve_uniform (1, 1);
2520 1.1 mrg }
2521 1.1 mrg
2522 1.1 mrg bool
2523 1.1 mrg check (function_checker &c) const OVERRIDE
2524 1.1 mrg {
2525 1.1 mrg unsigned int bits = c.type_suffix (0).element_bits;
2526 1.1 mrg return c.require_immediate_range (1, 1, bits);
2527 1.1 mrg }
2528 1.1 mrg };
2529 1.1 mrg SHAPE (shift_right_imm)
2530 1.1 mrg
2531 1.1 mrg /* sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2532 1.1 mrg
2533 1.1 mrg where the final argument must be an integer constant expression in the
2534 1.1 mrg range [1, sizeof (<t0>_t) * 4]. */
2535 1.1 mrg typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base<>, 1>
2536 1.1 mrg shift_right_imm_narrowb_def;
2537 1.1 mrg SHAPE (shift_right_imm_narrowb)
2538 1.1 mrg
2539 1.1 mrg /* sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, uint64_t)
2540 1.1 mrg
2541 1.1 mrg where the final argument must be an integer constant expression in the
2542 1.1 mrg range [1, sizeof (<t0>_t) * 4]. */
2543 1.1 mrg typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base<>, 2>
2544 1.1 mrg shift_right_imm_narrowt_def;
2545 1.1 mrg SHAPE (shift_right_imm_narrowt)
2546 1.1 mrg
2547 1.1 mrg /* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2548 1.1 mrg
2549 1.1 mrg where the final argument must be an integer constant expression in the
2550 1.1 mrg range [1, sizeof (<t0>_t) * 4]. */
2551 1.1 mrg typedef binary_imm_narrowb_base<TYPE_unsigned>
2552 1.1 mrg binary_imm_narrowb_base_unsigned;
2553 1.1 mrg typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base_unsigned, 1>
2554 1.1 mrg shift_right_imm_narrowb_to_uint_def;
2555 1.1 mrg SHAPE (shift_right_imm_narrowb_to_uint)
2556 1.1 mrg
2557 1.1 mrg /* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0:uint:half>_t, sv<t0>_t, uint64_t)
2558 1.1 mrg
2559 1.1 mrg where the final argument must be an integer constant expression in the
2560 1.1 mrg range [1, sizeof (<t0>_t) * 4]. */
2561 1.1 mrg typedef binary_imm_narrowt_base<TYPE_unsigned>
2562 1.1 mrg binary_imm_narrowt_base_unsigned;
2563 1.1 mrg typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base_unsigned, 2>
2564 1.1 mrg shift_right_imm_narrowt_to_uint_def;
2565 1.1 mrg SHAPE (shift_right_imm_narrowt_to_uint)
2566 1.1 mrg
2567 1.1 mrg /* void svfoo[_t0](<X>_t *, sv<t0>[xN]_t)
2568 1.1 mrg void svfoo_vnum[_t0](<X>_t *, int64_t, sv<t0>[xN]_t)
2569 1.1 mrg
2570 1.1 mrg where <X> might be tied to <t0> (for non-truncating stores) or might
2571 1.1 mrg depend on the function base name (for truncating stores). */
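/* For example, svst1 (non-truncating) and svst1b (truncating) plausibly
   share this shape; for _s32:

     void svst1[_s32](svbool_t, int32_t *, svint32_t)
     void svst1b[_s32](svbool_t, int8_t *, svint32_t)  */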
2572 1.1 mrg struct store_def : public overloaded_base<0>
2573 1.1 mrg {
2574 1.1 mrg void
2575 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2576 1.1 mrg {
2577 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2578 1.1 mrg b.add_overloaded_functions (group, MODE_vnum);
2579 1.1 mrg build_all (b, "_,as,t0", group, MODE_none);
2580 1.1 mrg build_all (b, "_,as,ss64,t0", group, MODE_vnum);
2581 1.1 mrg }
2582 1.1 mrg
2583 1.1 mrg tree
2584 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2585 1.1 mrg {
2586 1.1 mrg bool vnum_p = r.mode_suffix_id == MODE_vnum;
2587 1.1 mrg gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
2588 1.1 mrg
2589 1.1 mrg unsigned int i, nargs;
2590 1.1 mrg type_suffix_index type;
2591 1.1 mrg if (!r.check_gp_argument (vnum_p ? 3 : 2, i, nargs)
2592 1.1 mrg || !r.require_pointer_type (i)
2593 1.1 mrg || (vnum_p && !r.require_scalar_type (i + 1, "int64_t"))
2594 1.1 mrg || ((type = r.infer_tuple_type (nargs - 1)) == NUM_TYPE_SUFFIXES))
2595 1.1 mrg return error_mark_node;
2596 1.1 mrg
2597 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
2598 1.1 mrg }
2599 1.1 mrg };
2600 1.1 mrg SHAPE (store)
2601 1.1 mrg
2602 1.1 mrg /* void svfoo_[s32]index[_t0](<X>_t *, svint32_t, sv<t0>_t)
2603 1.1 mrg void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
2604 1.1 mrg void svfoo_[u32]index[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2605 1.1 mrg void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2606 1.1 mrg
2607 1.1 mrg void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
2608 1.1 mrg void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)
2609 1.1 mrg
2610 1.1 mrg where <X> might be tied to <t0> (for non-truncating stores) or might
2611 1.1 mrg depend on the function base name (for truncating stores). */
2612 1.1 mrg struct store_scatter_index_def : public store_scatter_base
2613 1.1 mrg {
2614 1.1 mrg void
2615 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2616 1.1 mrg {
2617 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2618 1.1 mrg build_sv_index (b, "_,as,d,t0", group);
2619 1.1 mrg build_vs_index (b, "_,b,ss64,t0", group);
2620 1.1 mrg }
2621 1.1 mrg };
2622 1.1 mrg SHAPE (store_scatter_index)
2623 1.1 mrg
2624 1.1 mrg /* void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
2625 1.1 mrg void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2626 1.1 mrg
2627 1.1 mrg void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
2628 1.1 mrg void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)
2629 1.1 mrg
2630 1.1 mrg i.e. a version of store_scatter_index that doesn't support 32-bit
2631 1.1 mrg vector indices. */
2632 1.1 mrg struct store_scatter_index_restricted_def : public store_scatter_base
2633 1.1 mrg {
2634 1.1 mrg void
2635 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2636 1.1 mrg {
2637 1.1 mrg b.add_overloaded_functions (group, MODE_index);
2638 1.1 mrg build_sv_index64 (b, "_,as,d,t0", group);
2639 1.1 mrg build_vs_index (b, "_,b,ss64,t0", group);
2640 1.1 mrg }
2641 1.1 mrg };
2642 1.1 mrg SHAPE (store_scatter_index_restricted)
2643 1.1 mrg
2644 1.1 mrg /* void svfoo_[s32]offset[_t0](<X>_t *, svint32_t, sv<t0>_t)
2645 1.1 mrg void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
2646 1.1 mrg void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2647 1.1 mrg void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2648 1.1 mrg
2649 1.1 mrg void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
2650 1.1 mrg void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)
2651 1.1 mrg
2652 1.1 mrg void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
2653 1.1 mrg void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)
2654 1.1 mrg
2655 1.1 mrg where <X> might be tied to <t0> (for non-truncating stores) or might
2656 1.1 mrg depend on the function base name (for truncating stores). */
2657 1.1 mrg struct store_scatter_offset_def : public store_scatter_base
2658 1.1 mrg {
2659 1.1 mrg void
2660 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2661 1.1 mrg {
2662 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2663 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2664 1.1 mrg build_sv_offset (b, "_,as,d,t0", group);
2665 1.1 mrg build_v_base (b, "_,b,t0", group);
2666 1.1 mrg build_vs_offset (b, "_,b,ss64,t0", group);
2667 1.1 mrg }
2668 1.1 mrg };
2669 1.1 mrg SHAPE (store_scatter_offset)
2670 1.1 mrg
2671 1.1 mrg /* void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
2672 1.1 mrg void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2673 1.1 mrg void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2674 1.1 mrg
2675 1.1 mrg void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
2676 1.1 mrg void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)
2677 1.1 mrg
2678 1.1 mrg void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
2679 1.1 mrg void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)
2680 1.1 mrg
2681 1.1 mrg i.e. a version of store_scatter_offset that doesn't support svint32_t
2682 1.1 mrg offsets. */
2683 1.1 mrg struct store_scatter_offset_restricted_def : public store_scatter_base
2684 1.1 mrg {
2685 1.1 mrg void
2686 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2687 1.1 mrg {
2688 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2689 1.1 mrg b.add_overloaded_functions (group, MODE_offset);
2690 1.1 mrg build_sv_uint_offset (b, "_,as,d,t0", group);
2691 1.1 mrg build_v_base (b, "_,b,t0", group);
2692 1.1 mrg build_vs_offset (b, "_,b,ss64,t0", group);
2693 1.1 mrg }
2694 1.1 mrg };
2695 1.1 mrg SHAPE (store_scatter_offset_restricted)
2696 1.1 mrg
2697 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>xN_t, sv<t0:uint>_t). */
2698 1.1 mrg struct tbl_tuple_def : public overloaded_base<0>
2699 1.1 mrg {
2700 1.1 mrg void
2701 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2702 1.1 mrg {
2703 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2704 1.1 mrg build_all (b, "v0,t0,vu0", group, MODE_none);
2705 1.1 mrg }
2706 1.1 mrg
2707 1.1 mrg tree
2708 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2709 1.1 mrg {
2710 1.1 mrg unsigned int i, nargs;
2711 1.1 mrg type_suffix_index type;
2712 1.1 mrg if (!r.check_gp_argument (2, i, nargs)
2713 1.1 mrg || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
2714 1.1 mrg || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
2715 1.1 mrg return error_mark_node;
2716 1.1 mrg
2717 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
2718 1.1 mrg }
2719 1.1 mrg };
2720 1.1 mrg SHAPE (tbl_tuple)
2721 1.1 mrg
2722 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t). */
2723 1.1 mrg struct ternary_bfloat_def
2724 1.1 mrg : public ternary_resize2_base<16, TYPE_bfloat, TYPE_bfloat>
2725 1.1 mrg {
2726 1.1 mrg void
2727 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2728 1.1 mrg {
2729 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2730 1.1 mrg build_all (b, "v0,v0,vB,vB", group, MODE_none);
2731 1.1 mrg }
2732 1.1 mrg };
2733 1.1 mrg SHAPE (ternary_bfloat)
2734 1.1 mrg
2735 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t, uint64_t)
2736 1.1 mrg
2737 1.1 mrg where the final argument is an integer constant expression in the range
2738 1.1 mrg [0, 7]. */
2739 1.1 mrg typedef ternary_bfloat_lane_base<1> ternary_bfloat_lane_def;
2740 1.1 mrg SHAPE (ternary_bfloat_lane)
2741 1.1 mrg
2742 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t, uint64_t)
2743 1.1 mrg
2744 1.1 mrg where the final argument is an integer constant expression in the range
2745 1.1 mrg [0, 3]. */
2746 1.1 mrg typedef ternary_bfloat_lane_base<2> ternary_bfloat_lanex2_def;
2747 1.1 mrg SHAPE (ternary_bfloat_lanex2)
2748 1.1 mrg
2749 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t)
2750 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, svbfloat16_t, bfloat16_t). */
2751 1.1 mrg struct ternary_bfloat_opt_n_def
2752 1.1 mrg : public ternary_resize2_opt_n_base<16, TYPE_bfloat, TYPE_bfloat>
2753 1.1 mrg {
2754 1.1 mrg void
2755 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2756 1.1 mrg {
2757 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2758 1.1 mrg build_all (b, "v0,v0,vB,vB", group, MODE_none);
2759 1.1 mrg build_all (b, "v0,v0,vB,sB", group, MODE_n);
2760 1.1 mrg }
2761 1.1 mrg };
2762 1.1 mrg SHAPE (ternary_bfloat_opt_n)
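
/* For example, svbfdot follows this pattern:

     svfloat32_t svbfdot[_f32](svfloat32_t, svbfloat16_t, svbfloat16_t)
     svfloat32_t svbfdot[_n_f32](svfloat32_t, svbfloat16_t, bfloat16_t)

   where the _n form broadcasts the final scalar operand.  */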
2763 1.1 mrg
2764 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int:quarter>_t, sv<t0:uint:quarter>_t,
2765 1.1 mrg uint64_t)
2766 1.1 mrg
2767 1.1 mrg where the final argument is an integer constant expression in the range
2768 1.1 mrg [0, 16 / sizeof (<t0>_t) - 1]. */
2769 1.1 mrg struct ternary_intq_uintq_lane_def
2770 1.1 mrg : public ternary_qq_lane_base<TYPE_signed, TYPE_unsigned>
2771 1.1 mrg {
2772 1.1 mrg void
2773 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2774 1.1 mrg {
2775 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2776 1.1 mrg build_all (b, "v0,v0,vqs0,vqu0,su64", group, MODE_none);
2777 1.1 mrg }
2778 1.1 mrg };
2779 1.1 mrg SHAPE (ternary_intq_uintq_lane)
2780 1.1 mrg
2781 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int:quarter>_t, sv<t0:uint:quarter>_t)
2782 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:int:quarter>_t,
2783 1.1 mrg <t0:uint:quarter>_t). */
2784 1.1 mrg struct ternary_intq_uintq_opt_n_def
2785 1.1 mrg : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE,
2786 1.1 mrg TYPE_signed, TYPE_unsigned>
2787 1.1 mrg {
2788 1.1 mrg void
2789 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2790 1.1 mrg {
2791 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2792 1.1 mrg build_all (b, "v0,v0,vqs0,vqu0", group, MODE_none);
2793 1.1 mrg build_all (b, "v0,v0,vqs0,squ0", group, MODE_n);
2794 1.1 mrg }
2795 1.1 mrg };
2796 1.1 mrg SHAPE (ternary_intq_uintq_opt_n)
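
/* Here <t0:int:quarter> and <t0:uint:quarter> are signed and unsigned
   vectors whose elements are a quarter the size of <t0>'s.  With
   t0 = s32 the shape above corresponds to, for example:

     svint32_t svfoo[_s32](svint32_t, svint8_t, svuint8_t)
     svint32_t svfoo[_n_s32](svint32_t, svint8_t, uint8_t)

   which is the pattern of the mixed-sign dot product svsudot.  */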
2797 1.1 mrg
2798 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)
2799 1.1 mrg
2800 1.1 mrg where the final argument is an integer constant expression in the
2801 1.1 mrg range [0, 16 / sizeof (<t0>_t) - 1]. */
2802 1.1 mrg struct ternary_lane_def : public overloaded_base<0>
2803 1.1 mrg {
2804 1.1 mrg void
2805 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2806 1.1 mrg {
2807 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2808 1.1 mrg build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
2809 1.1 mrg }
2810 1.1 mrg
2811 1.1 mrg tree
2812 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2813 1.1 mrg {
2814 1.1 mrg return r.resolve_uniform (3, 1);
2815 1.1 mrg }
2816 1.1 mrg
2817 1.1 mrg bool
2818 1.1 mrg check (function_checker &c) const OVERRIDE
2819 1.1 mrg {
2820 1.1 mrg return c.require_immediate_lane_index (3);
2821 1.1 mrg }
2822 1.1 mrg };
2823 1.1 mrg SHAPE (ternary_lane)
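
/* As an example of the ternary_lane shape above, with t0 = f32:

     svfloat32_t svfoo[_f32](svfloat32_t, svfloat32_t, svfloat32_t, uint64_t)

   with the lane index restricted to [0, 3] (four 32-bit elements per
   128-bit quadword); svmla_lane is one intrinsic of this form.  */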
2824 1.1 mrg
2825 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t, uint64_t)
2826 1.1 mrg
2827 1.1 mrg where the penultimate argument is an integer constant expression in
2828 1.1 mrg the range [0, 8 / sizeof (<t0>_t) - 1] and where the final argument
2829 1.1 mrg is an integer constant expression in {0, 90, 180, 270}. */
2830 1.1 mrg struct ternary_lane_rotate_def : public overloaded_base<0>
2831 1.1 mrg {
2832 1.1 mrg void
2833 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2834 1.1 mrg {
2835 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2836 1.1 mrg build_all (b, "v0,v0,v0,v0,su64,su64", group, MODE_none);
2837 1.1 mrg }
2838 1.1 mrg
2839 1.1 mrg tree
2840 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2841 1.1 mrg {
2842 1.1 mrg return r.resolve_uniform (3, 2);
2843 1.1 mrg }
2844 1.1 mrg
2845 1.1 mrg bool
2846 1.1 mrg check (function_checker &c) const OVERRIDE
2847 1.1 mrg {
2848 1.1 mrg return (c.require_immediate_lane_index (3, 2)
2849 1.1 mrg && c.require_immediate_one_of (4, 0, 90, 180, 270));
2850 1.1 mrg }
2851 1.1 mrg };
2852 1.1 mrg SHAPE (ternary_lane_rotate)
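
/* For example, svcmla_lane has this shape; for f32:

     svfloat32_t svcmla_lane[_f32](svfloat32_t, svfloat32_t, svfloat32_t,
				   uint64_t imm_index, uint64_t imm_rotation)

   where imm_index selects a complex-number pair (so [0, 1] for f32) and
   imm_rotation must be 0, 90, 180 or 270.  */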
2853 1.1 mrg
2854 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t, uint64_t)
2855 1.1 mrg
2856 1.1 mrg where the final argument is an integer constant expression in the range
2857 1.1 mrg [0, 32 / sizeof (<t0>_t) - 1]. */
2858 1.1 mrg struct ternary_long_lane_def
2859 1.1 mrg : public ternary_resize2_lane_base<function_resolver::HALF_SIZE>
2860 1.1 mrg {
2861 1.1 mrg void
2862 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2863 1.1 mrg {
2864 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2865 1.1 mrg build_all (b, "v0,v0,vh0,vh0,su64", group, MODE_none);
2866 1.1 mrg }
2867 1.1 mrg
2868 1.1 mrg bool
2869 1.1 mrg check (function_checker &c) const OVERRIDE
2870 1.1 mrg {
2871 1.1 mrg return c.require_immediate_lane_index (3);
2872 1.1 mrg }
2873 1.1 mrg };
2874 1.1 mrg SHAPE (ternary_long_lane)
2875 1.1 mrg
2876 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t)
2877 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:half>_t, <t0:half>_t)
2878 1.1 mrg
2879 1.1 mrg i.e. a version of the standard ternary shape ternary_opt_n in which
2880 1.1 mrg the element type of the last two arguments is the half-sized
2881 1.1 mrg equivalent of <t0>. */
2882 1.1 mrg struct ternary_long_opt_n_def
2883 1.1 mrg : public ternary_resize2_opt_n_base<function_resolver::HALF_SIZE>
2884 1.1 mrg {
2885 1.1 mrg void
2886 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2887 1.1 mrg {
2888 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2889 1.1 mrg build_all (b, "v0,v0,vh0,vh0", group, MODE_none);
2890 1.1 mrg build_all (b, "v0,v0,vh0,sh0", group, MODE_n);
2891 1.1 mrg }
2892 1.1 mrg };
2893 1.1 mrg SHAPE (ternary_long_opt_n)
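
/* With t0 = s32, <t0:half> is s16, so the ternary_long_opt_n shape above
   corresponds to, for example:

     svint32_t svfoo[_s32](svint32_t, svint16_t, svint16_t)
     svint32_t svfoo[_n_s32](svint32_t, svint16_t, int16_t)

   as used by SVE2 widening multiply-accumulate intrinsics such as
   svmlalb.  */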
2894 1.1 mrg
2895 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)
2896 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, <t0>_t)
2897 1.1 mrg
2898 1.1 mrg i.e. the standard shape for ternary operations that operate on
2899 1.1 mrg uniform types. */
2900 1.1 mrg struct ternary_opt_n_def : public overloaded_base<0>
2901 1.1 mrg {
2902 1.1 mrg void
2903 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2904 1.1 mrg {
2905 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2906 1.1 mrg build_all (b, "v0,v0,v0,v0", group, MODE_none);
2907 1.1 mrg build_all (b, "v0,v0,v0,s0", group, MODE_n);
2908 1.1 mrg }
2909 1.1 mrg
2910 1.1 mrg tree
2911 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2912 1.1 mrg {
2913 1.1 mrg return r.resolve_uniform_opt_n (3);
2914 1.1 mrg }
2915 1.1 mrg };
2916 1.1 mrg SHAPE (ternary_opt_n)
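
/* For instance, with t0 = f32 (and the governing predicate omitted, as
   throughout these comments), the ternary_opt_n shape above gives:

     svfloat32_t svfoo[_f32](svfloat32_t, svfloat32_t, svfloat32_t)
     svfloat32_t svfoo[_n_f32](svfloat32_t, svfloat32_t, float32_t)

   svmla and svmad are typical intrinsics of this form.  */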
2917 1.1 mrg
2918 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t, uint64_t)
2919 1.1 mrg
2920 1.1 mrg where the final argument is an integer constant expression in the range
2921 1.1 mrg [0, 16 / sizeof (<t0>_t) - 1]. */
2922 1.1 mrg struct ternary_qq_lane_def : public ternary_qq_lane_base<>
2923 1.1 mrg {
2924 1.1 mrg void
2925 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2926 1.1 mrg {
2927 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2928 1.1 mrg build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
2929 1.1 mrg }
2930 1.1 mrg };
2931 1.1 mrg SHAPE (ternary_qq_lane)
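
/* For example, svdot_lane has this shape; for s32:

     svint32_t svdot_lane[_s32](svint32_t, svint8_t, svint8_t, uint64_t)

   with the index in [0, 3], i.e. it selects one of the four groups of
   quarter-width elements within each 128-bit quadword.  */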
2932 1.1 mrg
2933 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
2934 1.1 mrg 		       uint64_t, uint64_t)
2935 1.1 mrg 
2936 1.1 mrg    where the penultimate argument is an integer constant expression in the
2937 1.1 mrg    range [0, 16 / sizeof (<t0>_t) - 1] and the final one is in {0, 90, 180, 270}. */
2938 1.1 mrg struct ternary_qq_lane_rotate_def : public overloaded_base<0>
2939 1.1 mrg {
2940 1.1 mrg void
2941 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2942 1.1 mrg {
2943 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2944 1.1 mrg build_all (b, "v0,v0,vq0,vq0,su64,su64", group, MODE_none);
2945 1.1 mrg }
2946 1.1 mrg
2947 1.1 mrg tree
2948 1.1 mrg resolve (function_resolver &r) const OVERRIDE
2949 1.1 mrg {
2950 1.1 mrg unsigned int i, nargs;
2951 1.1 mrg type_suffix_index type;
2952 1.1 mrg if (!r.check_gp_argument (5, i, nargs)
2953 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
2954 1.1 mrg || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
2955 1.1 mrg r.QUARTER_SIZE)
2956 1.1 mrg || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
2957 1.1 mrg r.QUARTER_SIZE)
2958 1.1 mrg || !r.require_integer_immediate (i + 3)
2959 1.1 mrg || !r.require_integer_immediate (i + 4))
2960 1.1 mrg return error_mark_node;
2961 1.1 mrg
2962 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
2963 1.1 mrg }
2964 1.1 mrg
2965 1.1 mrg bool
2966 1.1 mrg check (function_checker &c) const OVERRIDE
2967 1.1 mrg {
2968 1.1 mrg return (c.require_immediate_lane_index (3, 4)
2969 1.1 mrg && c.require_immediate_one_of (4, 0, 90, 180, 270));
2970 1.1 mrg }
2971 1.1 mrg };
2972 1.1 mrg SHAPE (ternary_qq_lane_rotate)
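
/* For example, the SVE2 complex dot product svcdot_lane appears to have
   this shape; for s32:

     svint32_t svcdot_lane[_s32](svint32_t, svint8_t, svint8_t,
				 uint64_t imm_index, uint64_t imm_rotation)

   with imm_index in [0, 3] and imm_rotation one of 0, 90, 180, 270.  */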
2973 1.1 mrg
2974 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
2975 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)
2976 1.1 mrg
2977 1.1 mrg i.e. a version of the standard ternary shape ternary_opt_n in which
2978 1.1 mrg the element type of the last two arguments is the quarter-sized
2979 1.1 mrg equivalent of <t0>. */
2980 1.1 mrg struct ternary_qq_opt_n_def
2981 1.1 mrg : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE>
2982 1.1 mrg {
2983 1.1 mrg void
2984 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
2985 1.1 mrg {
2986 1.1 mrg b.add_overloaded_functions (group, MODE_none);
2987 1.1 mrg build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
2988 1.1 mrg build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
2989 1.1 mrg }
2990 1.1 mrg };
2991 1.1 mrg SHAPE (ternary_qq_opt_n)
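
/* For example, the integer dot product svdot follows this pattern:

     svint32_t svdot[_s32](svint32_t, svint8_t, svint8_t)
     svint32_t svdot[_n_s32](svint32_t, svint8_t, int8_t)

   i.e. the accumulator elements are four times the width of the
   multiplicand elements.  */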
2992 1.1 mrg
2993 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
2994 1.1 mrg uint64_t)
2995 1.1 mrg
2996 1.1 mrg where the final argument is an integer constant expression in
2997 1.1 mrg {0, 90, 180, 270}. */
2998 1.1 mrg struct ternary_qq_rotate_def : public overloaded_base<0>
2999 1.1 mrg {
3000 1.1 mrg void
3001 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3002 1.1 mrg {
3003 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3004 1.1 mrg build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
3005 1.1 mrg }
3006 1.1 mrg
3007 1.1 mrg tree
3008 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3009 1.1 mrg {
3010 1.1 mrg unsigned int i, nargs;
3011 1.1 mrg type_suffix_index type;
3012 1.1 mrg if (!r.check_gp_argument (4, i, nargs)
3013 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
3014 1.1 mrg || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
3015 1.1 mrg r.QUARTER_SIZE)
3016 1.1 mrg || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
3017 1.1 mrg r.QUARTER_SIZE)
3018 1.1 mrg || !r.require_integer_immediate (i + 3))
3019 1.1 mrg return error_mark_node;
3020 1.1 mrg
3021 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
3022 1.1 mrg }
3023 1.1 mrg
3024 1.1 mrg bool
3025 1.1 mrg check (function_checker &c) const OVERRIDE
3026 1.1 mrg {
3027 1.1 mrg return c.require_immediate_one_of (3, 0, 90, 180, 270);
3028 1.1 mrg }
3029 1.1 mrg };
3030 1.1 mrg SHAPE (ternary_qq_rotate)
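
/* For example, the SVE2 complex dot product svcdot has this shape:

     svint32_t svcdot[_s32](svint32_t, svint8_t, svint8_t, uint64_t)

   where the final argument gives the rotation applied to the complex
   quarter-width operands.  */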
3031 1.1 mrg
3032 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)
3033 1.1 mrg
3034 1.1 mrg where the final argument is an integer constant expression in
3035 1.1 mrg {0, 90, 180, 270}. */
3036 1.1 mrg struct ternary_rotate_def : public overloaded_base<0>
3037 1.1 mrg {
3038 1.1 mrg void
3039 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3040 1.1 mrg {
3041 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3042 1.1 mrg build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
3043 1.1 mrg }
3044 1.1 mrg
3045 1.1 mrg tree
3046 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3047 1.1 mrg {
3048 1.1 mrg return r.resolve_uniform (3, 1);
3049 1.1 mrg }
3050 1.1 mrg
3051 1.1 mrg bool
3052 1.1 mrg check (function_checker &c) const OVERRIDE
3053 1.1 mrg {
3054 1.1 mrg return c.require_immediate_one_of (3, 0, 90, 180, 270);
3055 1.1 mrg }
3056 1.1 mrg };
3057 1.1 mrg SHAPE (ternary_rotate)
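
/* For example, svcmla (with its governing predicate omitted, as in these
   comments) follows this pattern:

     svfloat32_t svcmla[_f32](svfloat32_t, svfloat32_t, svfloat32_t, uint64_t)

   where the final argument selects the rotation: 0, 90, 180 or 270.  */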
3058 1.1 mrg
3059 1.1 mrg /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)
3060 1.1 mrg
3061 1.1 mrg where the final argument must be an integer constant expression in the
3062 1.1 mrg range [0, sizeof (<t0>_t) * 8 - 1]. */
3063 1.1 mrg struct ternary_shift_left_imm_def : public ternary_shift_imm_base
3064 1.1 mrg {
3065 1.1 mrg bool
3066 1.1 mrg check (function_checker &c) const OVERRIDE
3067 1.1 mrg {
3068 1.1 mrg unsigned int bits = c.type_suffix (0).element_bits;
3069 1.1 mrg return c.require_immediate_range (2, 0, bits - 1);
3070 1.1 mrg }
3071 1.1 mrg };
3072 1.1 mrg SHAPE (ternary_shift_left_imm)
3073 1.1 mrg
3074 1.1 mrg /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)
3075 1.1 mrg
3076 1.1 mrg where the final argument must be an integer constant expression in the
3077 1.1 mrg range [1, sizeof (<t0>_t) * 8]. */
3078 1.1 mrg struct ternary_shift_right_imm_def : public ternary_shift_imm_base
3079 1.1 mrg {
3080 1.1 mrg bool
3081 1.1 mrg check (function_checker &c) const OVERRIDE
3082 1.1 mrg {
3083 1.1 mrg unsigned int bits = c.type_suffix (0).element_bits;
3084 1.1 mrg return c.require_immediate_range (2, 1, bits);
3085 1.1 mrg }
3086 1.1 mrg };
3087 1.1 mrg SHAPE (ternary_shift_right_imm)
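
/* For example, with t0 = u32 the two shift shapes above both describe

     svuint32_t svfoo[_n_u32](svuint32_t, svuint32_t, uint64_t)

   with the immediate in [0, 31] for left shifts (e.g. the SVE2 shift-and-
   insert svsli) and in [1, 32] for right shifts (e.g. the accumulating
   shift svsra).  */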
3088 1.1 mrg
3089 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0:uint>_t). */
3090 1.1 mrg struct ternary_uint_def : public overloaded_base<0>
3091 1.1 mrg {
3092 1.1 mrg void
3093 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3094 1.1 mrg {
3095 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3096 1.1 mrg build_all (b, "v0,v0,v0,vu0", group, MODE_none);
3097 1.1 mrg }
3098 1.1 mrg
3099 1.1 mrg tree
3100 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3101 1.1 mrg {
3102 1.1 mrg unsigned int i, nargs;
3103 1.1 mrg type_suffix_index type;
3104 1.1 mrg if (!r.check_gp_argument (3, i, nargs)
3105 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
3106 1.1 mrg || !r.require_matching_vector_type (i + 1, type)
3107 1.1 mrg || !r.require_derived_vector_type (i + 2, i, type, TYPE_unsigned))
3108 1.1 mrg return error_mark_node;
3109 1.1 mrg
3110 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
3111 1.1 mrg }
3112 1.1 mrg };
3113 1.1 mrg SHAPE (ternary_uint)
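
/* For example, the SVE2 table lookup with fallback, svtbx, has this shape:

     svfloat32_t svtbx[_f32](svfloat32_t fallback, svfloat32_t data,
			     svuint32_t indices)

   i.e. the index vector is always the unsigned integer type with the
   same element size as <t0>.  */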
3114 1.1 mrg
3115 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t,
3116 1.1 mrg sv<t0:int:quarter>_t). */
3117 1.1 mrg struct ternary_uintq_intq_def
3118 1.1 mrg : public ternary_resize2_base<function_resolver::QUARTER_SIZE,
3119 1.1 mrg TYPE_unsigned, TYPE_signed>
3120 1.1 mrg {
3121 1.1 mrg void
3122 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3123 1.1 mrg {
3124 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3125 1.1 mrg build_all (b, "v0,v0,vqu0,vqs0", group, MODE_none);
3126 1.1 mrg }
3127 1.1 mrg };
3128 1.1 mrg SHAPE (ternary_uintq_intq)
3129 1.1 mrg
3130 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t, sv<t0:int:quarter>_t,
3131 1.1 mrg uint64_t)
3132 1.1 mrg
3133 1.1 mrg where the final argument is an integer constant expression in the range
3134 1.1 mrg [0, 16 / sizeof (<t0>_t) - 1]. */
3135 1.1 mrg struct ternary_uintq_intq_lane_def
3136 1.1 mrg : public ternary_qq_lane_base<TYPE_unsigned, TYPE_signed>
3137 1.1 mrg {
3138 1.1 mrg void
3139 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3140 1.1 mrg {
3141 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3142 1.1 mrg build_all (b, "v0,v0,vqu0,vqs0,su64", group, MODE_none);
3143 1.1 mrg }
3144 1.1 mrg };
3145 1.1 mrg SHAPE (ternary_uintq_intq_lane)
3146 1.1 mrg
3147 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t, sv<t0:int:quarter>_t)
3148 1.1 mrg sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:uint:quarter>_t,
3149 1.1 mrg <t0:int:quarter>_t). */
3150 1.1 mrg struct ternary_uintq_intq_opt_n_def
3151 1.1 mrg : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE,
3152 1.1 mrg TYPE_unsigned, TYPE_signed>
3153 1.1 mrg {
3154 1.1 mrg void
3155 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3156 1.1 mrg {
3157 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3158 1.1 mrg build_all (b, "v0,v0,vqu0,vqs0", group, MODE_none);
3159 1.1 mrg build_all (b, "v0,v0,vqu0,sqs0", group, MODE_n);
3160 1.1 mrg }
3161 1.1 mrg };
3162 1.1 mrg SHAPE (ternary_uintq_intq_opt_n)
3163 1.1 mrg
3164 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
3165 1.1 mrg
3166 1.1 mrg where the final argument is an integer constant expression in the
3167 1.1 mrg range [0, 7]. */
3168 1.1 mrg struct tmad_def : public overloaded_base<0>
3169 1.1 mrg {
3170 1.1 mrg void
3171 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3172 1.1 mrg {
3173 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3174 1.1 mrg build_all (b, "v0,v0,v0,su64", group, MODE_none);
3175 1.1 mrg }
3176 1.1 mrg
3177 1.1 mrg tree
3178 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3179 1.1 mrg {
3180 1.1 mrg return r.resolve_uniform (2, 1);
3181 1.1 mrg }
3182 1.1 mrg
3183 1.1 mrg bool
3184 1.1 mrg check (function_checker &c) const OVERRIDE
3185 1.1 mrg {
3186 1.1 mrg return c.require_immediate_range (2, 0, 7);
3187 1.1 mrg }
3188 1.1 mrg };
3189 1.1 mrg SHAPE (tmad)
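
/* For example, the trigonometric multiply-add intrinsic svtmad has this
   shape:

     svfloat32_t svtmad[_f32](svfloat32_t, svfloat32_t, uint64_t)

   where the final argument selects one of eight coefficient sets.  */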
3190 1.1 mrg
3191 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0>_t)
3192 1.1 mrg
3193 1.1 mrg i.e. the standard shape for unary operations that operate on
3194 1.1 mrg uniform types. */
3195 1.1 mrg struct unary_def : public overloaded_base<0>
3196 1.1 mrg {
3197 1.1 mrg void
3198 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3199 1.1 mrg {
3200 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3201 1.1 mrg build_all (b, "v0,v0", group, MODE_none);
3202 1.1 mrg }
3203 1.1 mrg
3204 1.1 mrg tree
3205 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3206 1.1 mrg {
3207 1.1 mrg return r.resolve_unary ();
3208 1.1 mrg }
3209 1.1 mrg };
3210 1.1 mrg SHAPE (unary)
3211 1.1 mrg
3212 1.1 mrg /* sv<t0>_t svfoo_t0[_t1](sv<t1>_t)
3213 1.1 mrg
3214 1.1 mrg where the target type <t0> must be specified explicitly but the source
3215 1.1 mrg type <t1> can be inferred. */
3216 1.1 mrg struct unary_convert_def : public overloaded_base<1>
3217 1.1 mrg {
3218 1.1 mrg void
3219 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3220 1.1 mrg {
3221 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3222 1.1 mrg build_all (b, "v0,v1", group, MODE_none);
3223 1.1 mrg }
3224 1.1 mrg
3225 1.1 mrg tree
3226 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3227 1.1 mrg {
3228 1.1 mrg return r.resolve_unary (r.type_suffix (0).tclass,
3229 1.1 mrg r.type_suffix (0).element_bits);
3230 1.1 mrg }
3231 1.1 mrg };
3232 1.1 mrg SHAPE (unary_convert)
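
/* For example, a conversion from signed 32-bit integers to
   single-precision floats has t0 = f32 (explicit in the name) and
   t1 = s32 (inferred from the argument):

     svfloat32_t svcvt_f32[_s32](svint32_t)

   again with any governing predicate omitted from the prototype.  */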
3233 1.1 mrg
3234 1.1 mrg /* sv<t0>_t svfoo_t0[_t1](sv<t0>_t, sv<t1>_t)
3235 1.1 mrg
3236 1.1 mrg This is a version of unary_convert in which the even-indexed
3237 1.1 mrg elements are passed in as a first parameter, before any governing
3238 1.1 mrg predicate. */
3239 1.1 mrg struct unary_convert_narrowt_def : public overloaded_base<1>
3240 1.1 mrg {
3241 1.1 mrg void
3242 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3243 1.1 mrg {
3244 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3245 1.1 mrg build_all (b, "v0,v1", group, MODE_none);
3246 1.1 mrg }
3247 1.1 mrg
3248 1.1 mrg tree
3249 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3250 1.1 mrg {
3251 1.1 mrg return r.resolve_unary (r.type_suffix (0).tclass,
3252 1.1 mrg r.type_suffix (0).element_bits, true);
3253 1.1 mrg }
3254 1.1 mrg };
3255 1.1 mrg SHAPE (unary_convert_narrowt)
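
/* For example, the SVE2 narrowing "top" conversion svcvtnt_f32[_f64]
   writes the odd-indexed elements of the result and takes the
   even-indexed elements as its first argument:

     svfloat32_t svcvtnt_f32[_f64](svfloat32_t even, svfloat64_t)

   (governing predicate omitted).  */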
3256 1.1 mrg
3257 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0:half>_t). */
3258 1.1 mrg struct unary_long_def : public overloaded_base<0>
3259 1.1 mrg {
3260 1.1 mrg void
3261 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3262 1.1 mrg {
3263 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3264 1.1 mrg build_all (b, "v0,vh0", group, MODE_none);
3265 1.1 mrg }
3266 1.1 mrg
3267 1.1 mrg tree
3268 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3269 1.1 mrg {
3270 1.1 mrg unsigned int i, nargs;
3271 1.1 mrg type_suffix_index type, result_type;
3272 1.1 mrg if (!r.check_gp_argument (1, i, nargs)
3273 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
3274 1.1 mrg || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
3275 1.1 mrg return error_mark_node;
3276 1.1 mrg
3277 1.1 mrg if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
3278 1.1 mrg return res;
3279 1.1 mrg
3280 1.1 mrg return r.report_no_such_form (type);
3281 1.1 mrg }
3282 1.1 mrg };
3283 1.1 mrg SHAPE (unary_long)
3284 1.1 mrg
3285 1.1 mrg /* sv<t0>_t svfoo[_n]_t0(<t0>_t). */
3286 1.1 mrg struct unary_n_def : public overloaded_base<1>
3287 1.1 mrg {
3288 1.1 mrg void
3289 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3290 1.1 mrg {
3291 1.1 mrg /* The "_n" suffix is optional; the full name has it, but the short
3292 1.1 mrg name doesn't. */
3293 1.1 mrg build_all (b, "v0,s0", group, MODE_n, true);
3294 1.1 mrg }
3295 1.1 mrg
3296 1.1 mrg tree
3297 1.1 mrg resolve (function_resolver &) const OVERRIDE
3298 1.1 mrg {
3299 1.1 mrg /* The short forms just make "_n" implicit, so no resolution is needed. */
3300 1.1 mrg gcc_unreachable ();
3301 1.1 mrg }
3302 1.1 mrg };
3303 1.1 mrg SHAPE (unary_n)
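
/* For example, svdup follows this pattern:

     svfloat32_t svdup[_n]_f32(float32_t)

   i.e. svdup_n_f32 and svdup_f32 name the same function, with the scalar
   argument broadcast to every element of the result.  */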
3304 1.1 mrg
3305 1.1 mrg /* sv<t0:half>_t svfoo[_t0](sv<t0>_t). */
3306 1.1 mrg typedef unary_narrowb_base<> unary_narrowb_def;
3307 1.1 mrg SHAPE (unary_narrowb)
3308 1.1 mrg
3309 1.1 mrg /* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t). */
3310 1.1 mrg typedef unary_narrowt_base<> unary_narrowt_def;
3311 1.1 mrg SHAPE (unary_narrowt)
3312 1.1 mrg
3313 1.1 mrg /* sv<t0:uint:half>_t svfoo[_t0](sv<t0>_t). */
3314 1.1 mrg typedef unary_narrowb_base<TYPE_unsigned> unary_narrowb_to_uint_def;
3315 1.1 mrg SHAPE (unary_narrowb_to_uint)
3316 1.1 mrg
3317 1.1 mrg /* sv<t0:uint:half>_t svfoo[_t0](sv<t0:uint:half>_t, sv<t0>_t). */
3318 1.1 mrg typedef unary_narrowt_base<TYPE_unsigned> unary_narrowt_to_uint_def;
3319 1.1 mrg SHAPE (unary_narrowt_to_uint)
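
/* As concrete examples of the four narrowing shapes above, with t0 = s16
   (so <t0:half> is s8 and <t0:uint:half> is u8), the SVE2 saturating
   narrowing intrinsics follow these patterns:

     svint8_t svqxtnb[_s16](svint16_t)
     svint8_t svqxtnt[_s16](svint8_t even, svint16_t)
     svuint8_t svqxtunb[_s16](svint16_t)
     svuint8_t svqxtunt[_s16](svuint8_t even, svint16_t)  */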
3320 1.1 mrg
3321 1.1 mrg /* svbool_t svfoo(svbool_t). */
3322 1.1 mrg struct unary_pred_def : public nonoverloaded_base
3323 1.1 mrg {
3324 1.1 mrg void
3325 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3326 1.1 mrg {
3327 1.1 mrg build_all (b, "v0,v0", group, MODE_none);
3328 1.1 mrg }
3329 1.1 mrg };
3330 1.1 mrg SHAPE (unary_pred)
3331 1.1 mrg
3332 1.1 mrg /* sv<t0:int>_t svfoo[_t0](sv<t0>_t)
3333 1.1 mrg
3334 1.1 mrg i.e. a version of "unary" in which the returned vector contains
3335 1.1 mrg signed integers. */
3336 1.1 mrg struct unary_to_int_def : public overloaded_base<0>
3337 1.1 mrg {
3338 1.1 mrg void
3339 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3340 1.1 mrg {
3341 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3342 1.1 mrg build_all (b, "vs0,v0", group, MODE_none);
3343 1.1 mrg }
3344 1.1 mrg
3345 1.1 mrg tree
3346 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3347 1.1 mrg {
3348 1.1 mrg return r.resolve_unary (TYPE_signed);
3349 1.1 mrg }
3350 1.1 mrg };
3351 1.1 mrg SHAPE (unary_to_int)
3352 1.1 mrg
3353 1.1 mrg /* sv<t0:uint>_t svfoo[_t0](sv<t0>_t)
3354 1.1 mrg
3355 1.1 mrg i.e. a version of "unary" in which the returned vector contains
3356 1.1 mrg unsigned integers. */
3357 1.1 mrg struct unary_to_uint_def : public overloaded_base<0>
3358 1.1 mrg {
3359 1.1 mrg void
3360 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3361 1.1 mrg {
3362 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3363 1.1 mrg build_all (b, "vu0,v0", group, MODE_none);
3364 1.1 mrg }
3365 1.1 mrg
3366 1.1 mrg tree
3367 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3368 1.1 mrg {
3369 1.1 mrg return r.resolve_unary (TYPE_unsigned);
3370 1.1 mrg }
3371 1.1 mrg };
3372 1.1 mrg SHAPE (unary_to_uint)
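
/* For example, the count-leading-zeros intrinsic returns unsigned
   elements of the same size as its input:

     svuint32_t svclz[_s32](svint32_t)

   (governing predicate omitted).  */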
3373 1.1 mrg
3374 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0:uint>_t)
3375 1.1 mrg
3376 1.1 mrg    where <t0> always belongs to a certain type class, and where <t0:uint>
3377 1.1 mrg therefore uniquely determines <t0>. */
3378 1.1 mrg struct unary_uint_def : public overloaded_base<0>
3379 1.1 mrg {
3380 1.1 mrg void
3381 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3382 1.1 mrg {
3383 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3384 1.1 mrg build_all (b, "v0,vu0", group, MODE_none);
3385 1.1 mrg }
3386 1.1 mrg
3387 1.1 mrg tree
3388 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3389 1.1 mrg {
3390 1.1 mrg unsigned int i, nargs;
3391 1.1 mrg type_suffix_index type;
3392 1.1 mrg if (!r.check_gp_argument (1, i, nargs)
3393 1.1 mrg || (type = r.infer_unsigned_vector_type (i)) == NUM_TYPE_SUFFIXES)
3394 1.1 mrg return error_mark_node;
3395 1.1 mrg
3396 1.1 mrg /* Search for a valid suffix with the same number of bits as TYPE. */
3397 1.1 mrg unsigned int element_bits = type_suffixes[type].element_bits;
3398 1.1 mrg if (type_suffixes[type].unsigned_p)
3399 1.1 mrg for (unsigned int j = 0; j < NUM_TYPE_SUFFIXES; ++j)
3400 1.1 mrg if (type_suffixes[j].element_bits == element_bits)
3401 1.1 mrg if (tree res = r.lookup_form (r.mode_suffix_id,
3402 1.1 mrg type_suffix_index (j)))
3403 1.1 mrg return res;
3404 1.1 mrg
3405 1.1 mrg return r.report_no_such_form (type);
3406 1.1 mrg }
3407 1.1 mrg };
3408 1.1 mrg SHAPE (unary_uint)
3409 1.1 mrg
3410 1.1 mrg /* sv<t0>_t svfoo[_t0](sv<t0:half>_t)
3411 1.1 mrg
3412 1.1 mrg i.e. a version of "unary" in which the source elements are half the
3413 1.1 mrg size of the destination elements, but have the same type class. */
3414 1.1 mrg struct unary_widen_def : public overloaded_base<0>
3415 1.1 mrg {
3416 1.1 mrg void
3417 1.1 mrg build (function_builder &b, const function_group_info &group) const OVERRIDE
3418 1.1 mrg {
3419 1.1 mrg b.add_overloaded_functions (group, MODE_none);
3420 1.1 mrg build_all (b, "v0,vh0", group, MODE_none);
3421 1.1 mrg }
3422 1.1 mrg
3423 1.1 mrg tree
3424 1.1 mrg resolve (function_resolver &r) const OVERRIDE
3425 1.1 mrg {
3426 1.1 mrg unsigned int i, nargs;
3427 1.1 mrg type_suffix_index type;
3428 1.1 mrg if (!r.check_gp_argument (1, i, nargs)
3429 1.1 mrg || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
3430 1.1 mrg return error_mark_node;
3431 1.1 mrg
3432 1.1 mrg /* There is only a single form for predicates. */
3433 1.1 mrg if (type == TYPE_SUFFIX_b)
3434 1.1 mrg return r.resolve_to (r.mode_suffix_id, type);
3435 1.1 mrg
3436 1.1 mrg if (type_suffixes[type].integer_p
3437 1.1 mrg && type_suffixes[type].element_bits < 64)
3438 1.1 mrg {
3439 1.1 mrg type_suffix_index wide_suffix
3440 1.1 mrg = find_type_suffix (type_suffixes[type].tclass,
3441 1.1 mrg type_suffixes[type].element_bits * 2);
3442 1.1 mrg if (tree res = r.lookup_form (r.mode_suffix_id, wide_suffix))
3443 1.1 mrg return res;
3444 1.1 mrg }
3445 1.1 mrg
3446 1.1 mrg return r.report_no_such_form (type);
3447 1.1 mrg }
3448 1.1 mrg };
3449 1.1 mrg SHAPE (unary_widen)
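
/* For example, svunpklo and svunpkhi follow this pattern:

     svint16_t svunpklo[_s16](svint8_t)
     svbool_t svunpklo[_b](svbool_t)

   i.e. the type suffix names the (wider) result type, which the resolver
   derives from the argument type, with a single form for predicates.  */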
3450 1.1 mrg
3451 1.1 mrg }
3452