/* Subroutines for the C front end on the PowerPC architecture.
2 Copyright (C) 2002-2022 Free Software Foundation, Inc.
3
4 Contributed by Zack Weinberg <zack (at) codesourcery.com>
5 and Paolo Bonzini <bonzini (at) gnu.org>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "target.h"
29 #include "c-family/c-common.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "stor-layout.h"
34 #include "c-family/c-pragma.h"
35 #include "langhooks.h"
36 #include "c/c-tree.h"
37
38 #include "rs6000-internal.h"
39
40 /* Handle the machine specific pragma longcall. Its syntax is
41
42 # pragma longcall ( TOGGLE )
43
44 where TOGGLE is either 0 or 1.
45
46 rs6000_default_long_calls is set to the value of TOGGLE, changing
47 whether or not new function declarations receive a longcall
48 attribute by default. */
49
50 void
51 rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
52 {
53 #define SYNTAX_ERROR(gmsgid) do { \
54 warning (OPT_Wpragmas, gmsgid); \
55 warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>"); \
56 return; \
57 } while (0)
58
59
60
61 tree x, n;
62
63 /* If we get here, generic code has already scanned the directive
64 leader and the word "longcall". */
65
66 if (pragma_lex (&x) != CPP_OPEN_PAREN)
67 SYNTAX_ERROR ("missing open paren");
68 if (pragma_lex (&n) != CPP_NUMBER)
69 SYNTAX_ERROR ("missing number");
70 if (pragma_lex (&x) != CPP_CLOSE_PAREN)
71 SYNTAX_ERROR ("missing close paren");
72
73 if (n != integer_zero_node && n != integer_one_node)
74 SYNTAX_ERROR ("number must be 0 or 1");
75
76 if (pragma_lex (&x) != CPP_EOF)
77 warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");
78
79 rs6000_default_long_calls = (n == integer_one_node);
80 }
81
/* Handle defining many CPP flags based on TARGET_xxx.  As a general
   policy, rather than trying to guess what flags a user might want a
   #define for, it's better to define a flag for everything.  */

/* Shorthands used by rs6000_cpu_cpp_builtins below; both expand
   against the local "pfile" argument of that function.  */
#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)

/* Keep the AltiVec keywords handy for fast comparisons.  These are
   GC-rooted identifier trees initialized by init_vector_keywords.  */
static GTY(()) tree __vector_keyword;
static GTY(()) tree vector_keyword;
static GTY(()) tree __pixel_keyword;
static GTY(()) tree pixel_keyword;
static GTY(()) tree __bool_keyword;
static GTY(()) tree bool_keyword;
static GTY(()) tree _Bool_keyword;
/* Identifiers for the 128-bit integer types; only set when the target
   has ISA 2.07 quad-word support (TARGET_VADDUQM).  */
static GTY(()) tree __int128_type;
static GTY(()) tree __uint128_type;

/* Preserved across calls to rs6000_macro_to_expand: records whether a
   following "pixel" or "bool" keyword must also be expanded.  */
static tree expand_bool_pixel;
102
103 static cpp_hashnode *
104 altivec_categorize_keyword (const cpp_token *tok)
105 {
106 if (tok->type == CPP_NAME)
107 {
108 cpp_hashnode *ident = tok->val.node.node;
109
110 if (ident == C_CPP_HASHNODE (vector_keyword))
111 return C_CPP_HASHNODE (__vector_keyword);
112
113 if (ident == C_CPP_HASHNODE (pixel_keyword))
114 return C_CPP_HASHNODE (__pixel_keyword);
115
116 if (ident == C_CPP_HASHNODE (bool_keyword))
117 return C_CPP_HASHNODE (__bool_keyword);
118
119 if (ident == C_CPP_HASHNODE (_Bool_keyword))
120 return C_CPP_HASHNODE (__bool_keyword);
121
122 return ident;
123 }
124
125 return 0;
126 }
127
128 static void
129 init_vector_keywords (void)
130 {
131 /* Keywords without two leading underscores are context-sensitive, and hence
132 implemented as conditional macros, controlled by the
133 rs6000_macro_to_expand() function below. If we have ISA 2.07 64-bit
134 support, record the __int128_t and __uint128_t types. */
135
136 __vector_keyword = get_identifier ("__vector");
137 C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
138
139 __pixel_keyword = get_identifier ("__pixel");
140 C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
141
142 __bool_keyword = get_identifier ("__bool");
143 C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
144
145 vector_keyword = get_identifier ("vector");
146 C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
147
148 pixel_keyword = get_identifier ("pixel");
149 C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
150
151 bool_keyword = get_identifier ("bool");
152 C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
153
154 _Bool_keyword = get_identifier ("_Bool");
155 C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
156
157 if (TARGET_VADDUQM)
158 {
159 __int128_type = get_identifier ("__int128_t");
160 __uint128_type = get_identifier ("__uint128_t");
161 }
162 }
163
164 /* Helper function to find out which RID_INT_N_* code is the one for
165 __int128, if any. Returns RID_MAX+1 if none apply, which is safe
166 (for our purposes, since we always expect to have __int128) to
167 compare against. */
168 static int
169 rid_int128(void)
170 {
171 int i;
172
173 for (i = 0; i < NUM_INT_N_ENTS; i ++)
174 if (int_n_enabled_p[i]
175 && int_n_data[i].bitsize == 128)
176 return RID_INT_N_0 + i;
177
178 return RID_MAX + 1;
179 }
180
/* Called to decide whether a conditional macro should be expanded.
   Since we have exactly one such macro (i.e., 'vector'), we do not
   need to examine the 'tok' parameter.

   Returns the hashnode to expand, or NULL to suppress expansion.  The
   decision is made by peeking ahead at the following tokens: "vector"
   only behaves as a keyword when followed by a type that can form an
   AltiVec vector type.  */

static cpp_hashnode *
rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
{
  cpp_hashnode *expand_this = tok->val.node.node;
  cpp_hashnode *ident;

  /* If the current machine does not have altivec, don't look for the
     keywords.  */
  if (!TARGET_ALTIVEC)
    return NULL;

  ident = altivec_categorize_keyword (tok);

  /* Default to not expanding unless one of the cases below applies.  */
  if (ident != expand_this)
    expand_this = NULL;

  if (ident == C_CPP_HASHNODE (__vector_keyword))
    {
      /* Skip padding tokens to find the first real token after
	 "vector".  */
      int idx = 0;
      do
	tok = cpp_peek_token (pfile, idx++);
      while (tok->type == CPP_PADDING);
      ident = altivec_categorize_keyword (tok);

      if (ident == C_CPP_HASHNODE (__pixel_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __pixel_keyword;
	}
      else if (ident == C_CPP_HASHNODE (__bool_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __bool_keyword;
	}
      /* The boost libraries have code with Iterator::vector vector in it.  If
	 we allow the normal handling, this module will be called recursively,
	 and the vector will be skipped.  */
      else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
	{
	  enum rid rid_code = (enum rid)(ident->rid_code);
	  bool is_macro = cpp_macro_p (ident);

	  /* If there is a function-like macro, check if it is going to be
	     invoked with or without arguments.  Without following ( treat
	     it like non-macro, otherwise the following cpp_get_token eats
	     what should be preserved.  */
	  if (is_macro && cpp_fun_like_macro_p (ident))
	    {
	      int idx2 = idx;
	      do
		tok = cpp_peek_token (pfile, idx2++);
	      while (tok->type == CPP_PADDING);
	      if (tok->type != CPP_OPEN_PAREN)
		is_macro = false;
	    }

	  if (is_macro)
	    {
	      /* Consume the tokens already peeked so the macro gets
		 expanded, then re-peek and re-categorize what follows
		 the expansion.  */
	      do
		(void) cpp_get_token (pfile);
	      while (--idx > 0);
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);
	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __pixel_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __bool_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident)
		rid_code = (enum rid)(ident->rid_code);
	    }

	  /* Expand "vector" only when it is followed by a type keyword
	     that can legally form an AltiVec vector type.  */
	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
	      || rid_code == RID_INT || rid_code == RID_CHAR
	      || rid_code == RID_FLOAT
	      || (rid_code == RID_DOUBLE && TARGET_VSX)
	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
	    {
	      expand_this = C_CPP_HASHNODE (__vector_keyword);
	      /* If the next keyword is bool or pixel, it
		 will need to be expanded as well.  */
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);

	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		expand_bool_pixel = __pixel_keyword;
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		expand_bool_pixel = __bool_keyword;
	      else
		{
		  /* Try two tokens down, too.  */
		  do
		    tok = cpp_peek_token (pfile, idx++);
		  while (tok->type == CPP_PADDING);
		  ident = altivec_categorize_keyword (tok);
		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
		    expand_bool_pixel = __pixel_keyword;
		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
		    expand_bool_pixel = __bool_keyword;
		}
	    }

	  /* Support vector __int128_t, but we don't need to worry about bool
	     or pixel on this type.  */
	  else if (TARGET_VADDUQM
		   && (ident == C_CPP_HASHNODE (__int128_type)
		       || ident == C_CPP_HASHNODE (__uint128_type)))
	    expand_this = C_CPP_HASHNODE (__vector_keyword);
	}
    }
  /* expand_bool_pixel was set by a preceding "vector" expansion; honor
     it for the trailing pixel/bool keyword and then reset it.  */
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
    {
      expand_this = C_CPP_HASHNODE (__pixel_keyword);
      expand_bool_pixel = 0;
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
    {
      expand_this = C_CPP_HASHNODE (__bool_keyword);
      expand_bool_pixel = 0;
    }

  return expand_this;
}
320
321
322 /* Define or undefine a single macro. */
323
324 static void
325 rs6000_define_or_undefine_macro (bool define_p, const char *name)
326 {
327 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
328 fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);
329
330 if (define_p)
331 cpp_define (parse_in, name);
332 else
333 cpp_undef (parse_in, name);
334 }
335
336 /* Define or undefine macros based on the current target. If the user does
337 #pragma GCC target, we need to adjust the macros dynamically. Note, some of
338 the options needed for builtins have been moved to separate variables, so
339 have both the target flags and the builtin flags as arguments. */
340
void
rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
			     HOST_WIDE_INT bu_mask)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr,
	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
	     (define_p) ? "define" : "undef",
	     flags, bu_mask);

  /* Each of the flags mentioned below controls whether certain
     preprocessor macros will be automatically defined when
     preprocessing source files for compilation by this compiler.
     While most of these flags can be enabled or disabled
     explicitly by specifying certain command-line options when
     invoking the compiler, there are also many ways in which these
     flags are enabled or disabled implicitly, based on compiler
     defaults, configuration choices, and on the presence of certain
     related command-line options.  Many, but not all, of these
     implicit behaviors can be found in file "rs6000.cc", the
     rs6000_option_override_internal() function.

     In general, each of the flags may be automatically enabled in
     any of the following conditions:

     1. If no -mcpu target is specified on the command line and no
	--with-cpu target is specified to the configure command line
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     2. If the target specified with -mcpu=target on the command line, or
	in the absence of a -mcpu=target command-line option, if the
	target specified using --with-cpu=target on the configure
	command line, is disqualified because the associated binary
	tools (e.g. the assembler) lack support for the requested cpu,
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     3. If either of the above two conditions apply except that the
	TARGET_DEFAULT macro is defined to equal zero, and
	TARGET_POWERPC64 and
	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
	   target), or
	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_POWERPC64 or it is one of the flags included in
	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).

     4. If a cpu has been requested with a -mcpu=target command-line option
	and this cpu has not been disqualified due to shortcomings of the
	binary tools, and the set of flags associated with the requested cpu
	include the flag to be enabled.  See rs6000-cpus.def for macro
	definitions that represent various ABI standards
	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
	the specific flags that are associated with each of the cpu
	choices that can be specified as the target of a -mcpu=target
	compile option, or as the target of a --with-cpu=target
	configure option.  Target flags that are specified in either
	of these two ways are considered "implicit" since the flags
	are not mentioned specifically by name.

	Additional documentation describing behavior specific to
	particular flags is provided below, immediately preceding the
	use of each relevant flag.

     5. If there is no -mcpu=target command-line option, and the cpu
	requested by a --with-cpu=target command-line option has not
	been disqualified due to shortcomings of the binary tools, and
	the set of flags associated with the specified target include
	the flag to be enabled.  See the notes immediately above for a
	summary of the flags associated with particular cpu
	definitions.  */

  /* rs6000_isa_flags based options.  _ARCH_PPC is unconditional; the
     rest map individual ISA option masks to _ARCH_* macros.  */
  rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
  if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
  if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
  if ((flags & OPTION_MASK_POWERPC64) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
  if ((flags & OPTION_MASK_MFCRF) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
  if ((flags & OPTION_MASK_POPCNTB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
  if ((flags & OPTION_MASK_FPRND) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
  if ((flags & OPTION_MASK_CMPB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
  if ((flags & OPTION_MASK_POPCNTD) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
  if ((flags & OPTION_MASK_POWER8) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
  if ((flags & OPTION_MASK_MODULO) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
  if ((flags & OPTION_MASK_POWER10) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
  if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
  if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
    rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
  /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
     in any of the following conditions:
     1. The operating system is Darwin and it is configured for 64
	bit.  (See darwin_rs6000_override_options.)
     2. The operating system is Darwin and the operating system
	version is 10.5 or higher and the user has not explicitly
	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
	the compiler is not producing code for integration within the
	kernel.  (See darwin_rs6000_override_options.)
     Note that the OPTION_MASK_ALTIVEC flag is automatically turned
     off in any of the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_ALTIVEC
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_ALTIVEC) != 0)
    {
      /* __VEC__ carries a version value when defining; when undefining
	 only the bare name is needed.  */
      const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
      rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
      rs6000_define_or_undefine_macro (define_p, vec_str);

      /* Define this when supporting context-sensitive keywords.  */
      if (!flag_iso)
	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
      if (rs6000_aix_extabi)
	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
    }
  /* Note that the OPTION_MASK_VSX flag is automatically turned on in
     the following conditions:
     1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
	was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been explicitly turned on.
     Note that the OPTION_MASK_VSX flag is automatically turned off in
     the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
	compilation context, or if it is turned on by any means in an
	inner compilation context.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been turned off explicitly.
     4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     5. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_VSX flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_VSX
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_VSX) != 0)
    rs6000_define_or_undefine_macro (define_p, "__VSX__");
  if ((flags & OPTION_MASK_HTM) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__HTM__");
      /* Tell the user that our HTM insn patterns act as memory barriers.  */
      rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
    }
  /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     on in the following conditions:
     1. TARGET_P9_VECTOR is explicitly turned on and
	OPTION_MASK_P8_VECTOR is not explicitly turned off.
	Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
	have been turned off explicitly.
     Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     off in the following conditions:
     1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
	not turned on explicitly.
     2. If TARGET_ALTIVEC is turned off.  Hereafter, the
	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
	explicitly.
     3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
	explicitly enabled.  If TARGET_VSX is explicitly enabled, the
	OPTION_MASK_P8_VECTOR flag is hereafter also considered to
	have been turned off explicitly.  */
  if ((flags & OPTION_MASK_P8_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
  /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
     off in the following conditions:
     1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
	not turned on explicitly.  Hereafter, if OPTION_MASK_P8_VECTOR
	was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
	also considered to have been turned off explicitly.
     Note that the OPTION_MASK_P9_VECTOR is automatically turned on
     in the following conditions:
     1. If TARGET_P9_MINMAX was turned on explicitly.
	Hereafter, THE OPTION_MASK_P9_VECTOR flag is considered to
	have been turned on explicitly.  */
  if ((flags & OPTION_MASK_P9_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
  /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
	load/store are disabled on little endian).  */
  if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
  /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
     automatically turned on in the following conditions:
     1. If TARGET_QUAD_MEMORY and this flag was not explicitly
	disabled.  */
  if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
  /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
     in the following conditions:
     1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
	on explicitly.
     2. If TARGET_ALTIVEC is turned off.  */
  if ((flags & OPTION_MASK_CRYPTO) != 0)
    rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
  if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
      /* __float128 is defined as an alias for __ieee128; undefining
	 only needs the bare name.  */
      if (define_p)
	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
      else
	rs6000_define_or_undefine_macro (false, "__float128");
      /* Only define the size macro when the IEEE 128-bit type really
	 exists on this target.  */
      if (ieee128_float_type_node && define_p)
	rs6000_define_or_undefine_macro (true, "__SIZEOF_FLOAT128__=16");
      else
	rs6000_define_or_undefine_macro (false, "__SIZEOF_FLOAT128__");
    }
  /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
     via the target attribute/pragma.  */
  if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
    rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");

  /* options from the builtin masks.  */
  /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
     PROCESSOR_CELL) (e.g. -mcpu=cell).  */
  if ((bu_mask & RS6000_BTM_CELL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PPU__");

  /* Tell the user if we support the MMA instructions.  */
  if ((flags & OPTION_MASK_MMA) != 0)
    rs6000_define_or_undefine_macro (define_p, "__MMA__");
  /* Whether pc-relative code is being generated.  */
  if ((flags & OPTION_MASK_PCREL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PCREL__");
  /* Tell the user -mrop-protect is in play.  */
  if (rs6000_rop_protect)
    rs6000_define_or_undefine_macro (define_p, "__ROP_PROTECT__");
}
601
/* Register all target-specific built-in preprocessor macros for the
   initial compilation context.  Later #pragma GCC target changes go
   through rs6000_target_modify_macros instead.  */

void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
  /* Define all of the common macros.  */
  rs6000_target_modify_macros (true, rs6000_isa_flags,
			       rs6000_builtin_mask_calculate ());

  /* Reciprocal estimate instruction availability.  */
  if (TARGET_FRE)
    builtin_define ("__RECIP__");
  if (TARGET_FRES)
    builtin_define ("__RECIPF__");
  if (TARGET_FRSQRTE)
    builtin_define ("__RSQRTE__");
  if (TARGET_FRSQRTES)
    builtin_define ("__RSQRTEF__");
  if (TARGET_FLOAT128_TYPE)
    builtin_define ("__FLOAT128_TYPE__");
  if (ibm128_float_type_node)
    builtin_define ("__SIZEOF_IBM128__=16");
  if (ieee128_float_type_node)
    builtin_define ("__SIZEOF_IEEE128__=16");
#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  builtin_define ("__BUILTIN_CPU_SUPPORTS__");
#endif

  /* The AltiVec keyword macros are meaningless when preprocessing
     assembly, so only install them for C-family languages.  */
  if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
    {
      /* Define the AltiVec syntactic elements.  */
      builtin_define ("__vector=__attribute__((altivec(vector__)))");
      builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
      builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");

      if (!flag_iso)
	{
	  /* The self-expanding definitions make the identifiers visible
	     as conditional macros; rs6000_macro_to_expand decides
	     whether each use is really the AltiVec keyword.  */
	  builtin_define ("vector=vector");
	  builtin_define ("pixel=pixel");
	  builtin_define ("bool=bool");
	  builtin_define ("_Bool=_Bool");
	  init_vector_keywords ();

	  /* Enable context-sensitive macros.  */
	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
	}
    }
  if (!TARGET_HARD_FLOAT)
    builtin_define ("_SOFT_DOUBLE");
  /* Used by lwarx/stwcx. errata work-around.  */
  if (rs6000_cpu == PROCESSOR_PPC405)
    builtin_define ("__PPC405__");
  /* Used by libstdc++.  */
  if (TARGET_NO_LWSYNC)
    builtin_define ("__NO_LWSYNC__");

  if (TARGET_EXTRA_BUILTINS)
    {
      /* For the VSX builtin functions identical to Altivec functions, just map
	 the altivec builtin into the vsx version (the altivec functions
	 generate VSX code if -mvsx).  */
      builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
      builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
      builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
      builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
      builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
      builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
      builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");

      /* Also map the a and m versions of the multiply/add instructions to the
	 builtin for people blindly going off the instruction manual.  */
      builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
      builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
    }

  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
  if (TARGET_FLOAT128_TYPE)
    {
      builtin_define ("__builtin_fabsq=__builtin_fabsf128");
      builtin_define ("__builtin_copysignq=__builtin_copysignf128");
      builtin_define ("__builtin_nanq=__builtin_nanf128");
      builtin_define ("__builtin_nansq=__builtin_nansf128");
      builtin_define ("__builtin_infq=__builtin_inff128");
      builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
    }

  /* Tell users they can use __builtin_bswap{16,64}.  */
  builtin_define ("__HAVE_BSWAP__");

  /* May be overridden by target configuration.  */
  RS6000_CPU_CPP_ENDIAN_BUILTINS();

  if (TARGET_LONG_DOUBLE_128)
    {
      builtin_define ("__LONG_DOUBLE_128__");
      builtin_define ("__LONGDOUBLE128");

      if (TARGET_IEEEQUAD)
	{
	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
	     IEEE 128-bit floating point complex type for C++ (which does not
	     support _Float128 _Complex).  If the default for long double is
	     IEEE 128-bit mode, the library would need to use
	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
	     is a stop-gap to build with the older libraries, until we
	     get an updated library.  */
	  builtin_define ("__LONG_DOUBLE_IEEE128__");
	  builtin_define ("__KF__=__TF__");
	  builtin_define ("__KC__=__TC__");
	}
      else
	builtin_define ("__LONG_DOUBLE_IBM128__");
    }

  switch (TARGET_CMODEL)
    {
      /* Deliberately omit __CMODEL_SMALL__ since that was the default
	 before -mcmodel support was added.  */
    case CMODEL_MEDIUM:
      builtin_define ("__CMODEL_MEDIUM__");
      break;
    case CMODEL_LARGE:
      builtin_define ("__CMODEL_LARGE__");
      break;
    default:
      break;
    }

  switch (rs6000_current_abi)
    {
    case ABI_V4:
      builtin_define ("_CALL_SYSV");
      break;
    case ABI_AIX:
      builtin_define ("_CALL_AIXDESC");
      builtin_define ("_CALL_AIX");
      builtin_define ("_CALL_ELF=1");
      break;
    case ABI_ELFv2:
      builtin_define ("_CALL_ELF=2");
      break;
    case ABI_DARWIN:
      builtin_define ("_CALL_DARWIN");
      break;
    default:
      break;
    }

  /* Vector element order.  */
  if (BYTES_BIG_ENDIAN)
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
  else
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");

  /* Let the compiled code know if 'f' class registers will not be available.  */
  if (TARGET_SOFT_FLOAT)
    builtin_define ("__NO_FPRS__");

  /* Whether aggregates passed by value are aligned to a 16 byte boundary
     if their alignment is 16 bytes or larger.  */
  if ((TARGET_MACHO && rs6000_darwin64_abi)
      || DEFAULT_ABI == ABI_ELFv2
      || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
    builtin_define ("__STRUCT_PARM_ALIGN__=16");
}
778
779
780
782 /* Convert a type stored into a struct altivec_builtin_types as ID,
783 into a tree. The types are in rs6000_builtin_types: negative values
784 create a pointer type for the type associated to ~ID. Note it is
785 a logical NOT, rather than a negation, otherwise you cannot represent
786 a pointer type for ID 0. */
787
788 static inline tree
789 rs6000_builtin_type (int id)
790 {
791 tree t;
792 t = rs6000_builtin_types[id < 0 ? ~id : id];
793 return id < 0 ? build_pointer_type (t) : t;
794 }
795
796 /* Check whether the type of an argument, T, is compatible with a type ID
797 stored into a struct altivec_builtin_types. Integer types are considered
798 compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
799 the decision. Also allow long double and _Float128 to be compatible if
800 -mabi=ieeelongdouble. */
801
802 static inline bool
803 is_float128_p (tree t)
804 {
805 return (t == float128_type_node
806 || (TARGET_IEEEQUAD
807 && TARGET_LONG_DOUBLE_128
808 && t == long_double_type_node));
809 }
810
811
812 /* Return true iff ARGTYPE can be compatibly passed as PARMTYPE. */
813 static bool
814 rs6000_builtin_type_compatible (tree parmtype, tree argtype)
815 {
816 if (parmtype == error_mark_node)
817 return false;
818
819 if (INTEGRAL_TYPE_P (parmtype) && INTEGRAL_TYPE_P (argtype))
820 return true;
821
822 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
823 && is_float128_p (parmtype) && is_float128_p (argtype))
824 return true;
825
826 if (POINTER_TYPE_P (parmtype) && POINTER_TYPE_P (argtype))
827 {
828 parmtype = TREE_TYPE (parmtype);
829 argtype = TREE_TYPE (argtype);
830 if (TYPE_READONLY (argtype))
831 parmtype = build_qualified_type (parmtype, TYPE_QUAL_CONST);
832 }
833
834 return lang_hooks.types_compatible_p (parmtype, argtype);
835 }
836
837 /* In addition to calling fold_convert for EXPR of type TYPE, also
838 call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
839 hiding there (PR47197). */
840
841 static tree
842 fully_fold_convert (tree type, tree expr)
843 {
844 tree result = fold_convert (type, expr);
845 bool maybe_const = true;
846
847 if (!c_dialect_cxx ())
848 result = c_fully_fold (result, false, &maybe_const);
849
850 return result;
851 }
852
/* Build a tree for a function call to an Altivec non-overloaded builtin.
   FNTYPE is the prototype of the underlying builtin BIF_ID, which was
   selected for the overloaded function OVLD_ID; RET_TYPE is the type the
   resolved call should return.  The N arguments are given in ARGS.

   Actually the only thing it does is calling fold_convert on ARGS, with
   a small exception for vec_{all,any}_{ge,le} predicates.  */

static tree
altivec_build_resolved_builtin (tree *args, int n, tree fntype, tree ret_type,
				rs6000_gen_builtins bif_id,
				rs6000_gen_builtins ovld_id)
{
  tree argtypes = TYPE_ARG_TYPES (fntype);
  tree arg_type[MAX_OVLD_ARGS];
  tree fndecl = rs6000_builtin_decls[bif_id];

  /* Collect the parameter types of the underlying builtin so that each
     argument can be converted to the type the builtin expects.  */
  for (int i = 0; i < n; i++)
    {
      arg_type[i] = TREE_VALUE (argtypes);
      argtypes = TREE_CHAIN (argtypes);
    }

  /* The AltiVec overloading implementation is overall gross, but this
     is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
     are completely different for floating-point vs. integer vector
     types, because the former has vcmpgefp, but the latter should use
     vcmpgtXX.

     In practice, the second and third arguments are swapped, and the
     condition (LT vs. EQ, which is recognizable by bit 1 of the first
     argument) is reversed.  Patch the arguments here before building
     the resolved CALL_EXPR.  */
  if (n == 3
      && ovld_id == RS6000_OVLD_VEC_CMPGE_P
      && bif_id != RS6000_BIF_VCMPGEFP_P
      && bif_id != RS6000_BIF_XVCMPGEDP_P)
    {
      std::swap (args[1], args[2]);
      std::swap (arg_type[1], arg_type[2]);

      /* Flip bit 1 of the predicate selector to reverse LT <-> EQ.  */
      args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
			     build_int_cst (NULL_TREE, 2));
    }

  /* Convert each argument to the builtin's parameter type, also folding
     away C_MAYBE_CONST_EXPRs (see fully_fold_convert).  */
  for (int j = 0; j < n; j++)
    args[j] = fully_fold_convert (arg_type[j], args[j]);

  /* If the number of arguments to an overloaded function increases,
     we must expand this switch.  */
  gcc_assert (MAX_OVLD_ARGS <= 4);

  tree call;
  switch (n)
    {
    case 0:
      call = build_call_expr (fndecl, 0);
      break;
    case 1:
      call = build_call_expr (fndecl, 1, args[0]);
      break;
    case 2:
      call = build_call_expr (fndecl, 2, args[0], args[1]);
      break;
    case 3:
      call = build_call_expr (fndecl, 3, args[0], args[1], args[2]);
      break;
    case 4:
      call = build_call_expr (fndecl, 4, args[0], args[1], args[2], args[3]);
      break;
    default:
      gcc_unreachable ();
    }
  /* Coerce the call's value to the declared return type of the overload;
     may be a no-op.  */
  return fold_convert (ret_type, call);
}
927
/* Enumeration of possible results from attempted overload resolution.
   This is used by special-case helper functions to tell their caller
   whether they succeeded and what still needs to be done.

   unresolved = Still needs processing
   resolved = Resolved (but may be an error_mark_node)
   resolved_bad = An error that needs handling by the caller.

   The helper functions below always return error_mark_node when they
   set the result to resolved_bad or unresolved.  */

enum resolution { unresolved, resolved, resolved_bad };
937
938 /* Resolve an overloaded vec_mul call and return a tree expression for the
939 resolved call if successful. ARGS contains the arguments to the call.
940 TYPES contains their types. RES must be set to indicate the status of
941 the resolution attempt. LOC contains statement location information. */
942
943 static tree
944 resolve_vec_mul (resolution *res, tree *args, tree *types, location_t loc)
945 {
946 /* vec_mul needs to be special cased because there are no instructions for it
947 for the {un}signed char, {un}signed short, and {un}signed int types. */
948
949 /* Both arguments must be vectors and the types must be compatible. */
950 if (TREE_CODE (types[0]) != VECTOR_TYPE
951 || !lang_hooks.types_compatible_p (types[0], types[1]))
952 {
953 *res = resolved_bad;
954 return error_mark_node;
955 }
956
957 switch (TYPE_MODE (TREE_TYPE (types[0])))
958 {
959 case E_QImode:
960 case E_HImode:
961 case E_SImode:
962 case E_DImode:
963 case E_TImode:
964 /* For scalar types just use a multiply expression. */
965 *res = resolved;
966 return fold_build2_loc (loc, MULT_EXPR, types[0], args[0],
967 fold_convert (types[0], args[1]));
968 case E_SFmode:
969 {
970 /* For floats use the xvmulsp instruction directly. */
971 *res = resolved;
972 tree call = rs6000_builtin_decls[RS6000_BIF_XVMULSP];
973 return build_call_expr (call, 2, args[0], args[1]);
974 }
975 case E_DFmode:
976 {
977 /* For doubles use the xvmuldp instruction directly. */
978 *res = resolved;
979 tree call = rs6000_builtin_decls[RS6000_BIF_XVMULDP];
980 return build_call_expr (call, 2, args[0], args[1]);
981 }
982 /* Other types are errors. */
983 default:
984 *res = resolved_bad;
985 return error_mark_node;
986 }
987 }
988
/* Resolve an overloaded vec_cmpne call and return a tree expression for the
   resolved call if successful.  ARGS contains the arguments to the call.
   TYPES contains their types.  RES must be set to indicate the status of
   the resolution attempt.  LOC contains statement location information.  */

static tree
resolve_vec_cmpne (resolution *res, tree *args, tree *types, location_t loc)
{
  /* vec_cmpne needs to be special cased because there are no instructions
     for it (prior to power 9).  */

  /* Both arguments must be vectors and the types must be compatible.  */
  if (TREE_CODE (types[0]) != VECTOR_TYPE
      || !lang_hooks.types_compatible_p (types[0], types[1]))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  machine_mode arg0_elt_mode = TYPE_MODE (TREE_TYPE (types[0]));

  /* Power9 instructions provide the most efficient implementation of
     ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
     or SFmode or DFmode.  For those modes, or when P9 vector
     instructions are unavailable, expand via vec_cmpeq + vec_nor.  */
  if (!TARGET_P9_VECTOR
      || arg0_elt_mode == DImode
      || arg0_elt_mode == TImode
      || arg0_elt_mode == SFmode
      || arg0_elt_mode == DFmode)
    {
      switch (arg0_elt_mode)
	{
	  /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
	     vec_cmpeq (va, vb)).  */
	  /* Note:  vec_nand also works but opt changes vec_nand's
	     to vec_nor's anyway.  */
	case E_QImode:
	case E_HImode:
	case E_SImode:
	case E_DImode:
	case E_TImode:
	case E_SFmode:
	case E_DFmode:
	  {
	    /* call = vec_cmpeq (va, vb)
	       result = vec_nor (call, call).  Both steps recursively
	       resolve through the overload machinery.  */
	    vec<tree, va_gc> *params = make_tree_vector ();
	    vec_safe_push (params, args[0]);
	    vec_safe_push (params, args[1]);
	    tree decl = rs6000_builtin_decls[RS6000_OVLD_VEC_CMPEQ];
	    tree call = altivec_resolve_overloaded_builtin (loc, decl, params);
	    /* Use save_expr to ensure that operands used more than once
	       that may have side effects (like calls) are only evaluated
	       once.  */
	    call = save_expr (call);
	    params = make_tree_vector ();
	    vec_safe_push (params, call);
	    vec_safe_push (params, call);
	    decl = rs6000_builtin_decls[RS6000_OVLD_VEC_NOR];
	    *res = resolved;
	    return altivec_resolve_overloaded_builtin (loc, decl, params);
	  }
	  /* Other types are errors.  */
	default:
	  *res = resolved_bad;
	  return error_mark_node;
	}
    }

  /* Otherwise this call is unresolved, and altivec_resolve_overloaded_builtin
     will later process the Power9 alternative.  */
  *res = unresolved;
  return error_mark_node;
}
1063
/* Resolve an overloaded vec_adde or vec_sube call and return a tree expression
   for the resolved call if successful.  FCODE identifies which of the two
   overloads is being resolved.  ARGS contains the arguments to the call.
   TYPES contains their types.  RES must be set to indicate the status of
   the resolution attempt.  LOC contains statement location information.  */

static tree
resolve_vec_adde_sube (resolution *res, rs6000_gen_builtins fcode,
		       tree *args, tree *types, location_t loc)
{
  /* vec_adde needs to be special cased because there is no instruction
     for the {un}signed int version.  */

  /* All 3 arguments must be vectors of (signed or unsigned) (int or
     __int128) and the types must be compatible.  */
  if (TREE_CODE (types[0]) != VECTOR_TYPE
      || !lang_hooks.types_compatible_p (types[0], types[1])
      || !lang_hooks.types_compatible_p (types[1], types[2]))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  switch (TYPE_MODE (TREE_TYPE (types[0])))
    {
    /* For {un}signed ints,
       vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
					     vec_and (carryv, 1)).
       vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
					     vec_and (carryv, 1)).  */
    case E_SImode:
      {
	vec<tree, va_gc> *params = make_tree_vector ();
	vec_safe_push (params, args[0]);
	vec_safe_push (params, args[1]);

	/* Pick the add or subtract overload matching the caller.  */
	tree add_sub_builtin;
	if (fcode == RS6000_OVLD_VEC_ADDE)
	  add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
	else
	  add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];

	tree call = altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
							params);
	/* Mask the carry vector down to its low bit before applying it.  */
	tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1);
	tree ones_vector = build_vector_from_val (types[0], const1);
	tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0],
					 args[2], ones_vector);
	params = make_tree_vector ();
	vec_safe_push (params, call);
	vec_safe_push (params, and_expr);
	*res = resolved;
	return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
						   params);
      }
    /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instruction
       directly using the standard machinery.  */
    case E_TImode:
      *res = unresolved;
      break;

    /* Types other than {un}signed int and {un}signed __int128
       are errors.  */
    default:
      *res = resolved_bad;
    }

  return error_mark_node;
}
1133
/* Resolve an overloaded vec_addec or vec_subec call and return a tree
   expression for the resolved call if successful.  FCODE identifies which
   of the two overloads is being resolved.  ARGS contains the arguments
   to the call.  TYPES contains their types.  RES must be set to indicate the
   status of the resolution attempt.  LOC contains statement location
   information.  */

static tree
resolve_vec_addec_subec (resolution *res, rs6000_gen_builtins fcode,
			 tree *args, tree *types, location_t loc)
{
  /* vec_addec and vec_subec needs to be special cased because there is
     no instruction for the (un)signed int version.  */

  /* All 3 arguments must be vectors of (signed or unsigned) (int or
     __int128) and the types must be compatible.  */
  if (TREE_CODE (types[0]) != VECTOR_TYPE
      || !lang_hooks.types_compatible_p (types[0], types[1])
      || !lang_hooks.types_compatible_p (types[1], types[2]))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  switch (TYPE_MODE (TREE_TYPE (types[0])))
    {
    /* For {un}signed ints,
       vec_addec (va, vb, carryv) ==
		vec_or (vec_addc (va, vb),
			vec_addc (vec_add (va, vb),
				  vec_and (carryv, 0x1))).  */
    case E_SImode:
      {
	/* Use save_expr to ensure that operands used more than once that may
	   have side effects (like calls) are only evaluated once.  */
	args[0] = save_expr (args[0]);
	args[1] = save_expr (args[1]);
	vec<tree, va_gc> *params = make_tree_vector ();
	vec_safe_push (params, args[0]);
	vec_safe_push (params, args[1]);

	/* First carry term: vec_addc/vec_subc (va, vb).  */
	tree as_c_builtin;
	if (fcode == RS6000_OVLD_VEC_ADDEC)
	  as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADDC];
	else
	  as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUBC];

	tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
							 params);
	params = make_tree_vector ();
	vec_safe_push (params, args[0]);
	vec_safe_push (params, args[1]);

	/* Partial sum/difference: vec_add/vec_sub (va, vb).  */
	tree as_builtin;
	if (fcode == RS6000_OVLD_VEC_ADDEC)
	  as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
	else
	  as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];

	tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
							 params);
	/* Second carry term: carry out of applying the masked carry bit
	   to the partial result.  */
	tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1);
	tree ones_vector = build_vector_from_val (types[0], const1);
	tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0],
					 args[2], ones_vector);
	params = make_tree_vector ();
	vec_safe_push (params, call2);
	vec_safe_push (params, and_expr);
	call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin, params);
	/* Combine the two carry terms with vec_or.  */
	params = make_tree_vector ();
	vec_safe_push (params, call1);
	vec_safe_push (params, call2);
	tree or_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_OR];
	*res = resolved;
	return altivec_resolve_overloaded_builtin (loc, or_builtin, params);
      }
    /* For {un}signed __int128s use the vaddecuq/vsubbecuq
       instructions.  This occurs through normal processing.  */
    case E_TImode:
      *res = unresolved;
      break;

    /* Types other than {un}signed int and {un}signed __int128
       are errors.  */
    default:
      *res = resolved_bad;
    }

  return error_mark_node;
}
1223
/* Resolve an overloaded vec_splats or vec_promote call and return a tree
   expression for the resolved call if successful.  FCODE identifies which
   of the two overloads is being resolved.  NARGS is the number of
   arguments to the call.  ARGLIST contains the arguments.  RES must be set
   to indicate the status of the resolution attempt.  */

static tree
resolve_vec_splats (resolution *res, rs6000_gen_builtins fcode,
		    vec<tree, va_gc> *arglist, unsigned nargs)
{
  /* Builtin name, used only in the diagnostics below.  */
  const char *name;
  name = fcode == RS6000_OVLD_VEC_SPLATS ? "vec_splats" : "vec_promote";

  if (fcode == RS6000_OVLD_VEC_SPLATS && nargs != 1)
    {
      error ("builtin %qs only accepts 1 argument", name);
      *res = resolved;
      return error_mark_node;
    }

  if (fcode == RS6000_OVLD_VEC_PROMOTE && nargs != 2)
    {
      error ("builtin %qs only accepts 2 arguments", name);
      *res = resolved;
      return error_mark_node;
    }

  /* Ignore promote's element argument.  It must still have integral
     type to be well-formed.  */
  if (fcode == RS6000_OVLD_VEC_PROMOTE
      && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  tree arg = (*arglist)[0];
  tree type = TREE_TYPE (arg);

  /* The value to splat must be a scalar of floating-point or integer
     type.  */
  if (!SCALAR_FLOAT_TYPE_P (type) && !INTEGRAL_TYPE_P (type))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  bool unsigned_p = TYPE_UNSIGNED (type);
  int size;

  /* Map the scalar's mode to the corresponding 128-bit vector type and
     its element count.  */
  switch (TYPE_MODE (type))
    {
    case E_TImode:
      type = unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node;
      size = 1;
      break;
    case E_DImode:
      type = unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node;
      size = 2;
      break;
    case E_SImode:
      type = unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node;
      size = 4;
      break;
    case E_HImode:
      type = unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node;
      size = 8;
      break;
    case E_QImode:
      type = unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node;
      size = 16;
      break;
    case E_SFmode:
      type = V4SF_type_node;
      size = 4;
      break;
    case E_DFmode:
      type = V2DF_type_node;
      size = 2;
      break;
    default:
      *res = resolved_bad;
      return error_mark_node;
    }

  /* Build a CONSTRUCTOR repeating the (converted) scalar in every element.
     save_expr guards against evaluating a side-effecting argument once
     per element.  */
  arg = save_expr (fold_convert (TREE_TYPE (type), arg));
  vec<constructor_elt, va_gc> *vec;
  vec_alloc (vec, size);

  for (int i = 0; i < size; i++)
    {
      constructor_elt elt = {NULL_TREE, arg};
      vec->quick_push (elt);
    }

  *res = resolved;
  return build_constructor (type, vec);
}
1318
/* Resolve an overloaded vec_extract call and return a tree expression for
   the resolved call if successful.  NARGS is the number of arguments to
   the call.  ARGLIST contains the arguments.  RES must be set to indicate
   the status of the resolution attempt.  LOC contains statement location
   information.  */

static tree
resolve_vec_extract (resolution *res, vec<tree, va_gc> *arglist,
		     unsigned nargs, location_t loc)
{
  if (nargs != 2)
    {
      error ("builtin %qs only accepts 2 arguments", "vec_extract");
      *res = resolved;
      return error_mark_node;
    }

  tree arg1 = (*arglist)[0];
  tree arg1_type = TREE_TYPE (arg1);
  tree arg2 = (*arglist)[1];

  /* Argument 0 must be a vector and argument 1 an integral index.  */
  if (TREE_CODE (arg1_type) != VECTOR_TYPE
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  /* See if we can optimize vec_extract with the current VSX instruction
     set.  */
  machine_mode mode = TYPE_MODE (arg1_type);
  tree arg1_inner_type;

  if (VECTOR_MEM_VSX_P (mode))
    {
      tree call = NULL_TREE;
      int nunits = GET_MODE_NUNITS (mode);
      arg2 = fold_for_warn (arg2);

      /* If the second argument is an integer constant, generate
	 the built-in code if we can.  We need 64-bit and direct
	 move to extract the small integer vectors.  */
      if (TREE_CODE (arg2) == INTEGER_CST)
	{
	  /* Reduce the selector modulo the element count.  */
	  wide_int selector = wi::to_wide (arg2);
	  selector = wi::umod_trunc (selector, nunits);
	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	  switch (mode)
	    {
	    case E_V1TImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V1TI];
	      break;

	    case E_V2DFmode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
	      break;

	    case E_V2DImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
	      break;

	    case E_V4SFmode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
	      break;

	    case E_V4SImode:
	      if (TARGET_DIRECT_MOVE_64BIT)
		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
	      break;

	    case E_V8HImode:
	      if (TARGET_DIRECT_MOVE_64BIT)
		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
	      break;

	    case E_V16QImode:
	      if (TARGET_DIRECT_MOVE_64BIT)
		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
	      break;

	    default:
	      break;
	    }
	}

      /* If the second argument is variable, we can optimize it if we are
	 generating 64-bit code on a machine with direct move.  */
      else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
	{
	  switch (mode)
	    {
	    case E_V2DFmode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
	      break;

	    case E_V2DImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
	      break;

	    case E_V4SFmode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
	      break;

	    case E_V4SImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
	      break;

	    case E_V8HImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
	      break;

	    case E_V16QImode:
	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
	      break;

	    default:
	      break;
	    }
	}

      if (call)
	{
	  tree result = build_call_expr (call, 2, arg1, arg2);
	  /* Coerce the result to vector element type.  May be no-op.  */
	  arg1_inner_type = TREE_TYPE (arg1_type);
	  result = fold_convert (arg1_inner_type, result);
	  *res = resolved;
	  return result;
	}
    }

  /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2).  */
  arg1_inner_type = TREE_TYPE (arg1_type);
  /* Mask the index into the vector's bounds.  NOTE(review): this relies
     on the element count being a power of two.  */
  tree subp = build_int_cst (TREE_TYPE (arg2),
			     TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
  arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, subp, 0);

  /* Materialize the vector in a temporary so its address can be taken.  */
  tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  TREE_TYPE (decl) = arg1_type;
  TREE_READONLY (decl) = TYPE_READONLY (arg1_type);

  tree stmt;
  if (c_dialect_cxx ())
    {
      stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
      SET_EXPR_LOCATION (stmt, loc);
    }
  else
    {
      /* In C, express the temporary as a compound literal.  */
      DECL_INITIAL (decl) = arg1;
      stmt = build1 (DECL_EXPR, arg1_type, decl);
      TREE_ADDRESSABLE (decl) = 1;
      SET_EXPR_LOCATION (stmt, loc);
      stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
    }

  /* &temp, cast to element-pointer, offset by the index, dereference.  */
  tree innerptrtype = build_pointer_type (arg1_inner_type);
  stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
  stmt = convert (innerptrtype, stmt);
  stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
  stmt = build_indirect_ref (loc, stmt, RO_NULL);

  /* PR83660: We mark this as having side effects so that downstream in
     fold_build_cleanup_point_expr () it will get a CLEANUP_POINT_EXPR.  If it
     does not we can run into an ICE later in gimplify_cleanup_point_expr ().
     Potentially this causes missed optimization because there actually is no
     side effect.  */
  if (c_dialect_cxx ())
    TREE_SIDE_EFFECTS (stmt) = 1;

  *res = resolved;
  return stmt;
}
1496
/* Resolve an overloaded vec_insert call and return a tree expression for
   the resolved call if successful.  NARGS is the number of arguments to
   the call.  ARGLIST contains the arguments (scalar value, vector,
   index).  RES must be set to indicate the status of the resolution
   attempt.  LOC contains statement location information.  */

static tree
resolve_vec_insert (resolution *res, vec<tree, va_gc> *arglist,
		    unsigned nargs, location_t loc)
{
  if (nargs != 3)
    {
      error ("builtin %qs only accepts 3 arguments", "vec_insert");
      *res = resolved;
      return error_mark_node;
    }

  tree arg0 = (*arglist)[0];
  tree arg1 = (*arglist)[1];
  tree arg1_type = TREE_TYPE (arg1);
  tree arg2 = fold_for_warn ((*arglist)[2]);

  /* Argument 1 must be a vector and argument 2 an integral index.  */
  if (TREE_CODE (arg1_type) != VECTOR_TYPE
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
    {
      *res = resolved_bad;
      return error_mark_node;
    }

  /* If we can use the VSX xxpermdi instruction, use that for insert.  */
  machine_mode mode = TYPE_MODE (arg1_type);

  if ((mode == V2DFmode || mode == V2DImode)
      && VECTOR_UNIT_VSX_P (mode)
      && TREE_CODE (arg2) == INTEGER_CST)
    {
      /* Reduce the selector modulo the two elements.  */
      wide_int selector = wi::to_wide (arg2);
      selector = wi::umod_trunc (selector, 2);
      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);

      tree call = NULL_TREE;
      if (mode == V2DFmode)
	call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DF];
      else if (mode == V2DImode)
	call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DI];

      /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	 reversed.  */
      if (call)
	{
	  *res = resolved;
	  return build_call_expr (call, 3, arg1, arg0, arg2);
	}
    }

  else if (mode == V1TImode
	   && VECTOR_UNIT_VSX_P (mode)
	   && TREE_CODE (arg2) == INTEGER_CST)
    {
      tree call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V1TI];
      /* A one-element vector has only position zero, so force the
	 selector to zero.  */
      wide_int selector = wi::zero(32);
      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);

      /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	 reversed.  */
      *res = resolved;
      return build_call_expr (call, 3, arg1, arg0, arg2);
    }

  /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2) = arg0 with
     VIEW_CONVERT_EXPR.  i.e.:
     D.3192 = v1;
     _1 = n & 3;
     VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i;
     v1 = D.3192;
     D.3194 = v1;  */
  if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
    arg2 = build_int_cst (TREE_TYPE (arg2), 0);
  else
    {
      /* Mask the index into the vector's bounds.  */
      tree c = build_int_cst (TREE_TYPE (arg2),
			      TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
      arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, c, 0);
    }

  /* Materialize the vector in an addressable temporary.  */
  tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  TREE_TYPE (decl) = arg1_type;
  TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
  TREE_ADDRESSABLE (decl) = 1;

  tree stmt;
  if (c_dialect_cxx ())
    {
      stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
      SET_EXPR_LOCATION (stmt, loc);
    }
  else
    {
      /* In C, express the temporary as a compound literal.  */
      DECL_INITIAL (decl) = arg1;
      stmt = build1 (DECL_EXPR, arg1_type, decl);
      SET_EXPR_LOCATION (stmt, loc);
      stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
    }

  if (TARGET_VSX)
    {
      /* Store through an array reference, then yield the whole vector.  */
      stmt = build_array_ref (loc, stmt, arg2);
      stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt,
			  convert (TREE_TYPE (stmt), arg0));
      stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
    }
  else
    {
      /* Otherwise store through a pointer to the element type.  */
      tree arg1_inner_type = TREE_TYPE (arg1_type);
      tree innerptrtype = build_pointer_type (arg1_inner_type);
      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
      stmt = convert (innerptrtype, stmt);
      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
      stmt = build_indirect_ref (loc, stmt, RO_NULL);
      stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
		     convert (TREE_TYPE (stmt), arg0));
      stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
    }

  *res = resolved;
  return stmt;
}
1628
1629 /* Resolve an overloaded vec_step call and return a tree expression for
1630 the resolved call if successful. NARGS is the number of arguments to
1631 the call. ARGLIST contains the arguments. RES must be set to indicate
1632 the status of the resolution attempt. */
1633
1634 static tree
1635 resolve_vec_step (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs)
1636 {
1637 if (nargs != 1)
1638 {
1639 error ("builtin %qs only accepts 1 argument", "vec_step");
1640 *res = resolved;
1641 return error_mark_node;
1642 }
1643
1644 tree arg0 = (*arglist)[0];
1645 tree arg0_type = TREE_TYPE (arg0);
1646
1647 if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1648 {
1649 *res = resolved_bad;
1650 return error_mark_node;
1651 }
1652
1653 *res = resolved;
1654 return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (arg0_type));
1655 }
1656
/* Look for a matching instance in a chain of instances.  INSTANCE points to
   the chain of instances; INSTANCE_CODE is the code identifying the specific
   built-in being searched for; FCODE is the overloaded function code; TYPES
   contains an array of two types that must match the types of the instance's
   parameters; and ARGS contains an array of two arguments to be passed to
   the instance.  If found, resolve the built-in and return it, unless the
   built-in is not supported in context.  In that case, set
   UNSUPPORTED_BUILTIN to true.  If we don't match, return error_mark_node
   and leave UNSUPPORTED_BUILTIN alone.  Note that this helper handles only
   two-argument instances.  */

static tree
find_instance (bool *unsupported_builtin, int *instance,
	       rs6000_gen_builtins instance_code,
	       rs6000_gen_builtins fcode,
	       tree *types, tree *args)
{
  /* Advance down the chain until the requested builtin is found.  */
  while (*instance != -1
	 && rs6000_instance_info[*instance].bifid != instance_code)
    *instance = rs6000_instance_info[*instance].next;

  int inst = *instance;
  gcc_assert (inst != -1);
  /* It is possible for an instance to require a data type that isn't
     defined on this target, in which case rs6000_instance_info_fntype[inst]
     will be NULL.  */
  if (!rs6000_instance_info_fntype[inst])
    return error_mark_node;
  rs6000_gen_builtins bifid = rs6000_instance_info[inst].bifid;
  tree fntype = rs6000_builtin_info_fntype[bifid];
  tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype));
  tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype)));

  if (rs6000_builtin_type_compatible (types[0], parmtype0)
      && rs6000_builtin_type_compatible (types[1], parmtype1))
    {
      /* The types match; build the call only if the builtin is both
	 declared and supported in the current context.  */
      if (rs6000_builtin_decl (bifid, false) != error_mark_node
	  && rs6000_builtin_is_supported (bifid))
	{
	  tree ret_type = TREE_TYPE (rs6000_instance_info_fntype[inst]);
	  return altivec_build_resolved_builtin (args, 2, fntype, ret_type,
						 bifid, fcode);
	}
      else
	*unsupported_builtin = true;
    }

  return error_mark_node;
}
1705
1706 /* Implementation of the resolve_overloaded_builtin target hook, to
1707 support Altivec's overloaded builtins. */
1708
1709 tree
1710 altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
1711 void *passed_arglist)
1712 {
1713 rs6000_gen_builtins fcode
1714 = (rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
1715
1716 /* Return immediately if this isn't an overload. */
1717 if (fcode <= RS6000_OVLD_NONE)
1718 return NULL_TREE;
1719
1720 if (TARGET_DEBUG_BUILTIN)
1721 fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
1722 (int) fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
1723
1724 /* vec_lvsl and vec_lvsr are deprecated for use with LE element order. */
1725 if (fcode == RS6000_OVLD_VEC_LVSL && !BYTES_BIG_ENDIAN)
1726 warning (OPT_Wdeprecated,
1727 "%<vec_lvsl%> is deprecated for little endian; use "
1728 "assignment for unaligned loads and stores");
1729 else if (fcode == RS6000_OVLD_VEC_LVSR && !BYTES_BIG_ENDIAN)
1730 warning (OPT_Wdeprecated,
1731 "%<vec_lvsr%> is deprecated for little endian; use "
1732 "assignment for unaligned loads and stores");
1733
1734 /* Gather the arguments and their types into arrays for easier handling. */
1735 tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
1736 tree types[MAX_OVLD_ARGS];
1737 tree args[MAX_OVLD_ARGS];
1738 unsigned int n;
1739
1740 /* Count the number of expected arguments. */
1741 unsigned expected_args = 0;
1742 for (tree chain = fnargs;
1743 chain && !VOID_TYPE_P (TREE_VALUE (chain));
1744 chain = TREE_CHAIN (chain))
1745 expected_args++;
1746
1747 vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
1748 unsigned int nargs = vec_safe_length (arglist);
1749
1750 /* If the number of arguments did not match the prototype, return NULL
1751 and the generic code will issue the appropriate error message. Skip
1752 this test for functions where we don't fully describe all the possible
1753 overload signatures in rs6000-overload.def (because they aren't relevant
1754 to the expansion here). If we don't, we get confusing error messages. */
1755 /* As an example, for vec_splats we have:
1756
1757 ; There are no actual builtins for vec_splats. There is special handling for
1758 ; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
1759 ; is replaced by a constructor. The single overload here causes
1760 ; __builtin_vec_splats to be registered with the front end so that can happen.
1761 [VEC_SPLATS, vec_splats, __builtin_vec_splats]
1762 vsi __builtin_vec_splats (vsi);
1763 ABS_V4SI SPLATS_FAKERY
1764
1765 So even though __builtin_vec_splats accepts all vector types, the
1766 infrastructure cheats and just records one prototype. We end up getting
1767 an error message that refers to this specific prototype even when we
1768 are handling a different argument type. That is completely confusing
1769 to the user, so it's best to let these cases be handled individually
1770 in the resolve_vec_splats, etc., helper functions. */
1771
1772 if (expected_args != nargs
1773 && !(fcode == RS6000_OVLD_VEC_PROMOTE
1774 || fcode == RS6000_OVLD_VEC_SPLATS
1775 || fcode == RS6000_OVLD_VEC_EXTRACT
1776 || fcode == RS6000_OVLD_VEC_INSERT
1777 || fcode == RS6000_OVLD_VEC_STEP))
1778 return NULL;
1779
1780 for (n = 0;
1781 !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
1782 fnargs = TREE_CHAIN (fnargs), n++)
1783 {
1784 tree decl_type = TREE_VALUE (fnargs);
1785 tree arg = (*arglist)[n];
1786
1787 if (arg == error_mark_node)
1788 return error_mark_node;
1789
1790 if (n >= MAX_OVLD_ARGS)
1791 abort ();
1792
1793 arg = default_conversion (arg);
1794 tree type = TREE_TYPE (arg);
1795
1796 /* The C++ front-end converts float * to const void * using
1797 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */
1798 if (POINTER_TYPE_P (type)
1799 && TREE_CODE (arg) == NOP_EXPR
1800 && lang_hooks.types_compatible_p (TREE_TYPE (arg),
1801 const_ptr_type_node)
1802 && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
1803 ptr_type_node))
1804 {
1805 arg = TREE_OPERAND (arg, 0);
1806 type = TREE_TYPE (arg);
1807 }
1808
1809 /* Remove the const from the pointers to simplify the overload
1810 matching further down. */
1811 if (POINTER_TYPE_P (decl_type)
1812 && POINTER_TYPE_P (type)
1813 && TYPE_QUALS (TREE_TYPE (type)) != 0)
1814 {
1815 if (TYPE_READONLY (TREE_TYPE (type))
1816 && !TYPE_READONLY (TREE_TYPE (decl_type)))
1817 warning (0, "passing argument %d of %qE discards %qs "
1818 "qualifier from pointer target type", n + 1, fndecl,
1819 "const");
1820 type = build_qualified_type (TREE_TYPE (type), 0);
1821 type = build_pointer_type (type);
1822 arg = fold_convert (type, arg);
1823 }
1824
1825 /* For RS6000_OVLD_VEC_LXVL, convert any const * to its non constant
1826 equivalent to simplify the overload matching below. */
1827 if (fcode == RS6000_OVLD_VEC_LXVL
1828 && POINTER_TYPE_P (type)
1829 && TYPE_READONLY (TREE_TYPE (type)))
1830 {
1831 type = build_qualified_type (TREE_TYPE (type), 0);
1832 type = build_pointer_type (type);
1833 arg = fold_convert (type, arg);
1834 }
1835
1836 args[n] = arg;
1837 types[n] = type;
1838 }
1839
1840 /* Some overloads require special handling. */
1841 tree returned_expr = NULL;
1842 resolution res = unresolved;
1843
1844 if (fcode == RS6000_OVLD_VEC_MUL)
1845 returned_expr = resolve_vec_mul (&res, args, types, loc);
1846 else if (fcode == RS6000_OVLD_VEC_CMPNE)
1847 returned_expr = resolve_vec_cmpne (&res, args, types, loc);
1848 else if (fcode == RS6000_OVLD_VEC_ADDE || fcode == RS6000_OVLD_VEC_SUBE)
1849 returned_expr = resolve_vec_adde_sube (&res, fcode, args, types, loc);
1850 else if (fcode == RS6000_OVLD_VEC_ADDEC || fcode == RS6000_OVLD_VEC_SUBEC)
1851 returned_expr = resolve_vec_addec_subec (&res, fcode, args, types, loc);
1852 else if (fcode == RS6000_OVLD_VEC_SPLATS || fcode == RS6000_OVLD_VEC_PROMOTE)
1853 returned_expr = resolve_vec_splats (&res, fcode, arglist, nargs);
1854 else if (fcode == RS6000_OVLD_VEC_EXTRACT)
1855 returned_expr = resolve_vec_extract (&res, arglist, nargs, loc);
1856 else if (fcode == RS6000_OVLD_VEC_INSERT)
1857 returned_expr = resolve_vec_insert (&res, arglist, nargs, loc);
1858 else if (fcode == RS6000_OVLD_VEC_STEP)
1859 returned_expr = resolve_vec_step (&res, arglist, nargs);
1860
1861 if (res == resolved)
1862 return returned_expr;
1863
1864 /* "Regular" built-in functions and overloaded functions share a namespace
1865 for some arrays, like rs6000_builtin_decls. But rs6000_overload_info
1866 only has information for the overloaded functions, so we need an
1867 adjusted index for that. */
1868 unsigned int adj_fcode = fcode - RS6000_OVLD_NONE;
1869
1870 if (res == resolved_bad)
1871 {
1872 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
1873 error ("invalid parameter combination for AltiVec intrinsic %qs", name);
1874 return error_mark_node;
1875 }
1876
1877 bool unsupported_builtin = false;
1878 rs6000_gen_builtins instance_code;
1879 bool supported = false;
1880 int instance = rs6000_overload_info[adj_fcode].first_instance;
1881 gcc_assert (instance != -1);
1882
1883 /* Functions with no arguments can have only one overloaded instance. */
1884 gcc_assert (nargs > 0 || rs6000_instance_info[instance].next == -1);
1885
1886 /* Standard overload processing involves determining whether an instance
1887 exists that is type-compatible with the overloaded function call. In
1888 a couple of cases, we need to do some extra processing to disambiguate
1889 between multiple compatible instances. */
1890 switch (fcode)
1891 {
1892 /* Need to special case __builtin_cmpb because the overloaded forms
1893 of this function take (unsigned int, unsigned int) or (unsigned
1894 long long int, unsigned long long int). Since C conventions
1895 allow the respective argument types to be implicitly coerced into
1896 each other, the default handling does not provide adequate
1897 discrimination between the desired forms of the function. */
1898 case RS6000_OVLD_SCAL_CMPB:
1899 {
1900 machine_mode arg1_mode = TYPE_MODE (types[0]);
1901 machine_mode arg2_mode = TYPE_MODE (types[1]);
1902
1903 /* If any supplied arguments are wider than 32 bits, resolve to
1904 64-bit variant of built-in function. */
1905 if (GET_MODE_PRECISION (arg1_mode) > 32
1906 || GET_MODE_PRECISION (arg2_mode) > 32)
1907 /* Assure all argument and result types are compatible with
1908 the built-in function represented by RS6000_BIF_CMPB. */
1909 instance_code = RS6000_BIF_CMPB;
1910 else
1911 /* Assure all argument and result types are compatible with
1912 the built-in function represented by RS6000_BIF_CMPB_32. */
1913 instance_code = RS6000_BIF_CMPB_32;
1914
1915 tree call = find_instance (&unsupported_builtin, &instance,
1916 instance_code, fcode, types, args);
1917 if (call != error_mark_node)
1918 return call;
1919 break;
1920 }
1921 case RS6000_OVLD_VEC_VSIE:
1922 {
1923 machine_mode arg1_mode = TYPE_MODE (types[0]);
1924
1925 /* If supplied first argument is wider than 64 bits, resolve to
1926 128-bit variant of built-in function. */
1927 if (GET_MODE_PRECISION (arg1_mode) > 64)
1928 {
1929 /* If first argument is of float variety, choose variant
1930 that expects __ieee128 argument. Otherwise, expect
1931 __int128 argument. */
1932 if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1933 instance_code = RS6000_BIF_VSIEQPF;
1934 else
1935 instance_code = RS6000_BIF_VSIEQP;
1936 }
1937 else
1938 {
1939 /* If first argument is of float variety, choose variant
1940 that expects double argument. Otherwise, expect
1941 long long int argument. */
1942 if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1943 instance_code = RS6000_BIF_VSIEDPF;
1944 else
1945 instance_code = RS6000_BIF_VSIEDP;
1946 }
1947
1948 tree call = find_instance (&unsupported_builtin, &instance,
1949 instance_code, fcode, types, args);
1950 if (call != error_mark_node)
1951 return call;
1952 break;
1953 }
1954 default:
1955 /* Standard overload processing. Look for an instance with compatible
1956 parameter types. If it is supported in the current context, resolve
1957 the overloaded call to that instance. */
1958 for (; instance != -1; instance = rs6000_instance_info[instance].next)
1959 {
1960 tree fntype = rs6000_instance_info_fntype[instance];
1961 rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid;
1962 /* It is possible for an instance to require a data type that isn't
1963 defined on this target, in which case fntype will be
1964 NULL. */
1965 if (!fntype)
1966 continue;
1967
1968 bool mismatch = false;
1969 tree nextparm = TYPE_ARG_TYPES (fntype);
1970
1971 for (unsigned int arg_i = 0;
1972 arg_i < nargs && nextparm != NULL;
1973 arg_i++)
1974 {
1975 tree parmtype = TREE_VALUE (nextparm);
1976 if (!rs6000_builtin_type_compatible (types[arg_i], parmtype))
1977 {
1978 mismatch = true;
1979 break;
1980 }
1981 nextparm = TREE_CHAIN (nextparm);
1982 }
1983
1984 if (mismatch)
1985 continue;
1986
1987 supported = rs6000_builtin_is_supported (bifid);
1988 if (rs6000_builtin_decl (bifid, false) != error_mark_node
1989 && supported)
1990 {
1991 tree ret_type = TREE_TYPE (fntype);
1992 fntype = rs6000_builtin_info_fntype[bifid];
1993 return altivec_build_resolved_builtin (args, nargs, fntype,
1994 ret_type, bifid, fcode);
1995 }
1996 else
1997 {
1998 unsupported_builtin = true;
1999 break;
2000 }
2001 }
2002 }
2003
2004 if (unsupported_builtin)
2005 {
2006 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2007 if (!supported)
2008 {
2009 /* Indicate that the instantiation of the overloaded builtin
2010 name is not available with the target flags in effect. */
2011 rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid;
2012 rs6000_gen_builtins fcode = (rs6000_gen_builtins) bifid;
2013 rs6000_invalid_builtin (fcode);
2014 /* Provide clarity of the relationship between the overload
2015 and the instantiation. */
2016 const char *internal_name = rs6000_builtin_info[bifid].bifname;
2017 rich_location richloc (line_table, input_location);
2018 inform (&richloc,
2019 "overloaded builtin %qs is implemented by builtin %qs",
2020 name, internal_name);
2021 }
2022 else
2023 error ("%qs is not supported in this compiler configuration", name);
2024
2025 return error_mark_node;
2026 }
2027
2028 /* If we fall through to here, there were no compatible instances. */
2029 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2030 error ("invalid parameter combination for AltiVec intrinsic %qs", name);
2031 return error_mark_node;
2032 }
2033