1 1.1 mrg /* Subroutines for the C front end on the PowerPC architecture. 2 1.1 mrg Copyright (C) 2002-2022 Free Software Foundation, Inc. 3 1.1 mrg 4 1.1 mrg Contributed by Zack Weinberg <zack (at) codesourcery.com> 5 1.1 mrg and Paolo Bonzini <bonzini (at) gnu.org> 6 1.1 mrg 7 1.1 mrg This file is part of GCC. 8 1.1 mrg 9 1.1 mrg GCC is free software; you can redistribute it and/or modify it 10 1.1 mrg under the terms of the GNU General Public License as published 11 1.1 mrg by the Free Software Foundation; either version 3, or (at your 12 1.1 mrg option) any later version. 13 1.1 mrg 14 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT 15 1.1 mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 16 1.1 mrg or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 17 1.1 mrg License for more details. 18 1.1 mrg 19 1.1 mrg You should have received a copy of the GNU General Public License 20 1.1 mrg along with GCC; see the file COPYING3. If not see 21 1.1 mrg <http://www.gnu.org/licenses/>. */ 22 1.1 mrg 23 1.1 mrg #define IN_TARGET_CODE 1 24 1.1 mrg 25 1.1 mrg #include "config.h" 26 1.1 mrg #include "system.h" 27 1.1 mrg #include "coretypes.h" 28 1.1 mrg #include "target.h" 29 1.1 mrg #include "c-family/c-common.h" 30 1.1 mrg #include "memmodel.h" 31 1.1 mrg #include "tm_p.h" 32 1.1 mrg #include "stringpool.h" 33 1.1 mrg #include "stor-layout.h" 34 1.1 mrg #include "c-family/c-pragma.h" 35 1.1 mrg #include "langhooks.h" 36 1.1 mrg #include "c/c-tree.h" 37 1.1 mrg 38 1.1 mrg #include "rs6000-internal.h" 39 1.1 mrg 40 1.1 mrg /* Handle the machine specific pragma longcall. Its syntax is 41 1.1 mrg 42 1.1 mrg # pragma longcall ( TOGGLE ) 43 1.1 mrg 44 1.1 mrg where TOGGLE is either 0 or 1. 45 1.1 mrg 46 1.1 mrg rs6000_default_long_calls is set to the value of TOGGLE, changing 47 1.1 mrg whether or not new function declarations receive a longcall 48 1.1 mrg attribute by default. 
*/ 49 1.1 mrg 50 1.1 mrg void 51 1.1 mrg rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED) 52 1.1 mrg { 53 1.1 mrg #define SYNTAX_ERROR(gmsgid) do { \ 54 1.1 mrg warning (OPT_Wpragmas, gmsgid); \ 55 1.1 mrg warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>"); \ 56 1.1 mrg return; \ 57 1.1 mrg } while (0) 58 1.1 mrg 59 1.1 mrg 60 1.1 mrg 61 1.1 mrg tree x, n; 62 1.1 mrg 63 1.1 mrg /* If we get here, generic code has already scanned the directive 64 1.1 mrg leader and the word "longcall". */ 65 1.1 mrg 66 1.1 mrg if (pragma_lex (&x) != CPP_OPEN_PAREN) 67 1.1 mrg SYNTAX_ERROR ("missing open paren"); 68 1.1 mrg if (pragma_lex (&n) != CPP_NUMBER) 69 1.1 mrg SYNTAX_ERROR ("missing number"); 70 1.1 mrg if (pragma_lex (&x) != CPP_CLOSE_PAREN) 71 1.1 mrg SYNTAX_ERROR ("missing close paren"); 72 1.1 mrg 73 1.1 mrg if (n != integer_zero_node && n != integer_one_node) 74 1.1 mrg SYNTAX_ERROR ("number must be 0 or 1"); 75 1.1 mrg 76 1.1 mrg if (pragma_lex (&x) != CPP_EOF) 77 1.1 mrg warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>"); 78 1.1 mrg 79 1.1 mrg rs6000_default_long_calls = (n == integer_one_node); 80 1.1 mrg } 81 1.1 mrg 82 1.1 mrg /* Handle defining many CPP flags based on TARGET_xxx. As a general 83 1.1 mrg policy, rather than trying to guess what flags a user might want a 84 1.1 mrg #define for, it's better to define a flag for everything. */ 85 1.1 mrg 86 1.1 mrg #define builtin_define(TXT) cpp_define (pfile, TXT) 87 1.1 mrg #define builtin_assert(TXT) cpp_assert (pfile, TXT) 88 1.1 mrg 89 1.1 mrg /* Keep the AltiVec keywords handy for fast comparisons. 
*/ 90 1.1 mrg static GTY(()) tree __vector_keyword; 91 1.1 mrg static GTY(()) tree vector_keyword; 92 1.1 mrg static GTY(()) tree __pixel_keyword; 93 1.1 mrg static GTY(()) tree pixel_keyword; 94 1.1 mrg static GTY(()) tree __bool_keyword; 95 1.1 mrg static GTY(()) tree bool_keyword; 96 1.1 mrg static GTY(()) tree _Bool_keyword; 97 1.1 mrg static GTY(()) tree __int128_type; 98 1.1 mrg static GTY(()) tree __uint128_type; 99 1.1 mrg 100 1.1 mrg /* Preserved across calls. */ 101 1.1 mrg static tree expand_bool_pixel; 102 1.1 mrg 103 1.1 mrg static cpp_hashnode * 104 1.1 mrg altivec_categorize_keyword (const cpp_token *tok) 105 1.1 mrg { 106 1.1 mrg if (tok->type == CPP_NAME) 107 1.1 mrg { 108 1.1 mrg cpp_hashnode *ident = tok->val.node.node; 109 1.1 mrg 110 1.1 mrg if (ident == C_CPP_HASHNODE (vector_keyword)) 111 1.1 mrg return C_CPP_HASHNODE (__vector_keyword); 112 1.1 mrg 113 1.1 mrg if (ident == C_CPP_HASHNODE (pixel_keyword)) 114 1.1 mrg return C_CPP_HASHNODE (__pixel_keyword); 115 1.1 mrg 116 1.1 mrg if (ident == C_CPP_HASHNODE (bool_keyword)) 117 1.1 mrg return C_CPP_HASHNODE (__bool_keyword); 118 1.1 mrg 119 1.1 mrg if (ident == C_CPP_HASHNODE (_Bool_keyword)) 120 1.1 mrg return C_CPP_HASHNODE (__bool_keyword); 121 1.1 mrg 122 1.1 mrg return ident; 123 1.1 mrg } 124 1.1 mrg 125 1.1 mrg return 0; 126 1.1 mrg } 127 1.1 mrg 128 1.1 mrg static void 129 1.1 mrg init_vector_keywords (void) 130 1.1 mrg { 131 1.1 mrg /* Keywords without two leading underscores are context-sensitive, and hence 132 1.1 mrg implemented as conditional macros, controlled by the 133 1.1 mrg rs6000_macro_to_expand() function below. If we have ISA 2.07 64-bit 134 1.1 mrg support, record the __int128_t and __uint128_t types. 
*/ 135 1.1 mrg 136 1.1 mrg __vector_keyword = get_identifier ("__vector"); 137 1.1 mrg C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL; 138 1.1 mrg 139 1.1 mrg __pixel_keyword = get_identifier ("__pixel"); 140 1.1 mrg C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL; 141 1.1 mrg 142 1.1 mrg __bool_keyword = get_identifier ("__bool"); 143 1.1 mrg C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL; 144 1.1 mrg 145 1.1 mrg vector_keyword = get_identifier ("vector"); 146 1.1 mrg C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL; 147 1.1 mrg 148 1.1 mrg pixel_keyword = get_identifier ("pixel"); 149 1.1 mrg C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL; 150 1.1 mrg 151 1.1 mrg bool_keyword = get_identifier ("bool"); 152 1.1 mrg C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL; 153 1.1 mrg 154 1.1 mrg _Bool_keyword = get_identifier ("_Bool"); 155 1.1 mrg C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL; 156 1.1 mrg 157 1.1 mrg if (TARGET_VADDUQM) 158 1.1 mrg { 159 1.1 mrg __int128_type = get_identifier ("__int128_t"); 160 1.1 mrg __uint128_type = get_identifier ("__uint128_t"); 161 1.1 mrg } 162 1.1 mrg } 163 1.1 mrg 164 1.1 mrg /* Helper function to find out which RID_INT_N_* code is the one for 165 1.1 mrg __int128, if any. Returns RID_MAX+1 if none apply, which is safe 166 1.1 mrg (for our purposes, since we always expect to have __int128) to 167 1.1 mrg compare against. */ 168 1.1 mrg static int 169 1.1 mrg rid_int128(void) 170 1.1 mrg { 171 1.1 mrg int i; 172 1.1 mrg 173 1.1 mrg for (i = 0; i < NUM_INT_N_ENTS; i ++) 174 1.1 mrg if (int_n_enabled_p[i] 175 1.1 mrg && int_n_data[i].bitsize == 128) 176 1.1 mrg return RID_INT_N_0 + i; 177 1.1 mrg 178 1.1 mrg return RID_MAX + 1; 179 1.1 mrg } 180 1.1 mrg 181 1.1 mrg /* Called to decide whether a conditional macro should be expanded. 
   Since we have exactly one such macro (i.e, 'vector'), we do not
   need to examine the 'tok' parameter.

   Registered as the libcpp macro_to_expand callback.  Returns the hash
   node to expand as a macro, or NULL to leave TOK alone.  The bare
   spellings vector/pixel/bool are only treated as AltiVec keywords when
   context (the following tokens) makes that interpretation sensible.  */

static cpp_hashnode *
rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
{
  cpp_hashnode *expand_this = tok->val.node.node;
  cpp_hashnode *ident;

  /* If the current machine does not have altivec, don't look for the
     keywords.  */
  if (!TARGET_ALTIVEC)
    return NULL;

  ident = altivec_categorize_keyword (tok);

  if (ident != expand_this)
    expand_this = NULL;

  if (ident == C_CPP_HASHNODE (__vector_keyword))
    {
      /* Peek ahead (skipping padding tokens) to see what follows
	 "vector"; only expand it when a type keyword follows.  */
      int idx = 0;
      do
	tok = cpp_peek_token (pfile, idx++);
      while (tok->type == CPP_PADDING);
      ident = altivec_categorize_keyword (tok);

      if (ident == C_CPP_HASHNODE (__pixel_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __pixel_keyword;
	}
      else if (ident == C_CPP_HASHNODE (__bool_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __bool_keyword;
	}
      /* The boost libraries have code with Iterator::vector vector in it.  If
	 we allow the normal handling, this module will be called recursively,
	 and the vector will be skipped.; */
      else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
	{
	  enum rid rid_code = (enum rid)(ident->rid_code);
	  bool is_macro = cpp_macro_p (ident);

	  /* If there is a function-like macro, check if it is going to be
	     invoked with or without arguments.  Without following ( treat
	     it like non-macro, otherwise the following cpp_get_token eats
	     what should be preserved.  */
	  if (is_macro && cpp_fun_like_macro_p (ident))
	    {
	      int idx2 = idx;
	      do
		tok = cpp_peek_token (pfile, idx2++);
	      while (tok->type == CPP_PADDING);
	      if (tok->type != CPP_OPEN_PAREN)
		is_macro = false;
	    }

	  if (is_macro)
	    {
	      /* Consume the already-peeked tokens so the macro after
		 "vector" gets expanded, then peek at what it produced.  */
	      do
		(void) cpp_get_token (pfile);
	      while (--idx > 0);
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);
	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __pixel_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __bool_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident)
		rid_code = (enum rid)(ident->rid_code);
	    }

	  /* Expand "vector" only before a vector-element type keyword
	     (double needs VSX, __int128 needs ISA 2.07 vadduqm).  */
	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
	      || rid_code == RID_INT || rid_code == RID_CHAR
	      || rid_code == RID_FLOAT
	      || (rid_code == RID_DOUBLE && TARGET_VSX)
	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
	    {
	      expand_this = C_CPP_HASHNODE (__vector_keyword);
	      /* If the next keyword is bool or pixel, it
		 will need to be expanded as well.  */
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);

	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		expand_bool_pixel = __pixel_keyword;
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		expand_bool_pixel = __bool_keyword;
	      else
		{
		  /* Try two tokens down, too.  */
		  do
		    tok = cpp_peek_token (pfile, idx++);
		  while (tok->type == CPP_PADDING);
		  ident = altivec_categorize_keyword (tok);
		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
		    expand_bool_pixel = __pixel_keyword;
		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
		    expand_bool_pixel = __bool_keyword;
		}
	    }

	  /* Support vector __int128_t, but we don't need to worry about bool
	     or pixel on this type.  */
	  else if (TARGET_VADDUQM
		   && (ident == C_CPP_HASHNODE (__int128_type)
		       || ident == C_CPP_HASHNODE (__uint128_type)))
	    expand_this = C_CPP_HASHNODE (__vector_keyword);
	}
    }
  /* expand_bool_pixel was set by a preceding "vector" expansion; finish
     the job by expanding the following pixel/bool keyword.  */
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
    {
      expand_this = C_CPP_HASHNODE (__pixel_keyword);
      expand_bool_pixel = 0;
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
    {
      expand_this = C_CPP_HASHNODE (__bool_keyword);
      expand_bool_pixel = 0;
    }

  return expand_this;
}


/* Define or undefine a single macro.
 */

static void
rs6000_define_or_undefine_macro (bool define_p, const char *name)
{
  /* Echo the action under -mdebug= for builtin/target debugging.  */
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);

  if (define_p)
    cpp_define (parse_in, name);
  else
    cpp_undef (parse_in, name);
}

/* Define or undefine macros based on the current target.  If the user does
   #pragma GCC target, we need to adjust the macros dynamically.  Note, some of
   the options needed for builtins have been moved to separate variables, so
   have both the target flags and the builtin flags as arguments.  */

void
rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
			     HOST_WIDE_INT bu_mask)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr,
	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
	     (define_p) ? "define" : "undef",
	     flags, bu_mask);

  /* Each of the flags mentioned below controls whether certain
     preprocessor macros will be automatically defined when
     preprocessing source files for compilation by this compiler.
     While most of these flags can be enabled or disabled
     explicitly by specifying certain command-line options when
     invoking the compiler, there are also many ways in which these
     flags are enabled or disabled implicitly, based on compiler
     defaults, configuration choices, and on the presence of certain
     related command-line options.  Many, but not all, of these
     implicit behaviors can be found in file "rs6000.cc", the
     rs6000_option_override_internal() function.

     In general, each of the flags may be automatically enabled in
     any of the following conditions:

     1. If no -mcpu target is specified on the command line and no
	--with-cpu target is specified to the configure command line
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     2. If the target specified with -mcpu=target on the command line, or
	in the absence of a -mcpu=target command-line option, if the
	target specified using --with-cpu=target on the configure
	command line, is disqualified because the associated binary
	tools (e.g. the assembler) lack support for the requested cpu,
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     3. If either of the above two conditions apply except that the
	TARGET_DEFAULT macro is defined to equal zero, and
	TARGET_POWERPC64 and
	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
	   target), or
	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_POWERPC64 or it is one of the flags included in
	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).

     4. If a cpu has been requested with a -mcpu=target command-line option
	and this cpu has not been disqualified due to shortcomings of the
	binary tools, and the set of flags associated with the requested cpu
	include the flag to be enabled.  See rs6000-cpus.def for macro
	definitions that represent various ABI standards
	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
	the specific flags that are associated with each of the cpu
	choices that can be specified as the target of a -mcpu=target
	compile option, or as the target of a --with-cpu=target
	configure option.  Target flags that are specified in either
	of these two ways are considered "implicit" since the flags
	are not mentioned specifically by name.

	Additional documentation describing behavior specific to
	particular flags is provided below, immediately preceding the
	use of each relevant flag.

     5. If there is no -mcpu=target command-line option, and the cpu
	requested by a --with-cpu=target command-line option has not
	been disqualified due to shortcomings of the binary tools, and
	the set of flags associated with the specified target include
	the flag to be enabled.  See the notes immediately above for a
	summary of the flags associated with particular cpu
	definitions.  */

  /* rs6000_isa_flags based options.  */
  rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
  if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
  if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
  if ((flags & OPTION_MASK_POWERPC64) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
  if ((flags & OPTION_MASK_MFCRF) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
  if ((flags & OPTION_MASK_POPCNTB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
  if ((flags & OPTION_MASK_FPRND) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
  if ((flags & OPTION_MASK_CMPB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
  if ((flags & OPTION_MASK_POPCNTD) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
  if ((flags & OPTION_MASK_POWER8) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
  if ((flags & OPTION_MASK_MODULO) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
  if ((flags & OPTION_MASK_POWER10) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
  if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
  if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
    rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
  /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
     in any of the following conditions:
     1. The operating system is Darwin and it is configured for 64
	bit.  (See darwin_rs6000_override_options.)
     2. The operating system is Darwin and the operating system
	version is 10.5 or higher and the user has not explicitly
	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
	the compiler is not producing code for integration within the
	kernel.  (See darwin_rs6000_override_options.)
     Note that the OPTION_MASK_ALTIVEC flag is automatically turned
     off in any of the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_ALTIVEC
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_ALTIVEC) != 0)
    {
      const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
      rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
      rs6000_define_or_undefine_macro (define_p, vec_str);

      /* Define this when supporting context-sensitive keywords.  */
      if (!flag_iso)
	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
      if (rs6000_aix_extabi)
	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
    }
  /* Note that the OPTION_MASK_VSX flag is automatically turned on in
     the following conditions:
     1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
	was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been explicitly turned on.
     Note that the OPTION_MASK_VSX flag is automatically turned off in
     the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
	compilation context, or if it is turned on by any means in an
	inner compilation context.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been turned off explicitly.
     4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     5. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_VSX flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_VSX
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_VSX) != 0)
    rs6000_define_or_undefine_macro (define_p, "__VSX__");
  if ((flags & OPTION_MASK_HTM) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__HTM__");
      /* Tell the user that our HTM insn patterns act as memory barriers.  */
      rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
    }
  /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     on in the following conditions:
     1. TARGET_P9_VECTOR is explicitly turned on and
	OPTION_MASK_P8_VECTOR is not explicitly turned off.
	Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
	have been turned off explicitly.
     Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     off in the following conditions:
     1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
	not turned on explicitly.
     2. If TARGET_ALTIVEC is turned off.  Hereafter, the
	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
	explicitly.
     3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
	explicitly enabled.  If TARGET_VSX is explicitly enabled, the
	OPTION_MASK_P8_VECTOR flag is hereafter also considered to
	have been turned off explicitly.  */
  if ((flags & OPTION_MASK_P8_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
  /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
     off in the following conditions:
     1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
	not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
	was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
	also considered to have been turned off explicitly.
     Note that the OPTION_MASK_P9_VECTOR is automatically turned on
     in the following conditions:
     1. If TARGET_P9_MINMAX was turned on explicitly.
	Hereafter, THE OPTION_MASK_P9_VECTOR flag is considered to
	have been turned on explicitly.  */
  if ((flags & OPTION_MASK_P9_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
  /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
	load/store are disabled on little endian).  */
  if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
  /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
     automatically turned on in the following conditions:
     1. If TARGET_QUAD_MEMORY and this flag was not explicitly
	disabled.  */
  if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
  /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
     in the following conditions:
     1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
	on explicitly.
     2. If TARGET_ALTIVEC is turned off.  */
  if ((flags & OPTION_MASK_CRYPTO) != 0)
    rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
  if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
      /* __float128 is only ever a #define for the IEEE 128-bit type;
	 undefining takes the bare name.  */
      if (define_p)
	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
      else
	rs6000_define_or_undefine_macro (false, "__float128");
      if (ieee128_float_type_node && define_p)
	rs6000_define_or_undefine_macro (true, "__SIZEOF_FLOAT128__=16");
      else
	rs6000_define_or_undefine_macro (false, "__SIZEOF_FLOAT128__");
    }
  /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
     via the target attribute/pragma.  */
  if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
    rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");

  /* options from the builtin masks.  */
  /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
     PROCESSOR_CELL) (e.g. -mcpu=cell).  */
  if ((bu_mask & RS6000_BTM_CELL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PPU__");

  /* Tell the user if we support the MMA instructions.  */
  if ((flags & OPTION_MASK_MMA) != 0)
    rs6000_define_or_undefine_macro (define_p, "__MMA__");
  /* Whether pc-relative code is being generated.  */
  if ((flags & OPTION_MASK_PCREL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PCREL__");
  /* Tell the user -mrop-protect is in play.  */
  if (rs6000_rop_protect)
    rs6000_define_or_undefine_macro (define_p, "__ROP_PROTECT__");
}

void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
  /* Define all of the common macros.
 */
  rs6000_target_modify_macros (true, rs6000_isa_flags,
			       rs6000_builtin_mask_calculate ());

  /* Reciprocal estimate / square-root estimate instruction availability.  */
  if (TARGET_FRE)
    builtin_define ("__RECIP__");
  if (TARGET_FRES)
    builtin_define ("__RECIPF__");
  if (TARGET_FRSQRTE)
    builtin_define ("__RSQRTE__");
  if (TARGET_FRSQRTES)
    builtin_define ("__RSQRTEF__");
  if (TARGET_FLOAT128_TYPE)
    builtin_define ("__FLOAT128_TYPE__");
  if (ibm128_float_type_node)
    builtin_define ("__SIZEOF_IBM128__=16");
  if (ieee128_float_type_node)
    builtin_define ("__SIZEOF_IEEE128__=16");
#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  builtin_define ("__BUILTIN_CPU_SUPPORTS__");
#endif

  if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
    {
      /* Define the AltiVec syntactic elements.  */
      builtin_define ("__vector=__attribute__((altivec(vector__)))");
      builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
      builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");

      if (!flag_iso)
	{
	  /* Self-expanding placeholder macros; the real work happens in
	     the rs6000_macro_to_expand callback registered below.  */
	  builtin_define ("vector=vector");
	  builtin_define ("pixel=pixel");
	  builtin_define ("bool=bool");
	  builtin_define ("_Bool=_Bool");
	  init_vector_keywords ();

	  /* Enable context-sensitive macros.  */
	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
	}
    }
  if (!TARGET_HARD_FLOAT)
    builtin_define ("_SOFT_DOUBLE");
  /* Used by lwarx/stwcx. errata work-around.  */
  if (rs6000_cpu == PROCESSOR_PPC405)
    builtin_define ("__PPC405__");
  /* Used by libstdc++.  */
  if (TARGET_NO_LWSYNC)
    builtin_define ("__NO_LWSYNC__");

  if (TARGET_EXTRA_BUILTINS)
    {
      /* For the VSX builtin functions identical to Altivec functions, just map
	 the altivec builtin into the vsx version (the altivec functions
	 generate VSX code if -mvsx).  */
      builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
      builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
      builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
      builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
      builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
      builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
      builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");

      /* Also map the a and m versions of the multiply/add instructions to the
	 builtin for people blindly going off the instruction manual.  */
      builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
      builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
    }

  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
  if (TARGET_FLOAT128_TYPE)
    {
      builtin_define ("__builtin_fabsq=__builtin_fabsf128");
      builtin_define ("__builtin_copysignq=__builtin_copysignf128");
      builtin_define ("__builtin_nanq=__builtin_nanf128");
      builtin_define ("__builtin_nansq=__builtin_nansf128");
      builtin_define ("__builtin_infq=__builtin_inff128");
      builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
    }

  /* Tell users they can use __builtin_bswap{16,64}.  */
  builtin_define ("__HAVE_BSWAP__");

  /* May be overridden by target configuration.  */
  RS6000_CPU_CPP_ENDIAN_BUILTINS();

  if (TARGET_LONG_DOUBLE_128)
    {
      builtin_define ("__LONG_DOUBLE_128__");
      builtin_define ("__LONGDOUBLE128");

      if (TARGET_IEEEQUAD)
	{
	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
	     IEEE 128-bit floating point complex type for C++ (which does not
	     support _Float128 _Complex).  If the default for long double is
	     IEEE 128-bit mode, the library would need to use
	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
	     is a stop-gap to build with the older libraries, until we
	     get an updated library.  */
	  builtin_define ("__LONG_DOUBLE_IEEE128__");
	  builtin_define ("__KF__=__TF__");
	  builtin_define ("__KC__=__TC__");
	}
      else
	builtin_define ("__LONG_DOUBLE_IBM128__");
    }

  switch (TARGET_CMODEL)
    {
      /* Deliberately omit __CMODEL_SMALL__ since that was the default
	 before --mcmodel support was added.  */
    case CMODEL_MEDIUM:
      builtin_define ("__CMODEL_MEDIUM__");
      break;
    case CMODEL_LARGE:
      builtin_define ("__CMODEL_LARGE__");
      break;
    default:
      break;
    }

  switch (rs6000_current_abi)
    {
    case ABI_V4:
      builtin_define ("_CALL_SYSV");
      break;
    case ABI_AIX:
      builtin_define ("_CALL_AIXDESC");
      builtin_define ("_CALL_AIX");
      builtin_define ("_CALL_ELF=1");
      break;
    case ABI_ELFv2:
      builtin_define ("_CALL_ELF=2");
      break;
    case ABI_DARWIN:
      builtin_define ("_CALL_DARWIN");
      break;
    default:
      break;
    }

  /* Vector element order.  */
  if (BYTES_BIG_ENDIAN)
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
  else
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");

  /* Let the compiled code know if 'f' class registers will not be available.  */
  if (TARGET_SOFT_FLOAT)
    builtin_define ("__NO_FPRS__");

  /* Whether aggregates passed by value are aligned to a 16 byte boundary
     if their alignment is 16 bytes or larger.  */
  if ((TARGET_MACHO && rs6000_darwin64_abi)
      || DEFAULT_ABI == ABI_ELFv2
      || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
    builtin_define ("__STRUCT_PARM_ALIGN__=16");
}



/* Convert a type stored into a struct altivec_builtin_types as ID,
   into a tree.  The types are in rs6000_builtin_types: negative values
   create a pointer type for the type associated to ~ID.
Note it is 785 1.1 mrg a logical NOT, rather than a negation, otherwise you cannot represent 786 1.1 mrg a pointer type for ID 0. */ 787 1.1 mrg 788 1.1 mrg static inline tree 789 1.1 mrg rs6000_builtin_type (int id) 790 1.1 mrg { 791 1.1 mrg tree t; 792 1.1 mrg t = rs6000_builtin_types[id < 0 ? ~id : id]; 793 1.1 mrg return id < 0 ? build_pointer_type (t) : t; 794 1.1 mrg } 795 1.1 mrg 796 1.1 mrg /* Check whether the type of an argument, T, is compatible with a type ID 797 1.1 mrg stored into a struct altivec_builtin_types. Integer types are considered 798 1.1 mrg compatible; otherwise, the language hook lang_hooks.types_compatible_p makes 799 1.1 mrg the decision. Also allow long double and _Float128 to be compatible if 800 1.1 mrg -mabi=ieeelongdouble. */ 801 1.1 mrg 802 1.1 mrg static inline bool 803 1.1 mrg is_float128_p (tree t) 804 1.1 mrg { 805 1.1 mrg return (t == float128_type_node 806 1.1 mrg || (TARGET_IEEEQUAD 807 1.1 mrg && TARGET_LONG_DOUBLE_128 808 1.1 mrg && t == long_double_type_node)); 809 1.1 mrg } 810 1.1 mrg 811 1.1 mrg 812 1.1 mrg /* Return true iff ARGTYPE can be compatibly passed as PARMTYPE. 
*/ 813 1.1 mrg static bool 814 1.1 mrg rs6000_builtin_type_compatible (tree parmtype, tree argtype) 815 1.1 mrg { 816 1.1 mrg if (parmtype == error_mark_node) 817 1.1 mrg return false; 818 1.1 mrg 819 1.1 mrg if (INTEGRAL_TYPE_P (parmtype) && INTEGRAL_TYPE_P (argtype)) 820 1.1 mrg return true; 821 1.1 mrg 822 1.1 mrg if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128 823 1.1 mrg && is_float128_p (parmtype) && is_float128_p (argtype)) 824 1.1 mrg return true; 825 1.1 mrg 826 1.1 mrg if (POINTER_TYPE_P (parmtype) && POINTER_TYPE_P (argtype)) 827 1.1 mrg { 828 1.1 mrg parmtype = TREE_TYPE (parmtype); 829 1.1 mrg argtype = TREE_TYPE (argtype); 830 1.1 mrg if (TYPE_READONLY (argtype)) 831 1.1 mrg parmtype = build_qualified_type (parmtype, TYPE_QUAL_CONST); 832 1.1 mrg } 833 1.1 mrg 834 1.1 mrg return lang_hooks.types_compatible_p (parmtype, argtype); 835 1.1 mrg } 836 1.1 mrg 837 1.1 mrg /* In addition to calling fold_convert for EXPR of type TYPE, also 838 1.1 mrg call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be 839 1.1 mrg hiding there (PR47197). */ 840 1.1 mrg 841 1.1 mrg static tree 842 1.1 mrg fully_fold_convert (tree type, tree expr) 843 1.1 mrg { 844 1.1 mrg tree result = fold_convert (type, expr); 845 1.1 mrg bool maybe_const = true; 846 1.1 mrg 847 1.1 mrg if (!c_dialect_cxx ()) 848 1.1 mrg result = c_fully_fold (result, false, &maybe_const); 849 1.1 mrg 850 1.1 mrg return result; 851 1.1 mrg } 852 1.1 mrg 853 1.1 mrg /* Build a tree for a function call to an Altivec non-overloaded builtin. 854 1.1 mrg The overloaded builtin that matched the types and args is described 855 1.1 mrg by DESC. The N arguments are given in ARGS, respectively. 856 1.1 mrg 857 1.1 mrg Actually the only thing it does is calling fold_convert on ARGS, with 858 1.1 mrg a small exception for vec_{all,any}_{ge,le} predicates. 
*/ 859 1.1 mrg 860 1.1 mrg static tree 861 1.1 mrg altivec_build_resolved_builtin (tree *args, int n, tree fntype, tree ret_type, 862 1.1 mrg rs6000_gen_builtins bif_id, 863 1.1 mrg rs6000_gen_builtins ovld_id) 864 1.1 mrg { 865 1.1 mrg tree argtypes = TYPE_ARG_TYPES (fntype); 866 1.1 mrg tree arg_type[MAX_OVLD_ARGS]; 867 1.1 mrg tree fndecl = rs6000_builtin_decls[bif_id]; 868 1.1 mrg 869 1.1 mrg for (int i = 0; i < n; i++) 870 1.1 mrg { 871 1.1 mrg arg_type[i] = TREE_VALUE (argtypes); 872 1.1 mrg argtypes = TREE_CHAIN (argtypes); 873 1.1 mrg } 874 1.1 mrg 875 1.1 mrg /* The AltiVec overloading implementation is overall gross, but this 876 1.1 mrg is particularly disgusting. The vec_{all,any}_{ge,le} builtins 877 1.1 mrg are completely different for floating-point vs. integer vector 878 1.1 mrg types, because the former has vcmpgefp, but the latter should use 879 1.1 mrg vcmpgtXX. 880 1.1 mrg 881 1.1 mrg In practice, the second and third arguments are swapped, and the 882 1.1 mrg condition (LT vs. EQ, which is recognizable by bit 1 of the first 883 1.1 mrg argument) is reversed. Patch the arguments here before building 884 1.1 mrg the resolved CALL_EXPR. */ 885 1.1 mrg if (n == 3 886 1.1 mrg && ovld_id == RS6000_OVLD_VEC_CMPGE_P 887 1.1 mrg && bif_id != RS6000_BIF_VCMPGEFP_P 888 1.1 mrg && bif_id != RS6000_BIF_XVCMPGEDP_P) 889 1.1 mrg { 890 1.1 mrg std::swap (args[1], args[2]); 891 1.1 mrg std::swap (arg_type[1], arg_type[2]); 892 1.1 mrg 893 1.1 mrg args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0], 894 1.1 mrg build_int_cst (NULL_TREE, 2)); 895 1.1 mrg } 896 1.1 mrg 897 1.1 mrg for (int j = 0; j < n; j++) 898 1.1 mrg args[j] = fully_fold_convert (arg_type[j], args[j]); 899 1.1 mrg 900 1.1 mrg /* If the number of arguments to an overloaded function increases, 901 1.1 mrg we must expand this switch. 
*/ 902 1.1 mrg gcc_assert (MAX_OVLD_ARGS <= 4); 903 1.1 mrg 904 1.1 mrg tree call; 905 1.1 mrg switch (n) 906 1.1 mrg { 907 1.1 mrg case 0: 908 1.1 mrg call = build_call_expr (fndecl, 0); 909 1.1 mrg break; 910 1.1 mrg case 1: 911 1.1 mrg call = build_call_expr (fndecl, 1, args[0]); 912 1.1 mrg break; 913 1.1 mrg case 2: 914 1.1 mrg call = build_call_expr (fndecl, 2, args[0], args[1]); 915 1.1 mrg break; 916 1.1 mrg case 3: 917 1.1 mrg call = build_call_expr (fndecl, 3, args[0], args[1], args[2]); 918 1.1 mrg break; 919 1.1 mrg case 4: 920 1.1 mrg call = build_call_expr (fndecl, 4, args[0], args[1], args[2], args[3]); 921 1.1 mrg break; 922 1.1 mrg default: 923 1.1 mrg gcc_unreachable (); 924 1.1 mrg } 925 1.1 mrg return fold_convert (ret_type, call); 926 1.1 mrg } 927 1.1 mrg 928 1.1 mrg /* Enumeration of possible results from attempted overload resolution. 929 1.1 mrg This is used by special-case helper functions to tell their caller 930 1.1 mrg whether they succeeded and what still needs to be done. 931 1.1 mrg 932 1.1 mrg unresolved = Still needs processing 933 1.1 mrg resolved = Resolved (but may be an error_mark_node) 934 1.1 mrg resolved_bad = An error that needs handling by the caller. */ 935 1.1 mrg 936 1.1 mrg enum resolution { unresolved, resolved, resolved_bad }; 937 1.1 mrg 938 1.1 mrg /* Resolve an overloaded vec_mul call and return a tree expression for the 939 1.1 mrg resolved call if successful. ARGS contains the arguments to the call. 940 1.1 mrg TYPES contains their types. RES must be set to indicate the status of 941 1.1 mrg the resolution attempt. LOC contains statement location information. */ 942 1.1 mrg 943 1.1 mrg static tree 944 1.1 mrg resolve_vec_mul (resolution *res, tree *args, tree *types, location_t loc) 945 1.1 mrg { 946 1.1 mrg /* vec_mul needs to be special cased because there are no instructions for it 947 1.1 mrg for the {un}signed char, {un}signed short, and {un}signed int types. 
*/ 948 1.1 mrg 949 1.1 mrg /* Both arguments must be vectors and the types must be compatible. */ 950 1.1 mrg if (TREE_CODE (types[0]) != VECTOR_TYPE 951 1.1 mrg || !lang_hooks.types_compatible_p (types[0], types[1])) 952 1.1 mrg { 953 1.1 mrg *res = resolved_bad; 954 1.1 mrg return error_mark_node; 955 1.1 mrg } 956 1.1 mrg 957 1.1 mrg switch (TYPE_MODE (TREE_TYPE (types[0]))) 958 1.1 mrg { 959 1.1 mrg case E_QImode: 960 1.1 mrg case E_HImode: 961 1.1 mrg case E_SImode: 962 1.1 mrg case E_DImode: 963 1.1 mrg case E_TImode: 964 1.1 mrg /* For scalar types just use a multiply expression. */ 965 1.1 mrg *res = resolved; 966 1.1 mrg return fold_build2_loc (loc, MULT_EXPR, types[0], args[0], 967 1.1 mrg fold_convert (types[0], args[1])); 968 1.1 mrg case E_SFmode: 969 1.1 mrg { 970 1.1 mrg /* For floats use the xvmulsp instruction directly. */ 971 1.1 mrg *res = resolved; 972 1.1 mrg tree call = rs6000_builtin_decls[RS6000_BIF_XVMULSP]; 973 1.1 mrg return build_call_expr (call, 2, args[0], args[1]); 974 1.1 mrg } 975 1.1 mrg case E_DFmode: 976 1.1 mrg { 977 1.1 mrg /* For doubles use the xvmuldp instruction directly. */ 978 1.1 mrg *res = resolved; 979 1.1 mrg tree call = rs6000_builtin_decls[RS6000_BIF_XVMULDP]; 980 1.1 mrg return build_call_expr (call, 2, args[0], args[1]); 981 1.1 mrg } 982 1.1 mrg /* Other types are errors. */ 983 1.1 mrg default: 984 1.1 mrg *res = resolved_bad; 985 1.1 mrg return error_mark_node; 986 1.1 mrg } 987 1.1 mrg } 988 1.1 mrg 989 1.1 mrg /* Resolve an overloaded vec_cmpne call and return a tree expression for the 990 1.1 mrg resolved call if successful. ARGS contains the arguments to the call. 991 1.1 mrg TYPES contains their types. RES must be set to indicate the status of 992 1.1 mrg the resolution attempt. LOC contains statement location information. 
*/ 993 1.1 mrg 994 1.1 mrg static tree 995 1.1 mrg resolve_vec_cmpne (resolution *res, tree *args, tree *types, location_t loc) 996 1.1 mrg { 997 1.1 mrg /* vec_cmpne needs to be special cased because there are no instructions 998 1.1 mrg for it (prior to power 9). */ 999 1.1 mrg 1000 1.1 mrg /* Both arguments must be vectors and the types must be compatible. */ 1001 1.1 mrg if (TREE_CODE (types[0]) != VECTOR_TYPE 1002 1.1 mrg || !lang_hooks.types_compatible_p (types[0], types[1])) 1003 1.1 mrg { 1004 1.1 mrg *res = resolved_bad; 1005 1.1 mrg return error_mark_node; 1006 1.1 mrg } 1007 1.1 mrg 1008 1.1 mrg machine_mode arg0_elt_mode = TYPE_MODE (TREE_TYPE (types[0])); 1009 1.1 mrg 1010 1.1 mrg /* Power9 instructions provide the most efficient implementation of 1011 1.1 mrg ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode 1012 1.1 mrg or SFmode or DFmode. */ 1013 1.1 mrg if (!TARGET_P9_VECTOR 1014 1.1 mrg || arg0_elt_mode == DImode 1015 1.1 mrg || arg0_elt_mode == TImode 1016 1.1 mrg || arg0_elt_mode == SFmode 1017 1.1 mrg || arg0_elt_mode == DFmode) 1018 1.1 mrg { 1019 1.1 mrg switch (arg0_elt_mode) 1020 1.1 mrg { 1021 1.1 mrg /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb), 1022 1.1 mrg vec_cmpeq (va, vb)). */ 1023 1.1 mrg /* Note: vec_nand also works but opt changes vec_nand's 1024 1.1 mrg to vec_nor's anyway. */ 1025 1.1 mrg case E_QImode: 1026 1.1 mrg case E_HImode: 1027 1.1 mrg case E_SImode: 1028 1.1 mrg case E_DImode: 1029 1.1 mrg case E_TImode: 1030 1.1 mrg case E_SFmode: 1031 1.1 mrg case E_DFmode: 1032 1.1 mrg { 1033 1.1 mrg /* call = vec_cmpeq (va, vb) 1034 1.1 mrg result = vec_nor (call, call). 
*/ 1035 1.1 mrg vec<tree, va_gc> *params = make_tree_vector (); 1036 1.1 mrg vec_safe_push (params, args[0]); 1037 1.1 mrg vec_safe_push (params, args[1]); 1038 1.1 mrg tree decl = rs6000_builtin_decls[RS6000_OVLD_VEC_CMPEQ]; 1039 1.1 mrg tree call = altivec_resolve_overloaded_builtin (loc, decl, params); 1040 1.1 mrg /* Use save_expr to ensure that operands used more than once 1041 1.1 mrg that may have side effects (like calls) are only evaluated 1042 1.1 mrg once. */ 1043 1.1 mrg call = save_expr (call); 1044 1.1 mrg params = make_tree_vector (); 1045 1.1 mrg vec_safe_push (params, call); 1046 1.1 mrg vec_safe_push (params, call); 1047 1.1 mrg decl = rs6000_builtin_decls[RS6000_OVLD_VEC_NOR]; 1048 1.1 mrg *res = resolved; 1049 1.1 mrg return altivec_resolve_overloaded_builtin (loc, decl, params); 1050 1.1 mrg } 1051 1.1 mrg /* Other types are errors. */ 1052 1.1 mrg default: 1053 1.1 mrg *res = resolved_bad; 1054 1.1 mrg return error_mark_node; 1055 1.1 mrg } 1056 1.1 mrg } 1057 1.1 mrg 1058 1.1 mrg /* Otherwise this call is unresolved, and altivec_resolve_overloaded_builtin 1059 1.1 mrg will later process the Power9 alternative. */ 1060 1.1 mrg *res = unresolved; 1061 1.1 mrg return error_mark_node; 1062 1.1 mrg } 1063 1.1 mrg 1064 1.1 mrg /* Resolve an overloaded vec_adde or vec_sube call and return a tree expression 1065 1.1 mrg for the resolved call if successful. ARGS contains the arguments to the 1066 1.1 mrg call. TYPES contains their arguments. RES must be set to indicate the 1067 1.1 mrg status of the resolution attempt. LOC contains statement location 1068 1.1 mrg information. */ 1069 1.1 mrg 1070 1.1 mrg static tree 1071 1.1 mrg resolve_vec_adde_sube (resolution *res, rs6000_gen_builtins fcode, 1072 1.1 mrg tree *args, tree *types, location_t loc) 1073 1.1 mrg { 1074 1.1 mrg /* vec_adde needs to be special cased because there is no instruction 1075 1.1 mrg for the {un}signed int version. 
*/ 1076 1.1 mrg 1077 1.1 mrg /* All 3 arguments must be vectors of (signed or unsigned) (int or 1078 1.1 mrg __int128) and the types must be compatible. */ 1079 1.1 mrg if (TREE_CODE (types[0]) != VECTOR_TYPE 1080 1.1 mrg || !lang_hooks.types_compatible_p (types[0], types[1]) 1081 1.1 mrg || !lang_hooks.types_compatible_p (types[1], types[2])) 1082 1.1 mrg { 1083 1.1 mrg *res = resolved_bad; 1084 1.1 mrg return error_mark_node; 1085 1.1 mrg } 1086 1.1 mrg 1087 1.1 mrg switch (TYPE_MODE (TREE_TYPE (types[0]))) 1088 1.1 mrg { 1089 1.1 mrg /* For {un}signed ints, 1090 1.1 mrg vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb), 1091 1.1 mrg vec_and (carryv, 1)). 1092 1.1 mrg vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb), 1093 1.1 mrg vec_and (carryv, 1)). */ 1094 1.1 mrg case E_SImode: 1095 1.1 mrg { 1096 1.1 mrg vec<tree, va_gc> *params = make_tree_vector (); 1097 1.1 mrg vec_safe_push (params, args[0]); 1098 1.1 mrg vec_safe_push (params, args[1]); 1099 1.1 mrg 1100 1.1 mrg tree add_sub_builtin; 1101 1.1 mrg if (fcode == RS6000_OVLD_VEC_ADDE) 1102 1.1 mrg add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD]; 1103 1.1 mrg else 1104 1.1 mrg add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB]; 1105 1.1 mrg 1106 1.1 mrg tree call = altivec_resolve_overloaded_builtin (loc, add_sub_builtin, 1107 1.1 mrg params); 1108 1.1 mrg tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1); 1109 1.1 mrg tree ones_vector = build_vector_from_val (types[0], const1); 1110 1.1 mrg tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0], 1111 1.1 mrg args[2], ones_vector); 1112 1.1 mrg params = make_tree_vector (); 1113 1.1 mrg vec_safe_push (params, call); 1114 1.1 mrg vec_safe_push (params, and_expr); 1115 1.1 mrg *res = resolved; 1116 1.1 mrg return altivec_resolve_overloaded_builtin (loc, add_sub_builtin, 1117 1.1 mrg params); 1118 1.1 mrg } 1119 1.1 mrg /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instruction 1120 1.1 mrg directly using the 
standard machinery. */ 1121 1.1 mrg case E_TImode: 1122 1.1 mrg *res = unresolved; 1123 1.1 mrg break; 1124 1.1 mrg 1125 1.1 mrg /* Types other than {un}signed int and {un}signed __int128 1126 1.1 mrg are errors. */ 1127 1.1 mrg default: 1128 1.1 mrg *res = resolved_bad; 1129 1.1 mrg } 1130 1.1 mrg 1131 1.1 mrg return error_mark_node; 1132 1.1 mrg } 1133 1.1 mrg 1134 1.1 mrg /* Resolve an overloaded vec_addec or vec_subec call and return a tree 1135 1.1 mrg expression for the resolved call if successful. ARGS contains the arguments 1136 1.1 mrg to the call. TYPES contains their types. RES must be set to indicate the 1137 1.1 mrg status of the resolution attempt. LOC contains statement location 1138 1.1 mrg information. */ 1139 1.1 mrg 1140 1.1 mrg static tree 1141 1.1 mrg resolve_vec_addec_subec (resolution *res, rs6000_gen_builtins fcode, 1142 1.1 mrg tree *args, tree *types, location_t loc) 1143 1.1 mrg { 1144 1.1 mrg /* vec_addec and vec_subec needs to be special cased because there is 1145 1.1 mrg no instruction for the (un)signed int version. */ 1146 1.1 mrg 1147 1.1 mrg /* All 3 arguments must be vectors of (signed or unsigned) (int or 1148 1.1 mrg __int128) and the types must be compatible. */ 1149 1.1 mrg if (TREE_CODE (types[0]) != VECTOR_TYPE 1150 1.1 mrg || !lang_hooks.types_compatible_p (types[0], types[1]) 1151 1.1 mrg || !lang_hooks.types_compatible_p (types[1], types[2])) 1152 1.1 mrg { 1153 1.1 mrg *res = resolved_bad; 1154 1.1 mrg return error_mark_node; 1155 1.1 mrg } 1156 1.1 mrg 1157 1.1 mrg switch (TYPE_MODE (TREE_TYPE (types[0]))) 1158 1.1 mrg { 1159 1.1 mrg /* For {un}signed ints, 1160 1.1 mrg vec_addec (va, vb, carryv) == 1161 1.1 mrg vec_or (vec_addc (va, vb), 1162 1.1 mrg vec_addc (vec_add (va, vb), 1163 1.1 mrg vec_and (carryv, 0x1))). */ 1164 1.1 mrg case E_SImode: 1165 1.1 mrg { 1166 1.1 mrg /* Use save_expr to ensure that operands used more than once that may 1167 1.1 mrg have side effects (like calls) are only evaluated once. 
*/ 1168 1.1 mrg args[0] = save_expr (args[0]); 1169 1.1 mrg args[1] = save_expr (args[1]); 1170 1.1 mrg vec<tree, va_gc> *params = make_tree_vector (); 1171 1.1 mrg vec_safe_push (params, args[0]); 1172 1.1 mrg vec_safe_push (params, args[1]); 1173 1.1 mrg 1174 1.1 mrg tree as_c_builtin; 1175 1.1 mrg if (fcode == RS6000_OVLD_VEC_ADDEC) 1176 1.1 mrg as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADDC]; 1177 1.1 mrg else 1178 1.1 mrg as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUBC]; 1179 1.1 mrg 1180 1.1 mrg tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin, 1181 1.1 mrg params); 1182 1.1 mrg params = make_tree_vector (); 1183 1.1 mrg vec_safe_push (params, args[0]); 1184 1.1 mrg vec_safe_push (params, args[1]); 1185 1.1 mrg 1186 1.1 mrg tree as_builtin; 1187 1.1 mrg if (fcode == RS6000_OVLD_VEC_ADDEC) 1188 1.1 mrg as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD]; 1189 1.1 mrg else 1190 1.1 mrg as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB]; 1191 1.1 mrg 1192 1.1 mrg tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin, 1193 1.1 mrg params); 1194 1.1 mrg tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1); 1195 1.1 mrg tree ones_vector = build_vector_from_val (types[0], const1); 1196 1.1 mrg tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0], 1197 1.1 mrg args[2], ones_vector); 1198 1.1 mrg params = make_tree_vector (); 1199 1.1 mrg vec_safe_push (params, call2); 1200 1.1 mrg vec_safe_push (params, and_expr); 1201 1.1 mrg call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin, params); 1202 1.1 mrg params = make_tree_vector (); 1203 1.1 mrg vec_safe_push (params, call1); 1204 1.1 mrg vec_safe_push (params, call2); 1205 1.1 mrg tree or_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_OR]; 1206 1.1 mrg *res = resolved; 1207 1.1 mrg return altivec_resolve_overloaded_builtin (loc, or_builtin, params); 1208 1.1 mrg } 1209 1.1 mrg /* For {un}signed __int128s use the vaddecuq/vsubbecuq 1210 1.1 mrg 
instructions. This occurs through normal processing. */ 1211 1.1 mrg case E_TImode: 1212 1.1 mrg *res = unresolved; 1213 1.1 mrg break; 1214 1.1 mrg 1215 1.1 mrg /* Types other than {un}signed int and {un}signed __int128 1216 1.1 mrg are errors. */ 1217 1.1 mrg default: 1218 1.1 mrg *res = resolved_bad; 1219 1.1 mrg } 1220 1.1 mrg 1221 1.1 mrg return error_mark_node; 1222 1.1 mrg } 1223 1.1 mrg 1224 1.1 mrg /* Resolve an overloaded vec_splats or vec_promote call and return a tree 1225 1.1 mrg expression for the resolved call if successful. NARGS is the number of 1226 1.1 mrg arguments to the call. ARGLIST contains the arguments. RES must be set 1227 1.1 mrg to indicate the status of the resolution attempt. */ 1228 1.1 mrg 1229 1.1 mrg static tree 1230 1.1 mrg resolve_vec_splats (resolution *res, rs6000_gen_builtins fcode, 1231 1.1 mrg vec<tree, va_gc> *arglist, unsigned nargs) 1232 1.1 mrg { 1233 1.1 mrg const char *name; 1234 1.1 mrg name = fcode == RS6000_OVLD_VEC_SPLATS ? "vec_splats" : "vec_promote"; 1235 1.1 mrg 1236 1.1 mrg if (fcode == RS6000_OVLD_VEC_SPLATS && nargs != 1) 1237 1.1 mrg { 1238 1.1 mrg error ("builtin %qs only accepts 1 argument", name); 1239 1.1 mrg *res = resolved; 1240 1.1 mrg return error_mark_node; 1241 1.1 mrg } 1242 1.1 mrg 1243 1.1 mrg if (fcode == RS6000_OVLD_VEC_PROMOTE && nargs != 2) 1244 1.1 mrg { 1245 1.1 mrg error ("builtin %qs only accepts 2 arguments", name); 1246 1.1 mrg *res = resolved; 1247 1.1 mrg return error_mark_node; 1248 1.1 mrg } 1249 1.1 mrg 1250 1.1 mrg /* Ignore promote's element argument. 
*/ 1251 1.1 mrg if (fcode == RS6000_OVLD_VEC_PROMOTE 1252 1.1 mrg && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1]))) 1253 1.1 mrg { 1254 1.1 mrg *res = resolved_bad; 1255 1.1 mrg return error_mark_node; 1256 1.1 mrg } 1257 1.1 mrg 1258 1.1 mrg tree arg = (*arglist)[0]; 1259 1.1 mrg tree type = TREE_TYPE (arg); 1260 1.1 mrg 1261 1.1 mrg if (!SCALAR_FLOAT_TYPE_P (type) && !INTEGRAL_TYPE_P (type)) 1262 1.1 mrg { 1263 1.1 mrg *res = resolved_bad; 1264 1.1 mrg return error_mark_node; 1265 1.1 mrg } 1266 1.1 mrg 1267 1.1 mrg bool unsigned_p = TYPE_UNSIGNED (type); 1268 1.1 mrg int size; 1269 1.1 mrg 1270 1.1 mrg switch (TYPE_MODE (type)) 1271 1.1 mrg { 1272 1.1 mrg case E_TImode: 1273 1.1 mrg type = unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node; 1274 1.1 mrg size = 1; 1275 1.1 mrg break; 1276 1.1 mrg case E_DImode: 1277 1.1 mrg type = unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node; 1278 1.1 mrg size = 2; 1279 1.1 mrg break; 1280 1.1 mrg case E_SImode: 1281 1.1 mrg type = unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node; 1282 1.1 mrg size = 4; 1283 1.1 mrg break; 1284 1.1 mrg case E_HImode: 1285 1.1 mrg type = unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node; 1286 1.1 mrg size = 8; 1287 1.1 mrg break; 1288 1.1 mrg case E_QImode: 1289 1.1 mrg type = unsigned_p ? 
unsigned_V16QI_type_node : V16QI_type_node; 1290 1.1 mrg size = 16; 1291 1.1 mrg break; 1292 1.1 mrg case E_SFmode: 1293 1.1 mrg type = V4SF_type_node; 1294 1.1 mrg size = 4; 1295 1.1 mrg break; 1296 1.1 mrg case E_DFmode: 1297 1.1 mrg type = V2DF_type_node; 1298 1.1 mrg size = 2; 1299 1.1 mrg break; 1300 1.1 mrg default: 1301 1.1 mrg *res = resolved_bad; 1302 1.1 mrg return error_mark_node; 1303 1.1 mrg } 1304 1.1 mrg 1305 1.1 mrg arg = save_expr (fold_convert (TREE_TYPE (type), arg)); 1306 1.1 mrg vec<constructor_elt, va_gc> *vec; 1307 1.1 mrg vec_alloc (vec, size); 1308 1.1 mrg 1309 1.1 mrg for (int i = 0; i < size; i++) 1310 1.1 mrg { 1311 1.1 mrg constructor_elt elt = {NULL_TREE, arg}; 1312 1.1 mrg vec->quick_push (elt); 1313 1.1 mrg } 1314 1.1 mrg 1315 1.1 mrg *res = resolved; 1316 1.1 mrg return build_constructor (type, vec); 1317 1.1 mrg } 1318 1.1 mrg 1319 1.1 mrg /* Resolve an overloaded vec_extract call and return a tree expression for 1320 1.1 mrg the resolved call if successful. NARGS is the number of arguments to 1321 1.1 mrg the call. ARGLIST contains the arguments. RES must be set to indicate 1322 1.1 mrg the status of the resolution attempt. LOC contains statement location 1323 1.1 mrg information. 
*/ 1324 1.1 mrg 1325 1.1 mrg static tree 1326 1.1 mrg resolve_vec_extract (resolution *res, vec<tree, va_gc> *arglist, 1327 1.1 mrg unsigned nargs, location_t loc) 1328 1.1 mrg { 1329 1.1 mrg if (nargs != 2) 1330 1.1 mrg { 1331 1.1 mrg error ("builtin %qs only accepts 2 arguments", "vec_extract"); 1332 1.1 mrg *res = resolved; 1333 1.1 mrg return error_mark_node; 1334 1.1 mrg } 1335 1.1 mrg 1336 1.1 mrg tree arg1 = (*arglist)[0]; 1337 1.1 mrg tree arg1_type = TREE_TYPE (arg1); 1338 1.1 mrg tree arg2 = (*arglist)[1]; 1339 1.1 mrg 1340 1.1 mrg if (TREE_CODE (arg1_type) != VECTOR_TYPE 1341 1.1 mrg || !INTEGRAL_TYPE_P (TREE_TYPE (arg2))) 1342 1.1 mrg { 1343 1.1 mrg *res = resolved_bad; 1344 1.1 mrg return error_mark_node; 1345 1.1 mrg } 1346 1.1 mrg 1347 1.1 mrg /* See if we can optimize vec_extract with the current VSX instruction 1348 1.1 mrg set. */ 1349 1.1 mrg machine_mode mode = TYPE_MODE (arg1_type); 1350 1.1 mrg tree arg1_inner_type; 1351 1.1 mrg 1352 1.1 mrg if (VECTOR_MEM_VSX_P (mode)) 1353 1.1 mrg { 1354 1.1 mrg tree call = NULL_TREE; 1355 1.1 mrg int nunits = GET_MODE_NUNITS (mode); 1356 1.1 mrg arg2 = fold_for_warn (arg2); 1357 1.1 mrg 1358 1.1 mrg /* If the second argument is an integer constant, generate 1359 1.1 mrg the built-in code if we can. We need 64-bit and direct 1360 1.1 mrg move to extract the small integer vectors. 
*/ 1361 1.1 mrg if (TREE_CODE (arg2) == INTEGER_CST) 1362 1.1 mrg { 1363 1.1 mrg wide_int selector = wi::to_wide (arg2); 1364 1.1 mrg selector = wi::umod_trunc (selector, nunits); 1365 1.1 mrg arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector); 1366 1.1 mrg switch (mode) 1367 1.1 mrg { 1368 1.1 mrg case E_V1TImode: 1369 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V1TI]; 1370 1.1 mrg break; 1371 1.1 mrg 1372 1.1 mrg case E_V2DFmode: 1373 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF]; 1374 1.1 mrg break; 1375 1.1 mrg 1376 1.1 mrg case E_V2DImode: 1377 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI]; 1378 1.1 mrg break; 1379 1.1 mrg 1380 1.1 mrg case E_V4SFmode: 1381 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF]; 1382 1.1 mrg break; 1383 1.1 mrg 1384 1.1 mrg case E_V4SImode: 1385 1.1 mrg if (TARGET_DIRECT_MOVE_64BIT) 1386 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI]; 1387 1.1 mrg break; 1388 1.1 mrg 1389 1.1 mrg case E_V8HImode: 1390 1.1 mrg if (TARGET_DIRECT_MOVE_64BIT) 1391 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI]; 1392 1.1 mrg break; 1393 1.1 mrg 1394 1.1 mrg case E_V16QImode: 1395 1.1 mrg if (TARGET_DIRECT_MOVE_64BIT) 1396 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI]; 1397 1.1 mrg break; 1398 1.1 mrg 1399 1.1 mrg default: 1400 1.1 mrg break; 1401 1.1 mrg } 1402 1.1 mrg } 1403 1.1 mrg 1404 1.1 mrg /* If the second argument is variable, we can optimize it if we are 1405 1.1 mrg generating 64-bit code on a machine with direct move. 
*/ 1406 1.1 mrg else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT) 1407 1.1 mrg { 1408 1.1 mrg switch (mode) 1409 1.1 mrg { 1410 1.1 mrg case E_V2DFmode: 1411 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF]; 1412 1.1 mrg break; 1413 1.1 mrg 1414 1.1 mrg case E_V2DImode: 1415 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI]; 1416 1.1 mrg break; 1417 1.1 mrg 1418 1.1 mrg case E_V4SFmode: 1419 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF]; 1420 1.1 mrg break; 1421 1.1 mrg 1422 1.1 mrg case E_V4SImode: 1423 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI]; 1424 1.1 mrg break; 1425 1.1 mrg 1426 1.1 mrg case E_V8HImode: 1427 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI]; 1428 1.1 mrg break; 1429 1.1 mrg 1430 1.1 mrg case E_V16QImode: 1431 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI]; 1432 1.1 mrg break; 1433 1.1 mrg 1434 1.1 mrg default: 1435 1.1 mrg break; 1436 1.1 mrg } 1437 1.1 mrg } 1438 1.1 mrg 1439 1.1 mrg if (call) 1440 1.1 mrg { 1441 1.1 mrg tree result = build_call_expr (call, 2, arg1, arg2); 1442 1.1 mrg /* Coerce the result to vector element type. May be no-op. */ 1443 1.1 mrg arg1_inner_type = TREE_TYPE (arg1_type); 1444 1.1 mrg result = fold_convert (arg1_inner_type, result); 1445 1.1 mrg *res = resolved; 1446 1.1 mrg return result; 1447 1.1 mrg } 1448 1.1 mrg } 1449 1.1 mrg 1450 1.1 mrg /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2). 
*/ 1451 1.1 mrg arg1_inner_type = TREE_TYPE (arg1_type); 1452 1.1 mrg tree subp = build_int_cst (TREE_TYPE (arg2), 1453 1.1 mrg TYPE_VECTOR_SUBPARTS (arg1_type) - 1); 1454 1.1 mrg arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, subp, 0); 1455 1.1 mrg 1456 1.1 mrg tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type); 1457 1.1 mrg DECL_EXTERNAL (decl) = 0; 1458 1.1 mrg TREE_PUBLIC (decl) = 0; 1459 1.1 mrg DECL_CONTEXT (decl) = current_function_decl; 1460 1.1 mrg TREE_USED (decl) = 1; 1461 1.1 mrg TREE_TYPE (decl) = arg1_type; 1462 1.1 mrg TREE_READONLY (decl) = TYPE_READONLY (arg1_type); 1463 1.1 mrg 1464 1.1 mrg tree stmt; 1465 1.1 mrg if (c_dialect_cxx ()) 1466 1.1 mrg { 1467 1.1 mrg stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE); 1468 1.1 mrg SET_EXPR_LOCATION (stmt, loc); 1469 1.1 mrg } 1470 1.1 mrg else 1471 1.1 mrg { 1472 1.1 mrg DECL_INITIAL (decl) = arg1; 1473 1.1 mrg stmt = build1 (DECL_EXPR, arg1_type, decl); 1474 1.1 mrg TREE_ADDRESSABLE (decl) = 1; 1475 1.1 mrg SET_EXPR_LOCATION (stmt, loc); 1476 1.1 mrg stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt); 1477 1.1 mrg } 1478 1.1 mrg 1479 1.1 mrg tree innerptrtype = build_pointer_type (arg1_inner_type); 1480 1.1 mrg stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0); 1481 1.1 mrg stmt = convert (innerptrtype, stmt); 1482 1.1 mrg stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1); 1483 1.1 mrg stmt = build_indirect_ref (loc, stmt, RO_NULL); 1484 1.1 mrg 1485 1.1 mrg /* PR83660: We mark this as having side effects so that downstream in 1486 1.1 mrg fold_build_cleanup_point_expr () it will get a CLEANUP_POINT_EXPR. If it 1487 1.1 mrg does not we can run into an ICE later in gimplify_cleanup_point_expr (). 1488 1.1 mrg Potentially this causes missed optimization because there actually is no 1489 1.1 mrg side effect. 
*/ 1490 1.1 mrg if (c_dialect_cxx ()) 1491 1.1 mrg TREE_SIDE_EFFECTS (stmt) = 1; 1492 1.1 mrg 1493 1.1 mrg *res = resolved; 1494 1.1 mrg return stmt; 1495 1.1 mrg } 1496 1.1 mrg 1497 1.1 mrg /* Resolve an overloaded vec_insert call and return a tree expression for 1498 1.1 mrg the resolved call if successful. NARGS is the number of arguments to 1499 1.1 mrg the call. ARGLIST contains the arguments. RES must be set to indicate 1500 1.1 mrg the status of the resolution attempt. LOC contains statement location 1501 1.1 mrg information. */ 1502 1.1 mrg 1503 1.1 mrg static tree 1504 1.1 mrg resolve_vec_insert (resolution *res, vec<tree, va_gc> *arglist, 1505 1.1 mrg unsigned nargs, location_t loc) 1506 1.1 mrg { 1507 1.1 mrg if (nargs != 3) 1508 1.1 mrg { 1509 1.1 mrg error ("builtin %qs only accepts 3 arguments", "vec_insert"); 1510 1.1 mrg *res = resolved; 1511 1.1 mrg return error_mark_node; 1512 1.1 mrg } 1513 1.1 mrg 1514 1.1 mrg tree arg0 = (*arglist)[0]; 1515 1.1 mrg tree arg1 = (*arglist)[1]; 1516 1.1 mrg tree arg1_type = TREE_TYPE (arg1); 1517 1.1 mrg tree arg2 = fold_for_warn ((*arglist)[2]); 1518 1.1 mrg 1519 1.1 mrg if (TREE_CODE (arg1_type) != VECTOR_TYPE 1520 1.1 mrg || !INTEGRAL_TYPE_P (TREE_TYPE (arg2))) 1521 1.1 mrg { 1522 1.1 mrg *res = resolved_bad; 1523 1.1 mrg return error_mark_node; 1524 1.1 mrg } 1525 1.1 mrg 1526 1.1 mrg /* If we can use the VSX xxpermdi instruction, use that for insert. 
*/ 1527 1.1 mrg machine_mode mode = TYPE_MODE (arg1_type); 1528 1.1 mrg 1529 1.1 mrg if ((mode == V2DFmode || mode == V2DImode) 1530 1.1 mrg && VECTOR_UNIT_VSX_P (mode) 1531 1.1 mrg && TREE_CODE (arg2) == INTEGER_CST) 1532 1.1 mrg { 1533 1.1 mrg wide_int selector = wi::to_wide (arg2); 1534 1.1 mrg selector = wi::umod_trunc (selector, 2); 1535 1.1 mrg arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector); 1536 1.1 mrg 1537 1.1 mrg tree call = NULL_TREE; 1538 1.1 mrg if (mode == V2DFmode) 1539 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DF]; 1540 1.1 mrg else if (mode == V2DImode) 1541 1.1 mrg call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DI]; 1542 1.1 mrg 1543 1.1 mrg /* Note, __builtin_vec_insert_<xxx> has vector and scalar types 1544 1.1 mrg reversed. */ 1545 1.1 mrg if (call) 1546 1.1 mrg { 1547 1.1 mrg *res = resolved; 1548 1.1 mrg return build_call_expr (call, 3, arg1, arg0, arg2); 1549 1.1 mrg } 1550 1.1 mrg } 1551 1.1 mrg 1552 1.1 mrg else if (mode == V1TImode 1553 1.1 mrg && VECTOR_UNIT_VSX_P (mode) 1554 1.1 mrg && TREE_CODE (arg2) == INTEGER_CST) 1555 1.1 mrg { 1556 1.1 mrg tree call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V1TI]; 1557 1.1 mrg wide_int selector = wi::zero(32); 1558 1.1 mrg arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector); 1559 1.1 mrg 1560 1.1 mrg /* Note, __builtin_vec_insert_<xxx> has vector and scalar types 1561 1.1 mrg reversed. */ 1562 1.1 mrg *res = resolved; 1563 1.1 mrg return build_call_expr (call, 3, arg1, arg0, arg2); 1564 1.1 mrg } 1565 1.1 mrg 1566 1.1 mrg /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2) = arg0 with 1567 1.1 mrg VIEW_CONVERT_EXPR. 
i.e.: 1568 1.1 mrg D.3192 = v1; 1569 1.1 mrg _1 = n & 3; 1570 1.1 mrg VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i; 1571 1.1 mrg v1 = D.3192; 1572 1.1 mrg D.3194 = v1; */ 1573 1.1 mrg if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1) 1574 1.1 mrg arg2 = build_int_cst (TREE_TYPE (arg2), 0); 1575 1.1 mrg else 1576 1.1 mrg { 1577 1.1 mrg tree c = build_int_cst (TREE_TYPE (arg2), 1578 1.1 mrg TYPE_VECTOR_SUBPARTS (arg1_type) - 1); 1579 1.1 mrg arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, c, 0); 1580 1.1 mrg } 1581 1.1 mrg 1582 1.1 mrg tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type); 1583 1.1 mrg DECL_EXTERNAL (decl) = 0; 1584 1.1 mrg TREE_PUBLIC (decl) = 0; 1585 1.1 mrg DECL_CONTEXT (decl) = current_function_decl; 1586 1.1 mrg TREE_USED (decl) = 1; 1587 1.1 mrg TREE_TYPE (decl) = arg1_type; 1588 1.1 mrg TREE_READONLY (decl) = TYPE_READONLY (arg1_type); 1589 1.1 mrg TREE_ADDRESSABLE (decl) = 1; 1590 1.1 mrg 1591 1.1 mrg tree stmt; 1592 1.1 mrg if (c_dialect_cxx ()) 1593 1.1 mrg { 1594 1.1 mrg stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE); 1595 1.1 mrg SET_EXPR_LOCATION (stmt, loc); 1596 1.1 mrg } 1597 1.1 mrg else 1598 1.1 mrg { 1599 1.1 mrg DECL_INITIAL (decl) = arg1; 1600 1.1 mrg stmt = build1 (DECL_EXPR, arg1_type, decl); 1601 1.1 mrg SET_EXPR_LOCATION (stmt, loc); 1602 1.1 mrg stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt); 1603 1.1 mrg } 1604 1.1 mrg 1605 1.1 mrg if (TARGET_VSX) 1606 1.1 mrg { 1607 1.1 mrg stmt = build_array_ref (loc, stmt, arg2); 1608 1.1 mrg stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt, 1609 1.1 mrg convert (TREE_TYPE (stmt), arg0)); 1610 1.1 mrg stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl); 1611 1.1 mrg } 1612 1.1 mrg else 1613 1.1 mrg { 1614 1.1 mrg tree arg1_inner_type = TREE_TYPE (arg1_type); 1615 1.1 mrg tree innerptrtype = build_pointer_type (arg1_inner_type); 1616 1.1 mrg stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0); 1617 1.1 mrg stmt = convert (innerptrtype, stmt); 1618 1.1 
mrg stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1); 1619 1.1 mrg stmt = build_indirect_ref (loc, stmt, RO_NULL); 1620 1.1 mrg stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt, 1621 1.1 mrg convert (TREE_TYPE (stmt), arg0)); 1622 1.1 mrg stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl); 1623 1.1 mrg } 1624 1.1 mrg 1625 1.1 mrg *res = resolved; 1626 1.1 mrg return stmt; 1627 1.1 mrg } 1628 1.1 mrg 1629 1.1 mrg /* Resolve an overloaded vec_step call and return a tree expression for 1630 1.1 mrg the resolved call if successful. NARGS is the number of arguments to 1631 1.1 mrg the call. ARGLIST contains the arguments. RES must be set to indicate 1632 1.1 mrg the status of the resolution attempt. */ 1633 1.1 mrg 1634 1.1 mrg static tree 1635 1.1 mrg resolve_vec_step (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs) 1636 1.1 mrg { 1637 1.1 mrg if (nargs != 1) 1638 1.1 mrg { 1639 1.1 mrg error ("builtin %qs only accepts 1 argument", "vec_step"); 1640 1.1 mrg *res = resolved; 1641 1.1 mrg return error_mark_node; 1642 1.1 mrg } 1643 1.1 mrg 1644 1.1 mrg tree arg0 = (*arglist)[0]; 1645 1.1 mrg tree arg0_type = TREE_TYPE (arg0); 1646 1.1 mrg 1647 1.1 mrg if (TREE_CODE (arg0_type) != VECTOR_TYPE) 1648 1.1 mrg { 1649 1.1 mrg *res = resolved_bad; 1650 1.1 mrg return error_mark_node; 1651 1.1 mrg } 1652 1.1 mrg 1653 1.1 mrg *res = resolved; 1654 1.1 mrg return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (arg0_type)); 1655 1.1 mrg } 1656 1.1 mrg 1657 1.1 mrg /* Look for a matching instance in a chain of instances. INSTANCE points to 1658 1.1 mrg the chain of instances; INSTANCE_CODE is the code identifying the specific 1659 1.1 mrg built-in being searched for; FCODE is the overloaded function code; TYPES 1660 1.1 mrg contains an array of two types that must match the types of the instance's 1661 1.1 mrg parameters; and ARGS contains an array of two arguments to be passed to 1662 1.1 mrg the instance. 
If found, resolve the built-in and return it, unless the 1663 1.1 mrg built-in is not supported in context. In that case, set 1664 1.1 mrg UNSUPPORTED_BUILTIN to true. If we don't match, return error_mark_node 1665 1.1 mrg and leave UNSUPPORTED_BUILTIN alone. */ 1666 1.1 mrg 1667 1.1 mrg static tree 1668 1.1 mrg find_instance (bool *unsupported_builtin, int *instance, 1669 1.1 mrg rs6000_gen_builtins instance_code, 1670 1.1 mrg rs6000_gen_builtins fcode, 1671 1.1 mrg tree *types, tree *args) 1672 1.1 mrg { 1673 1.1 mrg while (*instance != -1 1674 1.1 mrg && rs6000_instance_info[*instance].bifid != instance_code) 1675 1.1 mrg *instance = rs6000_instance_info[*instance].next; 1676 1.1 mrg 1677 1.1 mrg int inst = *instance; 1678 1.1 mrg gcc_assert (inst != -1); 1679 1.1 mrg /* It is possible for an instance to require a data type that isn't 1680 1.1 mrg defined on this target, in which case rs6000_instance_info_fntype[inst] 1681 1.1 mrg will be NULL. */ 1682 1.1 mrg if (!rs6000_instance_info_fntype[inst]) 1683 1.1 mrg return error_mark_node; 1684 1.1 mrg rs6000_gen_builtins bifid = rs6000_instance_info[inst].bifid; 1685 1.1 mrg tree fntype = rs6000_builtin_info_fntype[bifid]; 1686 1.1 mrg tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype)); 1687 1.1 mrg tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype))); 1688 1.1 mrg 1689 1.1 mrg if (rs6000_builtin_type_compatible (types[0], parmtype0) 1690 1.1 mrg && rs6000_builtin_type_compatible (types[1], parmtype1)) 1691 1.1 mrg { 1692 1.1 mrg if (rs6000_builtin_decl (bifid, false) != error_mark_node 1693 1.1 mrg && rs6000_builtin_is_supported (bifid)) 1694 1.1 mrg { 1695 1.1 mrg tree ret_type = TREE_TYPE (rs6000_instance_info_fntype[inst]); 1696 1.1 mrg return altivec_build_resolved_builtin (args, 2, fntype, ret_type, 1697 1.1 mrg bifid, fcode); 1698 1.1 mrg } 1699 1.1 mrg else 1700 1.1 mrg *unsupported_builtin = true; 1701 1.1 mrg } 1702 1.1 mrg 1703 1.1 mrg return error_mark_node; 1704 1.1 mrg } 1705 1.1 mrg 
1706 1.1 mrg /* Implementation of the resolve_overloaded_builtin target hook, to 1707 1.1 mrg support Altivec's overloaded builtins. */ 1708 1.1 mrg 1709 1.1 mrg tree 1710 1.1 mrg altivec_resolve_overloaded_builtin (location_t loc, tree fndecl, 1711 1.1 mrg void *passed_arglist) 1712 1.1 mrg { 1713 1.1 mrg rs6000_gen_builtins fcode 1714 1.1 mrg = (rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl); 1715 1.1 mrg 1716 1.1 mrg /* Return immediately if this isn't an overload. */ 1717 1.1 mrg if (fcode <= RS6000_OVLD_NONE) 1718 1.1 mrg return NULL_TREE; 1719 1.1 mrg 1720 1.1 mrg if (TARGET_DEBUG_BUILTIN) 1721 1.1 mrg fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n", 1722 1.1 mrg (int) fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl))); 1723 1.1 mrg 1724 1.1 mrg /* vec_lvsl and vec_lvsr are deprecated for use with LE element order. */ 1725 1.1 mrg if (fcode == RS6000_OVLD_VEC_LVSL && !BYTES_BIG_ENDIAN) 1726 1.1 mrg warning (OPT_Wdeprecated, 1727 1.1 mrg "%<vec_lvsl%> is deprecated for little endian; use " 1728 1.1 mrg "assignment for unaligned loads and stores"); 1729 1.1 mrg else if (fcode == RS6000_OVLD_VEC_LVSR && !BYTES_BIG_ENDIAN) 1730 1.1 mrg warning (OPT_Wdeprecated, 1731 1.1 mrg "%<vec_lvsr%> is deprecated for little endian; use " 1732 1.1 mrg "assignment for unaligned loads and stores"); 1733 1.1 mrg 1734 1.1 mrg /* Gather the arguments and their types into arrays for easier handling. */ 1735 1.1 mrg tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); 1736 1.1 mrg tree types[MAX_OVLD_ARGS]; 1737 1.1 mrg tree args[MAX_OVLD_ARGS]; 1738 1.1 mrg unsigned int n; 1739 1.1 mrg 1740 1.1 mrg /* Count the number of expected arguments. 
*/ 1741 1.1 mrg unsigned expected_args = 0; 1742 1.1 mrg for (tree chain = fnargs; 1743 1.1 mrg chain && !VOID_TYPE_P (TREE_VALUE (chain)); 1744 1.1 mrg chain = TREE_CHAIN (chain)) 1745 1.1 mrg expected_args++; 1746 1.1 mrg 1747 1.1 mrg vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist); 1748 1.1 mrg unsigned int nargs = vec_safe_length (arglist); 1749 1.1 mrg 1750 1.1 mrg /* If the number of arguments did not match the prototype, return NULL 1751 1.1 mrg and the generic code will issue the appropriate error message. Skip 1752 1.1 mrg this test for functions where we don't fully describe all the possible 1753 1.1 mrg overload signatures in rs6000-overload.def (because they aren't relevant 1754 1.1 mrg to the expansion here). If we don't, we get confusing error messages. */ 1755 1.1 mrg /* As an example, for vec_splats we have: 1756 1.1 mrg 1757 1.1 mrg ; There are no actual builtins for vec_splats. There is special handling for 1758 1.1 mrg ; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call 1759 1.1 mrg ; is replaced by a constructor. The single overload here causes 1760 1.1 mrg ; __builtin_vec_splats to be registered with the front end so that can happen. 1761 1.1 mrg [VEC_SPLATS, vec_splats, __builtin_vec_splats] 1762 1.1 mrg vsi __builtin_vec_splats (vsi); 1763 1.1 mrg ABS_V4SI SPLATS_FAKERY 1764 1.1 mrg 1765 1.1 mrg So even though __builtin_vec_splats accepts all vector types, the 1766 1.1 mrg infrastructure cheats and just records one prototype. We end up getting 1767 1.1 mrg an error message that refers to this specific prototype even when we 1768 1.1 mrg are handling a different argument type. That is completely confusing 1769 1.1 mrg to the user, so it's best to let these cases be handled individually 1770 1.1 mrg in the resolve_vec_splats, etc., helper functions. 
*/ 1771 1.1 mrg 1772 1.1 mrg if (expected_args != nargs 1773 1.1 mrg && !(fcode == RS6000_OVLD_VEC_PROMOTE 1774 1.1 mrg || fcode == RS6000_OVLD_VEC_SPLATS 1775 1.1 mrg || fcode == RS6000_OVLD_VEC_EXTRACT 1776 1.1 mrg || fcode == RS6000_OVLD_VEC_INSERT 1777 1.1 mrg || fcode == RS6000_OVLD_VEC_STEP)) 1778 1.1 mrg return NULL; 1779 1.1 mrg 1780 1.1 mrg for (n = 0; 1781 1.1 mrg !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs; 1782 1.1 mrg fnargs = TREE_CHAIN (fnargs), n++) 1783 1.1 mrg { 1784 1.1 mrg tree decl_type = TREE_VALUE (fnargs); 1785 1.1 mrg tree arg = (*arglist)[n]; 1786 1.1 mrg 1787 1.1 mrg if (arg == error_mark_node) 1788 1.1 mrg return error_mark_node; 1789 1.1 mrg 1790 1.1 mrg if (n >= MAX_OVLD_ARGS) 1791 1.1 mrg abort (); 1792 1.1 mrg 1793 1.1 mrg arg = default_conversion (arg); 1794 1.1 mrg tree type = TREE_TYPE (arg); 1795 1.1 mrg 1796 1.1 mrg /* The C++ front-end converts float * to const void * using 1797 1.1 mrg NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */ 1798 1.1 mrg if (POINTER_TYPE_P (type) 1799 1.1 mrg && TREE_CODE (arg) == NOP_EXPR 1800 1.1 mrg && lang_hooks.types_compatible_p (TREE_TYPE (arg), 1801 1.1 mrg const_ptr_type_node) 1802 1.1 mrg && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)), 1803 1.1 mrg ptr_type_node)) 1804 1.1 mrg { 1805 1.1 mrg arg = TREE_OPERAND (arg, 0); 1806 1.1 mrg type = TREE_TYPE (arg); 1807 1.1 mrg } 1808 1.1 mrg 1809 1.1 mrg /* Remove the const from the pointers to simplify the overload 1810 1.1 mrg matching further down. 
*/ 1811 1.1 mrg if (POINTER_TYPE_P (decl_type) 1812 1.1 mrg && POINTER_TYPE_P (type) 1813 1.1 mrg && TYPE_QUALS (TREE_TYPE (type)) != 0) 1814 1.1 mrg { 1815 1.1 mrg if (TYPE_READONLY (TREE_TYPE (type)) 1816 1.1 mrg && !TYPE_READONLY (TREE_TYPE (decl_type))) 1817 1.1 mrg warning (0, "passing argument %d of %qE discards %qs " 1818 1.1 mrg "qualifier from pointer target type", n + 1, fndecl, 1819 1.1 mrg "const"); 1820 1.1 mrg type = build_qualified_type (TREE_TYPE (type), 0); 1821 1.1 mrg type = build_pointer_type (type); 1822 1.1 mrg arg = fold_convert (type, arg); 1823 1.1 mrg } 1824 1.1 mrg 1825 1.1 mrg /* For RS6000_OVLD_VEC_LXVL, convert any const * to its non constant 1826 1.1 mrg equivalent to simplify the overload matching below. */ 1827 1.1 mrg if (fcode == RS6000_OVLD_VEC_LXVL 1828 1.1 mrg && POINTER_TYPE_P (type) 1829 1.1 mrg && TYPE_READONLY (TREE_TYPE (type))) 1830 1.1 mrg { 1831 1.1 mrg type = build_qualified_type (TREE_TYPE (type), 0); 1832 1.1 mrg type = build_pointer_type (type); 1833 1.1 mrg arg = fold_convert (type, arg); 1834 1.1 mrg } 1835 1.1 mrg 1836 1.1 mrg args[n] = arg; 1837 1.1 mrg types[n] = type; 1838 1.1 mrg } 1839 1.1 mrg 1840 1.1 mrg /* Some overloads require special handling. 
*/ 1841 1.1 mrg tree returned_expr = NULL; 1842 1.1 mrg resolution res = unresolved; 1843 1.1 mrg 1844 1.1 mrg if (fcode == RS6000_OVLD_VEC_MUL) 1845 1.1 mrg returned_expr = resolve_vec_mul (&res, args, types, loc); 1846 1.1 mrg else if (fcode == RS6000_OVLD_VEC_CMPNE) 1847 1.1 mrg returned_expr = resolve_vec_cmpne (&res, args, types, loc); 1848 1.1 mrg else if (fcode == RS6000_OVLD_VEC_ADDE || fcode == RS6000_OVLD_VEC_SUBE) 1849 1.1 mrg returned_expr = resolve_vec_adde_sube (&res, fcode, args, types, loc); 1850 1.1 mrg else if (fcode == RS6000_OVLD_VEC_ADDEC || fcode == RS6000_OVLD_VEC_SUBEC) 1851 1.1 mrg returned_expr = resolve_vec_addec_subec (&res, fcode, args, types, loc); 1852 1.1 mrg else if (fcode == RS6000_OVLD_VEC_SPLATS || fcode == RS6000_OVLD_VEC_PROMOTE) 1853 1.1 mrg returned_expr = resolve_vec_splats (&res, fcode, arglist, nargs); 1854 1.1 mrg else if (fcode == RS6000_OVLD_VEC_EXTRACT) 1855 1.1 mrg returned_expr = resolve_vec_extract (&res, arglist, nargs, loc); 1856 1.1 mrg else if (fcode == RS6000_OVLD_VEC_INSERT) 1857 1.1 mrg returned_expr = resolve_vec_insert (&res, arglist, nargs, loc); 1858 1.1 mrg else if (fcode == RS6000_OVLD_VEC_STEP) 1859 1.1 mrg returned_expr = resolve_vec_step (&res, arglist, nargs); 1860 1.1 mrg 1861 1.1 mrg if (res == resolved) 1862 1.1 mrg return returned_expr; 1863 1.1 mrg 1864 1.1 mrg /* "Regular" built-in functions and overloaded functions share a namespace 1865 1.1 mrg for some arrays, like rs6000_builtin_decls. But rs6000_overload_info 1866 1.1 mrg only has information for the overloaded functions, so we need an 1867 1.1 mrg adjusted index for that. 
*/ 1868 1.1 mrg unsigned int adj_fcode = fcode - RS6000_OVLD_NONE; 1869 1.1 mrg 1870 1.1 mrg if (res == resolved_bad) 1871 1.1 mrg { 1872 1.1 mrg const char *name = rs6000_overload_info[adj_fcode].ovld_name; 1873 1.1 mrg error ("invalid parameter combination for AltiVec intrinsic %qs", name); 1874 1.1 mrg return error_mark_node; 1875 1.1 mrg } 1876 1.1 mrg 1877 1.1 mrg bool unsupported_builtin = false; 1878 1.1 mrg rs6000_gen_builtins instance_code; 1879 1.1 mrg bool supported = false; 1880 1.1 mrg int instance = rs6000_overload_info[adj_fcode].first_instance; 1881 1.1 mrg gcc_assert (instance != -1); 1882 1.1 mrg 1883 1.1 mrg /* Functions with no arguments can have only one overloaded instance. */ 1884 1.1 mrg gcc_assert (nargs > 0 || rs6000_instance_info[instance].next == -1); 1885 1.1 mrg 1886 1.1 mrg /* Standard overload processing involves determining whether an instance 1887 1.1 mrg exists that is type-compatible with the overloaded function call. In 1888 1.1 mrg a couple of cases, we need to do some extra processing to disambiguate 1889 1.1 mrg between multiple compatible instances. */ 1890 1.1 mrg switch (fcode) 1891 1.1 mrg { 1892 1.1 mrg /* Need to special case __builtin_cmpb because the overloaded forms 1893 1.1 mrg of this function take (unsigned int, unsigned int) or (unsigned 1894 1.1 mrg long long int, unsigned long long int). Since C conventions 1895 1.1 mrg allow the respective argument types to be implicitly coerced into 1896 1.1 mrg each other, the default handling does not provide adequate 1897 1.1 mrg discrimination between the desired forms of the function. */ 1898 1.1 mrg case RS6000_OVLD_SCAL_CMPB: 1899 1.1 mrg { 1900 1.1 mrg machine_mode arg1_mode = TYPE_MODE (types[0]); 1901 1.1 mrg machine_mode arg2_mode = TYPE_MODE (types[1]); 1902 1.1 mrg 1903 1.1 mrg /* If any supplied arguments are wider than 32 bits, resolve to 1904 1.1 mrg 64-bit variant of built-in function. 
*/ 1905 1.1 mrg if (GET_MODE_PRECISION (arg1_mode) > 32 1906 1.1 mrg || GET_MODE_PRECISION (arg2_mode) > 32) 1907 1.1 mrg /* Assure all argument and result types are compatible with 1908 1.1 mrg the built-in function represented by RS6000_BIF_CMPB. */ 1909 1.1 mrg instance_code = RS6000_BIF_CMPB; 1910 1.1 mrg else 1911 1.1 mrg /* Assure all argument and result types are compatible with 1912 1.1 mrg the built-in function represented by RS6000_BIF_CMPB_32. */ 1913 1.1 mrg instance_code = RS6000_BIF_CMPB_32; 1914 1.1 mrg 1915 1.1 mrg tree call = find_instance (&unsupported_builtin, &instance, 1916 1.1 mrg instance_code, fcode, types, args); 1917 1.1 mrg if (call != error_mark_node) 1918 1.1 mrg return call; 1919 1.1 mrg break; 1920 1.1 mrg } 1921 1.1 mrg case RS6000_OVLD_VEC_VSIE: 1922 1.1 mrg { 1923 1.1 mrg machine_mode arg1_mode = TYPE_MODE (types[0]); 1924 1.1 mrg 1925 1.1 mrg /* If supplied first argument is wider than 64 bits, resolve to 1926 1.1 mrg 128-bit variant of built-in function. */ 1927 1.1 mrg if (GET_MODE_PRECISION (arg1_mode) > 64) 1928 1.1 mrg { 1929 1.1 mrg /* If first argument is of float variety, choose variant 1930 1.1 mrg that expects __ieee128 argument. Otherwise, expect 1931 1.1 mrg __int128 argument. */ 1932 1.1 mrg if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT) 1933 1.1 mrg instance_code = RS6000_BIF_VSIEQPF; 1934 1.1 mrg else 1935 1.1 mrg instance_code = RS6000_BIF_VSIEQP; 1936 1.1 mrg } 1937 1.1 mrg else 1938 1.1 mrg { 1939 1.1 mrg /* If first argument is of float variety, choose variant 1940 1.1 mrg that expects double argument. Otherwise, expect 1941 1.1 mrg long long int argument. 
*/ 1942 1.1 mrg if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT) 1943 1.1 mrg instance_code = RS6000_BIF_VSIEDPF; 1944 1.1 mrg else 1945 1.1 mrg instance_code = RS6000_BIF_VSIEDP; 1946 1.1 mrg } 1947 1.1 mrg 1948 1.1 mrg tree call = find_instance (&unsupported_builtin, &instance, 1949 1.1 mrg instance_code, fcode, types, args); 1950 1.1 mrg if (call != error_mark_node) 1951 1.1 mrg return call; 1952 1.1 mrg break; 1953 1.1 mrg } 1954 1.1 mrg default: 1955 1.1 mrg /* Standard overload processing. Look for an instance with compatible 1956 1.1 mrg parameter types. If it is supported in the current context, resolve 1957 1.1 mrg the overloaded call to that instance. */ 1958 1.1 mrg for (; instance != -1; instance = rs6000_instance_info[instance].next) 1959 1.1 mrg { 1960 1.1 mrg tree fntype = rs6000_instance_info_fntype[instance]; 1961 1.1 mrg rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid; 1962 1.1 mrg /* It is possible for an instance to require a data type that isn't 1963 1.1 mrg defined on this target, in which case fntype will be 1964 1.1 mrg NULL. 
*/ 1965 1.1 mrg if (!fntype) 1966 1.1 mrg continue; 1967 1.1 mrg 1968 1.1 mrg bool mismatch = false; 1969 1.1 mrg tree nextparm = TYPE_ARG_TYPES (fntype); 1970 1.1 mrg 1971 1.1 mrg for (unsigned int arg_i = 0; 1972 1.1 mrg arg_i < nargs && nextparm != NULL; 1973 1.1 mrg arg_i++) 1974 1.1 mrg { 1975 1.1 mrg tree parmtype = TREE_VALUE (nextparm); 1976 1.1 mrg if (!rs6000_builtin_type_compatible (types[arg_i], parmtype)) 1977 1.1 mrg { 1978 1.1 mrg mismatch = true; 1979 1.1 mrg break; 1980 1.1 mrg } 1981 1.1 mrg nextparm = TREE_CHAIN (nextparm); 1982 1.1 mrg } 1983 1.1 mrg 1984 1.1 mrg if (mismatch) 1985 1.1 mrg continue; 1986 1.1 mrg 1987 1.1 mrg supported = rs6000_builtin_is_supported (bifid); 1988 1.1 mrg if (rs6000_builtin_decl (bifid, false) != error_mark_node 1989 1.1 mrg && supported) 1990 1.1 mrg { 1991 1.1 mrg tree ret_type = TREE_TYPE (fntype); 1992 1.1 mrg fntype = rs6000_builtin_info_fntype[bifid]; 1993 1.1 mrg return altivec_build_resolved_builtin (args, nargs, fntype, 1994 1.1 mrg ret_type, bifid, fcode); 1995 1.1 mrg } 1996 1.1 mrg else 1997 1.1 mrg { 1998 1.1 mrg unsupported_builtin = true; 1999 1.1 mrg break; 2000 1.1 mrg } 2001 1.1 mrg } 2002 1.1 mrg } 2003 1.1 mrg 2004 1.1 mrg if (unsupported_builtin) 2005 1.1 mrg { 2006 1.1 mrg const char *name = rs6000_overload_info[adj_fcode].ovld_name; 2007 1.1 mrg if (!supported) 2008 1.1 mrg { 2009 1.1 mrg /* Indicate that the instantiation of the overloaded builtin 2010 1.1 mrg name is not available with the target flags in effect. */ 2011 1.1 mrg rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid; 2012 1.1 mrg rs6000_gen_builtins fcode = (rs6000_gen_builtins) bifid; 2013 1.1 mrg rs6000_invalid_builtin (fcode); 2014 1.1 mrg /* Provide clarity of the relationship between the overload 2015 1.1 mrg and the instantiation. 
*/ 2016 1.1 mrg const char *internal_name = rs6000_builtin_info[bifid].bifname; 2017 1.1 mrg rich_location richloc (line_table, input_location); 2018 1.1 mrg inform (&richloc, 2019 1.1 mrg "overloaded builtin %qs is implemented by builtin %qs", 2020 1.1 mrg name, internal_name); 2021 1.1 mrg } 2022 1.1 mrg else 2023 1.1 mrg error ("%qs is not supported in this compiler configuration", name); 2024 1.1 mrg 2025 1.1 mrg return error_mark_node; 2026 1.1 mrg } 2027 1.1 mrg 2028 1.1 mrg /* If we fall through to here, there were no compatible instances. */ 2029 1.1 mrg const char *name = rs6000_overload_info[adj_fcode].ovld_name; 2030 1.1 mrg error ("invalid parameter combination for AltiVec intrinsic %qs", name); 2031 1.1 mrg return error_mark_node; 2032 } 2033