/* Default target hook functions.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The migration of target macros to target hooks works as follows:

   1. Create a target hook that uses the existing target macros to
      implement the same functionality.

   2. Convert all the MI files to use the hook instead of the macro.

   3. Repeat for a majority of the remaining target macros.  This will
      take some time.

   4. Tell target maintainers to start migrating.

   5. Eventually convert the backends to override the hook instead of
      defining the macros.  This will take some time too.

   6. At a point still to be determined, poison the macros.  Unmigrated
      targets will break at this point.

   Note that we expect steps 1-3 to be done by the people that
   understand what the MI does with each macro, and step 5 to be done
   by the target maintainers for their respective targets.

   Note that steps 1 and 2 don't have to be done together, but no
   target can override the new hook until step 2 is complete for it.

   Once the macros are poisoned, we will revert to the old migration
   rules - migrate the macro, callers, and targets all at once.  This
   comment can thus be removed at that point.  A sketch of what step 5
   looks like in a backend follows this comment.  */
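
/* For illustration only, and assuming a hypothetical backend
   "mytarget": a target that once defined a macro such as

     #define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)

   performs step 5 by overriding the corresponding hook in its
   config/mytarget/mytarget.cc instead:

     #undef TARGET_FUNCTION_VALUE_REGNO_P
     #define TARGET_FUNCTION_VALUE_REGNO_P mytarget_function_value_regno_p

     static bool
     mytarget_function_value_regno_p (const unsigned int regno)
     {
       return regno == 0;
     }

   Until then, default_function_value_regno_p below keeps the old
   macro working through the hook (step 1).  */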

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
#include "backend.h"
#include "emit-rtl.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "expmed.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
#include "function-abi.h"
#include "attribs.h"
#include "asan.h"
#include "emit-rtl.h"
#include "gimple.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"

bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      bool strict ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Defer to the old implementation using a goto.  */
  if (strict)
    return strict_memory_address_p (mode, addr);
  else
    return memory_address_p (mode, addr);
#else
  gcc_unreachable ();
#endif
}
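
/* A minimal sketch of a target-specific override of the hook above,
   for a hypothetical backend "mytarget" that accepts only plain
   base-register addresses (illustrative, not part of this file):

     static bool
     mytarget_legitimate_address_p (machine_mode, rtx addr, bool strict)
     {
       if (!REG_P (addr))
	 return false;
       /* Under strict checking only hard registers valid as a base
	  are allowed; before reload, pseudos are acceptable too.  */
       if (strict)
	 return (HARD_REGISTER_P (addr)
		 && REGNO_OK_FOR_BASE_P (REGNO (addr)));
       return true;
     }
*/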

void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
  ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
#endif
}

int
default_unspec_may_trap_p (const_rtx x, unsigned flags)
{
  int i;

  /* Any floating arithmetic may trap.  */
  if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math))
    return 1;

  for (i = 0; i < XVECLEN (x, 0); ++i)
    {
      if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
	return 1;
    }

  return 0;
}

int
default_bitfield_may_trap_p (const_rtx x, unsigned flags)
{
  return 0;
}

machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			       machine_mode mode,
			       int *punsignedp ATTRIBUTE_UNUSED,
			       const_tree funtype ATTRIBUTE_UNUSED,
			       int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && for_return == 2)
    return promote_mode (type, mode, punsignedp);
  return mode;
}

machine_mode
default_promote_function_mode_always_promote (const_tree type,
					      machine_mode mode,
					      int *punsignedp,
					      const_tree funtype ATTRIBUTE_UNUSED,
					      int for_return ATTRIBUTE_UNUSED)
{
  return promote_mode (type, mode, punsignedp);
}

machine_mode
default_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;
  return VOIDmode;
}

bool
default_return_in_memory (const_tree type,
			  const_tree fntype ATTRIBUTE_UNUSED)
{
  return (TYPE_MODE (type) == BLKmode);
}

rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  return x;
}

bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
					 machine_mode)
{
  return false;
}

bool
default_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  return false;
}

rtx
default_expand_builtin_saveregs (void)
{
  error ("%<__builtin_saveregs%> not supported by this target");
  return const0_rtx;
}

void
default_setup_incoming_varargs (cumulative_args_t,
				const function_arg_info &, int *, int)
{
}

/* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE.  */

rtx
default_builtin_setjmp_frame_value (void)
{
  return virtual_stack_vars_rtx;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return (targetm.calls.setup_incoming_varargs
	  != default_setup_incoming_varargs);
}

scalar_int_mode
default_eh_return_filter_mode (void)
{
  return targetm.unwind_word_mode ();
}

scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_libgcc_shift_count_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_unwind_word_mode (void)
{
  return word_mode;
}

/* The default implementation of TARGET_SHIFT_TRUNCATION_MASK.  */

unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
  return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}
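
/* As a worked example of the mask above: when SHIFT_COUNT_TRUNCATED is
   nonzero, an SImode shift yields a mask of 31 and a DImode shift a
   mask of 63, matching hardware that ignores all but the low-order
   bits of the shift count; when it is zero, the mask is 0 and no
   truncation may be assumed.  */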

/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL.  */

unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
{
  return have_insn_for (DIV, mode) ? 3 : 2;
}

/* The default implementation of TARGET_MODE_REP_EXTENDED.  */

int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
  return UNKNOWN;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */

bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return machine mode for non-standard suffix
   or VOIDmode if non-standard suffixes are unsupported.  */
machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
{
  return VOIDmode;
}

/* The generic C++ ABI specifies this is a 64-bit value.  */
tree
default_cxx_guard_type (void)
{
  return long_long_integer_type_node;
}

/* Returns the size of the cookie to use when allocating an array
   whose elements have the indicated TYPE.  Assumes that it is already
   known that a cookie is needed.  */

tree
default_cxx_get_cookie_size (tree type)
{
  tree cookie_size;

  /* We need to allocate an additional max (sizeof (size_t), alignof
     (true_type)) bytes.  */
  tree sizetype_size;
  tree type_align;

  sizetype_size = size_in_bytes (sizetype);
  type_align = size_int (TYPE_ALIGN_UNIT (type));
  if (tree_int_cst_lt (type_align, sizetype_size))
    cookie_size = sizetype_size;
  else
    cookie_size = type_align;

  return cookie_size;
}
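
/* For example, on an LP64 target (sizeof (size_t) == 8), "new T[n]"
   for a T with 16-byte alignment gets a 16-byte cookie, while an
   array of plain 4-byte-aligned ints gets the minimum 8-byte cookie
   holding the element count.  */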

/* Return true if a parameter must be passed by reference.  This version
   of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */

bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
					   const function_arg_info &arg)
{
  return targetm.calls.must_pass_in_stack (arg);
}

/* Return true if a parameter follows callee copies conventions.  This
   version of the hook is true for all named arguments.  */

bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
  return arg.named;
}

/* Emit to STREAM the assembler syntax for insn operand X.  */

void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
		       int code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND
  PRINT_OPERAND (stream, x, code);
#else
  gcc_unreachable ();
#endif
}

/* Emit to STREAM the assembler syntax for an insn operand whose memory
   address is X.  */

void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
			       machine_mode /*mode*/,
			       rtx x ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_ADDRESS
  PRINT_OPERAND_ADDRESS (stream, x);
#else
  gcc_unreachable ();
#endif
}

/* Return true if CODE is a valid punctuation character for the
   `print_operand' hook.  */

bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_PUNCT_VALID_P
  return PRINT_OPERAND_PUNCT_VALID_P (code);
#else
  return false;
#endif
}

/* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  */
tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
{
  const char *skipped = name + (*name == '*' ? 1 : 0);
  const char *stripped = targetm.strip_name_encoding (skipped);
  if (*name != '*' && user_label_prefix[0])
    stripped = ACONCAT ((user_label_prefix, stripped, NULL));
  return get_identifier (stripped);
}
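
/* For example, on a target whose user label prefix is "_", the name
   "foo" becomes the identifier "_foo", while "*foo" (already encoded
   for assembly output) has the '*' skipped and is returned as "foo"
   with no prefix added.  */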

/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE.  */

machine_mode
default_translate_mode_attribute (machine_mode mode)
{
  return mode;
}

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   By default we guess this means that any C type is supported.  If
   we can't map the mode back to a type that would be available in C,
   then reject it.  The special case here is the double-word
   arithmetic supported by optabs.cc.  */

bool
default_scalar_mode_supported_p (scalar_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      if (precision == 2 * BITS_PER_WORD)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_UFRACT:
    case MODE_ACCUM:
    case MODE_UACCUM:
      return false;

    default:
      gcc_unreachable ();
    }
}
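
/* For example, on a typical 32-bit target this accepts QImode through
   DImode (DImode qualifying either as long long or via the
   2 * BITS_PER_WORD double-word case) but rejects TImode, since no C
   integer type maps to 128 bits there.  */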

/* Return true if libgcc supports floating-point mode MODE (known to
   be supported as a scalar mode).  */

bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  switch (mode)
    {
#ifdef HAVE_SFmode
    case E_SFmode:
#endif
#ifdef HAVE_DFmode
    case E_DFmode:
#endif
#ifdef HAVE_XFmode
    case E_XFmode:
#endif
#ifdef HAVE_TFmode
    case E_TFmode:
#endif
      return true;

    default:
      return false;
    }
}

/* Return the machine mode to use for the type _FloatN, if EXTENDED is
   false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
   supported.  */
opt_scalar_float_mode
default_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      opt_scalar_float_mode cand1, cand2;
      scalar_float_mode mode;
      switch (n)
	{
	case 32:
#ifdef HAVE_DFmode
	  cand1 = DFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_XFmode
	  cand1 = XFmode;
#endif
#ifdef HAVE_TFmode
	  cand2 = TFmode;
#endif
	  break;

	case 128:
	  break;

	default:
	  /* Those are the only valid _FloatNx types.  */
	  gcc_unreachable ();
	}
      if (cand1.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand1;
      if (cand2.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand2;
    }
  else
    {
      opt_scalar_float_mode cand;
      scalar_float_mode mode;
      switch (n)
	{
	case 16:
	  /* Always enable _Float16 if we have basic support for the mode.
	     Targets can control the range and precision of operations on
	     the _Float16 type using TARGET_C_EXCESS_PRECISION.  */
#ifdef HAVE_HFmode
	  cand = HFmode;
#endif
	  break;

	case 32:
#ifdef HAVE_SFmode
	  cand = SFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_DFmode
	  cand = DFmode;
#endif
	  break;

	case 128:
#ifdef HAVE_TFmode
	  cand = TFmode;
#endif
	  break;

	default:
	  break;
	}
      if (cand.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits == n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand;
    }
  return opt_scalar_float_mode ();
}
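
/* As a concrete illustration: on x86 this typically resolves _Float32
   to SFmode, _Float64 to DFmode and _Float128 to TFmode (each format
   having exactly N IEEE bits), while _Float64x gets XFmode and
   _Float32x falls through to DFmode (more than N IEEE bits).  Modes
   that fail the IEEE-bits or libgcc checks simply leave the type
   unsupported.  */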

/* Define this to return true if the _FloatN and _FloatNx built-in functions
   should implicitly enable the built-in function without the __builtin_ prefix
   in addition to the normal built-in function with the __builtin_ prefix.  The
   default is to only enable built-in functions without the __builtin_ prefix
   for the GNU C language.  The argument FUNC is the enum built_in_function
   id of the function to be enabled.  */

bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
{
  static bool first_time_p = true;
  static bool c_or_objective_c;

  if (first_time_p)
    {
      first_time_p = false;
      c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
    }

  return c_or_objective_c;
}

/* Make some target macros usable by target-independent code.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}

/* True if the target supports floating-point exceptions and rounding
   modes.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}

/* True if the target supports decimal floating point.  */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions.  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}

/* Return true if we predict the loop LOOP will be transformed to a
   low-overhead loop, otherwise return false.

   By default, false is returned, as this hook's applicability should be
   verified for each target.  Target maintainers should redefine the hook
   if the target can take advantage of it.  */

bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
  return false;
}

/* By default, just use the input MODE itself.  */

machine_mode
default_preferred_doloop_mode (machine_mode mode)
{
  return mode;
}

/* Return NULL if INSN is valid within a low-overhead loop, otherwise
   return an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL.  A called function may clobber any special registers required
   for low-overhead looping.  Additionally, some targets (e.g., PPC) use the
   count register for branch on table instructions.  We reject the doloop
   pattern in these cases.  */

const char *
default_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in loop.";

  if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
    return "Computed branch in the loop.";

  return NULL;
}

/* Mapping of builtin functions to vectorized variants.  */

tree
default_builtin_vectorized_function (unsigned int, tree, tree)
{
  return NULL_TREE;
}

/* Mapping of target builtin functions to vectorized variants.  */

tree
default_builtin_md_vectorized_function (tree, tree, tree)
{
  return NULL_TREE;
}

/* Default vectorizer cost model values.  */

int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                    tree vectype,
                                    int misalign ATTRIBUTE_UNUSED)
{
  switch (type_of_cost)
    {
      case scalar_stmt:
      case scalar_load:
      case scalar_store:
      case vector_stmt:
      case vector_load:
      case vector_store:
      case vec_to_scalar:
      case scalar_to_vec:
      case cond_branch_not_taken:
      case vec_perm:
      case vec_promote_demote:
        return 1;

      case unaligned_load:
      case unaligned_store:
        return 2;

      case cond_branch_taken:
        return 3;

      case vec_construct:
	return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;

      default:
        gcc_unreachable ();
    }
}
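
/* Under this default model, for example, building a V4SImode vector
   from scalars (vec_construct) costs TYPE_VECTOR_SUBPARTS - 1 = 3,
   a misaligned load costs twice an aligned one, and a taken branch
   costs three times a straight-line statement.  */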

/* Reciprocal.  */

tree
default_builtin_reciprocal (tree)
{
  return NULL_TREE;
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
					  const function_arg_info &)
{
  return false;
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
					 const function_arg_info &)
{
  return true;
}

int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
				     const function_arg_info &)
{
  return 0;
}

void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
				tree ATTRIBUTE_UNUSED)
{
}

/* Default implementation of TARGET_PUSH_ARGUMENT.  */

bool
default_push_argument (unsigned int)
{
#ifdef PUSH_ROUNDING
  return !ACCUMULATE_OUTGOING_ARGS;
#else
  return false;
#endif
}

void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default implementation of TARGET_FUNCTION_ARG_OFFSET.  */

HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
{
  return 0;
}

/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
   upward, but pad short args downward on big-endian machines.  */

pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
{
  if (!BYTES_BIG_ENDIAN)
    return PAD_UPWARD;

  unsigned HOST_WIDE_INT size;
  if (mode == BLKmode)
    {
      if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	return PAD_UPWARD;
      size = int_size_in_bytes (type);
    }
  else
    /* Targets with variable-sized modes must override this hook
       and handle variable-sized modes explicitly.  */
    size = GET_MODE_SIZE (mode).to_constant ();

  if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
    return PAD_DOWNWARD;

  return PAD_UPWARD;
}
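
/* Worked example: on a big-endian target with a 32-bit PARM_BOUNDARY,
   a 2-byte short argument is smaller than its 4-byte parameter slot
   and so is padded downward, placing the value at the most
   significant end of the slot; a 4-byte int fills its slot and keeps
   the upward default.  */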

rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			       const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
				     const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}

const char *
hook_invalid_arg_for_unprototyped_fn (
	const_tree typelist ATTRIBUTE_UNUSED,
	const_tree funcdecl ATTRIBUTE_UNUSED,
	const_tree val ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Initialize the stack protection decls.  */

/* Stack protection related decls living in libgcc.  */
static GTY(()) tree stack_chk_guard_decl;

tree
default_stack_protect_guard (void)
{
  tree t = stack_chk_guard_decl;

  if (t == NULL)
    {
      rtx x;

      t = build_decl (UNKNOWN_LOCATION,
		      VAR_DECL, get_identifier ("__stack_chk_guard"),
		      ptr_type_node);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;

      /* Do not share RTL as the declaration is visible outside of
	 current function.  */
      x = DECL_RTL (t);
      RTX_FLAG (x, used) = 1;

      stack_chk_guard_decl = t;
    }

  return t;
}
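
/* The decl built above names the canary object the C runtime exports:
   with -fstack-protector the prologue copies __stack_chk_guard into a
   slot below the return address, and the epilogue compares that slot
   against the guard, calling the __stack_chk_fail routine declared
   next on a mismatch.  */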

static GTY(()) tree stack_chk_fail_decl;

tree
default_external_stack_protect_fail (void)
{
  tree t = stack_chk_fail_decl;

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION,
		      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
      DECL_VISIBILITY_SPECIFIED (t) = 1;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
}

tree
default_hidden_stack_protect_fail (void)
{
#ifndef HAVE_GAS_HIDDEN
  return default_external_stack_protect_fail ();
#else
  tree t = stack_chk_fail_decl;

  if (!flag_pic)
    return default_external_stack_protect_fail ();

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		      get_identifier ("__stack_chk_fail_local"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY_SPECIFIED (t) = 1;
#if 1
      /* This is a hack: it appears that our gas does not generate
	 @PLT for hidden symbols.  It could be that we need a newer
	 version, or that this local function is handled differently
	 on linux.  */
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
#else
      DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;
#endif

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
#endif
}

bool
hook_bool_const_rtx_commutative_p (const_rtx x,
				   int outer_code ATTRIBUTE_UNUSED)
{
  return COMMUTATIVE_P (x);
}

rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
			const_tree fn_decl_or_type,
			bool outgoing ATTRIBUTE_UNUSED)
{
  /* The old interface doesn't handle receiving the function type.  */
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

#ifdef FUNCTION_VALUE
  return FUNCTION_VALUE (ret_type, fn_decl_or_type);
#else
  gcc_unreachable ();
#endif
}

rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
  return LIBCALL_VALUE (MACRO_MODE (mode));
#else
  gcc_unreachable ();
#endif
}

/* The default hook for TARGET_FUNCTION_VALUE_REGNO_P.  */

bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
{
#ifdef FUNCTION_VALUE_REGNO_P
  return FUNCTION_VALUE_REGNO_P (regno);
#else
  gcc_unreachable ();
#endif
}

/* Choose the mode and rtx to use to zero REGNO, storing them in *PMODE
   and *PREGNO_RTX and returning TRUE if successful, otherwise returning
   FALSE.  If the natural mode for REGNO doesn't work, attempt to group
   it with subsequent adjacent registers set in TOZERO.  */

static inline bool
zcur_select_mode_rtx (unsigned int regno, machine_mode *pmode,
		      rtx *pregno_rtx, HARD_REG_SET tozero)
{
  rtx regno_rtx = regno_reg_rtx[regno];
  machine_mode mode = GET_MODE (regno_rtx);

  /* If the natural mode doesn't work, try some wider mode.  */
  if (!targetm.hard_regno_mode_ok (regno, mode))
    {
      bool found = false;
      for (int nregs = 2;
	   !found && nregs <= hard_regno_max_nregs
	     && regno + nregs <= FIRST_PSEUDO_REGISTER
	     && TEST_HARD_REG_BIT (tozero,
				   regno + nregs - 1);
	   nregs++)
	{
	  mode = choose_hard_reg_mode (regno, nregs, 0);
	  if (mode == E_VOIDmode)
	    continue;
	  gcc_checking_assert (targetm.hard_regno_mode_ok (regno, mode));
	  regno_rtx = gen_rtx_REG (mode, regno);
	  found = true;
	}
      if (!found)
	return false;
    }

  *pmode = mode;
  *pregno_rtx = regno_rtx;
  return true;
}

/* The default hook for TARGET_ZERO_CALL_USED_REGS.  */

HARD_REG_SET
default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
{
  gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));

  HARD_REG_SET failed;
  CLEAR_HARD_REG_SET (failed);
  bool progress = false;

  /* First, try to zero each register in need_zeroed_hardregs by
     loading a zero into it, taking note of any failures in
     FAILED.  */
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      {
	rtx_insn *last_insn = get_last_insn ();
	rtx regno_rtx;
	machine_mode mode;

	if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				   need_zeroed_hardregs))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    continue;
	  }

	rtx zero = CONST0_RTX (mode);
	rtx_insn *insn = emit_move_insn (regno_rtx, zero);
	if (!valid_insn_p (insn))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    delete_insns_since (last_insn);
	  }
	else
	  {
	    progress = true;
	    regno += hard_regno_nregs (regno, mode) - 1;
	  }
      }

  /* Now retry with copies from zeroed registers, as long as we've
     made some PROGRESS, and registers remain to be zeroed in
     FAILED.  */
  while (progress && !hard_reg_set_empty_p (failed))
    {
      HARD_REG_SET retrying = failed;

      CLEAR_HARD_REG_SET (failed);
      progress = false;

      for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	if (TEST_HARD_REG_BIT (retrying, regno))
	  {
	    rtx regno_rtx;
	    machine_mode mode;

	    /* This might select registers we've already zeroed.  If grouping
	       with them is what it takes to get regno zeroed, so be it.  */
	    if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				       need_zeroed_hardregs))
	      {
		SET_HARD_REG_BIT (failed, regno);
		continue;
	      }

	    bool success = false;
	    /* Look for a source.  */
	    for (unsigned int src = 0; src < FIRST_PSEUDO_REGISTER; src++)
	      {
		/* If SRC hasn't been zeroed (yet?), skip it.  */
		if (! TEST_HARD_REG_BIT (need_zeroed_hardregs, src))
		  continue;
		if (TEST_HARD_REG_BIT (retrying, src))
		  continue;

		/* Check that SRC can hold MODE, and that any other
		   registers needed to hold MODE in SRC have also been
		   zeroed.  */
		if (!targetm.hard_regno_mode_ok (src, mode))
		  continue;
		unsigned n = targetm.hard_regno_nregs (src, mode);
		bool ok = true;
		for (unsigned i = 1; ok && i < n; i++)
		  ok = (TEST_HARD_REG_BIT (need_zeroed_hardregs, src + i)
			&& !TEST_HARD_REG_BIT (retrying, src + i));
		if (!ok)
		  continue;

		/* SRC is usable, try to copy from it.  */
		rtx_insn *last_insn = get_last_insn ();
		rtx src_rtx = gen_rtx_REG (mode, src);
		rtx_insn *insn = emit_move_insn (regno_rtx, src_rtx);
		if (!valid_insn_p (insn))
		  /* It didn't work, remove any inserts.  We'll look
		     for another SRC.  */
		  delete_insns_since (last_insn);
		else
		  {
		    /* We're done for REGNO.  */
		    success = true;
		    break;
		  }
	      }

	    /* If nothing worked for REGNO this round, mark it to be
	       retried if we get another round.  */
	    if (!success)
	      SET_HARD_REG_BIT (failed, regno);
	    else
	      {
		/* Take note so as to enable another round if needed.  */
		progress = true;
		regno += hard_regno_nregs (regno, mode) - 1;
	      }
	  }
    }

  /* If any register remained, report it.  */
  if (!progress)
    {
      static bool issued_error;
      if (!issued_error)
	{
	  issued_error = true;
	  sorry ("%qs not supported on this target",
		 "-fzero-call-used-regs");
	}
    }

  return need_zeroed_hardregs;
}
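
/* For example, with -fzero-call-used-regs=used a function that
   clobbered only general registers gets one move-from-zero insn per
   register appended before its return; a register class whose
   move-from-constant-zero does not match any insn pattern (some
   vector or flag registers, say) is retried in the second pass above
   as a copy from an already-zeroed register group.  */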

rtx
default_internal_arg_pointer (void)
{
  /* If the reg that the virtual arg pointer will be translated into is
     not a fixed reg or is the stack pointer, make a copy of the virtual
     arg pointer, and address parms via the copy.  The frame pointer is
     considered fixed even though it is not marked as such.  */
  if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
       || ! (fixed_regs[ARG_POINTER_REGNUM]
	     || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}

rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  if (incoming_p)
    {
#ifdef STATIC_CHAIN_INCOMING_REGNUM
      return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
#endif
    }

#ifdef STATIC_CHAIN_REGNUM
  return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
#endif

  {
    static bool issued_error;
    if (!issued_error)
      {
	issued_error = true;
	sorry ("nested functions not supported on this target");
      }

    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
    return gen_rtx_MEM (Pmode, stack_pointer_rtx);
  }
}

void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
			 rtx ARG_UNUSED (r_chain))
{
  sorry ("nested function trampolines not supported on this target");
}

poly_int64
default_return_pops_args (tree, tree, poly_int64)
{
  return 0;
}

reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
					 reg_class_t cl,
					 reg_class_t best_cl ATTRIBUTE_UNUSED)
{
  return cl;
}

extern bool
default_lra_p (void)
{
  return true;
}

int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
{
  return 0;
}

extern bool
default_register_usage_leveling_p (void)
{
  return false;
}

extern bool
default_different_addr_displacement_p (void)
{
  return false;
}

reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
			  reg_class_t reload_class_i ATTRIBUTE_UNUSED,
			  machine_mode reload_mode ATTRIBUTE_UNUSED,
			  secondary_reload_info *sri)
{
  enum reg_class rclass = NO_REGS;
  enum reg_class reload_class = (enum reg_class) reload_class_i;

  if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
    {
      sri->icode = sri->prev_sri->t_icode;
      return NO_REGS;
    }
#ifdef SECONDARY_INPUT_RELOAD_CLASS
  if (in_p)
    rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
					   MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
  if (! in_p)
    rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
					    MACRO_MODE (reload_mode), x);
#endif
  if (rclass != NO_REGS)
    {
      enum insn_code icode
	= direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
				reload_mode);

      if (icode != CODE_FOR_nothing
	  && !insn_operand_matches (icode, in_p, x))
	icode = CODE_FOR_nothing;
      else if (icode != CODE_FOR_nothing)
	{
	  const char *insn_constraint, *scratch_constraint;
	  enum reg_class insn_class, scratch_class;

	  gcc_assert (insn_data[(int) icode].n_operands == 3);
	  insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
	  if (!*insn_constraint)
	    insn_class = ALL_REGS;
	  else
	    {
	      if (in_p)
		{
		  gcc_assert (*insn_constraint == '=');
		  insn_constraint++;
		}
	      insn_class = (reg_class_for_constraint
			    (lookup_constraint (insn_constraint)));
	      gcc_assert (insn_class != NO_REGS);
	    }

	  scratch_constraint = insn_data[(int) icode].operand[2].constraint;
	  /* The scratch register's constraint must start with "=&",
	     except for an input reload, where only "=" is necessary,
	     and where it might be beneficial to re-use registers from
	     the input.  */
	  gcc_assert (scratch_constraint[0] == '='
		      && (in_p || scratch_constraint[1] == '&'));
	  scratch_constraint++;
	  if (*scratch_constraint == '&')
	    scratch_constraint++;
	  scratch_class = (reg_class_for_constraint
			   (lookup_constraint (scratch_constraint)));

	  if (reg_class_subset_p (reload_class, insn_class))
	    {
	      gcc_assert (scratch_class == rclass);
	      rclass = NO_REGS;
	    }
	  else
	    rclass = insn_class;

	}
      if (rclass == NO_REGS)
	sri->icode = icode;
      else
	sri->t_icode = icode;
    }
  return rclass;
}

/* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE.  */

machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
{
  if (!targetm.lra_p ()
      && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
      && INTEGRAL_MODE_P (mode))
    return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
  return mode;
}
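
/* For example, under old reload (not LRA) on a 32-bit target, a
   QImode value moved between register classes through memory is
   widened to SImode here, so a full word travels through the stack
   slot rather than a single byte.  */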

/* By default, if flag_pic is true, then neither local nor global relocs
   should be placed in readonly memory.  */

int
default_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 0;
}

/* By default, address diff vectors are generated for jump tables
   when flag_pic is true.  */

bool
default_generate_pic_addr_diff_vec (void)
{
  return flag_pic;
}

/* By default, do no modification.  */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
					 tree id)
{
  return id;
}

/* The default implementation of TARGET_STATIC_RTX_ALIGNMENT.  */

HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
{
  return GET_MODE_ALIGNMENT (mode);
}

/* The default implementation of TARGET_CONSTANT_ALIGNMENT.  */

HOST_WIDE_INT
default_constant_alignment (const_tree, HOST_WIDE_INT align)
{
  return align;
}

/* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
   to at least BITS_PER_WORD but otherwise makes no changes.  */

HOST_WIDE_INT
constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, BITS_PER_WORD);
  return align;
}

/* Default to natural alignment for vector types, bounded by
   MAX_OFILE_ALIGNMENT.  */

HOST_WIDE_INT
default_vector_alignment (const_tree type)
{
  unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
  tree size = TYPE_SIZE (type);
  if (tree_fits_uhwi_p (size))
    align = tree_to_uhwi (size);
  if (align >= MAX_OFILE_ALIGNMENT)
    return MAX_OFILE_ALIGNMENT;
  return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type)));
}
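
/* For example, a 16-byte V4SF vector type gets 128-bit alignment here
   (its own size in bits), while a hypothetical 1024-byte vector would
   be capped at MAX_OFILE_ALIGNMENT on targets whose object format
   cannot express larger alignments.  */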

/* The default implementation of
   TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT.  */

poly_uint64
default_preferred_vector_alignment (const_tree type)
{
  return TYPE_ALIGN (type);
}

/* By default assume vectors of element TYPE require a multiple of the natural
   alignment of TYPE.  TYPE is naturally aligned if IS_PACKED is false.  */
bool
default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
{
  return ! is_packed;
}

/* By default, assume that a target supports misaligned memory accesses
   at any misalignment if it supports the movmisalign pattern.
   IS_PACKED is true if the memory access is defined in a packed struct.  */
bool
default_builtin_support_vector_misalignment (machine_mode mode,
					     const_tree type
					     ATTRIBUTE_UNUSED,
					     int misalignment
					     ATTRIBUTE_UNUSED,
					     bool is_packed
					     ATTRIBUTE_UNUSED)
{
  if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
    return true;
  return false;
}

/* By default, only attempt to parallelize bitwise operations, and
   possibly adds/subtracts using bit-twiddling.  */

machine_mode
default_preferred_simd_mode (scalar_mode)
{
  return word_mode;
}

/* By default do not split reductions further.  */

machine_mode
default_split_reduction (machine_mode mode)
{
  return mode;
}

/* By default only the preferred vector mode is tried.  */

unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
{
  return 0;
}

/* The default implementation of TARGET_VECTORIZE_RELATED_MODE.  */

opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
				scalar_mode element_mode,
				poly_uint64 nunits)
{
  machine_mode result_mode;
  if ((maybe_ne (nunits, 0U)
       || multiple_p (GET_MODE_SIZE (vector_mode),
		      GET_MODE_SIZE (element_mode), &nunits))
      && mode_for_vector (element_mode, nunits).exists (&result_mode)
      && VECTOR_MODE_P (result_mode)
      && targetm.vector_mode_supported_p (result_mode))
    return result_mode;

  return opt_machine_mode ();
}

/* By default a vector of integers is used as a mask.  */

opt_machine_mode
default_get_mask_mode (machine_mode mode)
{
  return related_int_vector_mode (mode);
}

/* By default consider masked stores to be expensive.  */

bool
default_empty_mask_is_expensive (unsigned ifn)
{
  return ifn == IFN_MASK_STORE;
}

/* By default, the cost model accumulates three separate costs (prologue,
   loop body, and epilogue) for a vectorized loop or block.  So allocate an
   array of three unsigned ints, set it to zero, and return its address.  */

vector_costs *
default_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar)
{
  return new vector_costs (vinfo, costing_for_scalar);
}

/* Determine whether or not a pointer mode is valid.  Assume defaults
   of ptr_mode or Pmode - can be overridden.  */
bool
default_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == ptr_mode || mode == Pmode);
}

/* Determine whether the memory reference specified by REF may alias
   the C library's errno location.  */
bool
default_ref_may_alias_errno (ao_ref *ref)
{
  tree base = ao_ref_base (ref);
  /* The default implementation assumes the errno location is
     a declaration of type int or is always accessed via a
     pointer to int.  We assume that accesses to errno are
     not deliberately obfuscated (even in conforming ways).  */
  if (TYPE_UNSIGNED (TREE_TYPE (base))
      || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
    return false;
  /* The default implementation assumes an errno location declaration
     is never defined in the current compilation unit and may not be
     aliased by a local variable.  */
  if (DECL_P (base)
      && DECL_EXTERNAL (base)
      && !TREE_STATIC (base))
    return true;
  else if (TREE_CODE (base) == MEM_REF
	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
      return !pi || pi->pt.anything || pi->pt.nonlocal;
    }
  return false;
}
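
/* For example, "extern int errno; errno = 0;" matches the DECL_P case
   above, and a store through an "int *p" with no points-to
   information matches the MEM_REF case; an access to an unsigned or
   non-int-mode object is never considered to alias errno under this
   model.  */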
   1600 
   1601 /* Return the mode for a pointer to a given ADDRSPACE,
   1602    defaulting to ptr_mode for all address spaces.  */
   1603 
   1604 scalar_int_mode
   1605 default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
   1606 {
   1607   return ptr_mode;
   1608 }
   1609 
   1610 /* Return the mode for an address in a given ADDRSPACE,
   1611    defaulting to Pmode for all address spaces.  */
   1612 
   1613 scalar_int_mode
   1614 default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
   1615 {
   1616   return Pmode;
   1617 }
   1618 
   1619 /* Named address space version of valid_pointer_mode.
   1620    To match the above, the same modes apply to all address spaces.  */
   1621 
   1622 bool
   1623 default_addr_space_valid_pointer_mode (scalar_int_mode mode,
   1624 				       addr_space_t as ATTRIBUTE_UNUSED)
   1625 {
   1626   return targetm.valid_pointer_mode (mode);
   1627 }
   1628 
   1629 /* Some places still assume that all pointer or address modes are the
   1630    standard Pmode and ptr_mode.  These optimizations become invalid if
   1631    the target actually supports multiple different modes.  For now,
   1632    we disable such optimizations on such targets, using this function.  */
   1633 
   1634 bool
   1635 target_default_pointer_address_modes_p (void)
   1636 {
   1637   if (targetm.addr_space.address_mode != default_addr_space_address_mode)
   1638     return false;
   1639   if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
   1640     return false;
   1641 
   1642   return true;
   1643 }
   1644 
   1645 /* Named address space version of legitimate_address_p.
   1646    By default, all address spaces have the same form.  */
   1647 
   1648 bool
   1649 default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
   1650 					 bool strict,
   1651 					 addr_space_t as ATTRIBUTE_UNUSED)
   1652 {
   1653   return targetm.legitimate_address_p (mode, mem, strict);
   1654 }
   1655 
   1656 /* Named address space version of LEGITIMIZE_ADDRESS.
   1657    By default, all address spaces have the same form.  */
   1658 
   1659 rtx
   1660 default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
   1661 				       addr_space_t as ATTRIBUTE_UNUSED)
   1662 {
   1663   return targetm.legitimize_address (x, oldx, mode);
   1664 }
   1665 
   1666 /* The default hook for determining if one named address space is a subset of
   1667    another and to return which address space to use as the common address
   1668    space.  */
   1669 
   1670 bool
   1671 default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
   1672 {
   1673   return (subset == superset);
   1674 }
   1675 
   1676 /* The default hook for determining if 0 within a named address
   1677    space is a valid address.  */
   1678 
   1679 bool
   1680 default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
   1681 {
   1682   return false;
   1683 }
   1684 
   1685 /* The default hook for debugging the address space is to return the
   1686    address space number to indicate DW_AT_address_class.  */
   1687 int
   1688 default_addr_space_debug (addr_space_t as)
   1689 {
   1690   return as;
   1691 }
   1692 
   1693 /* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
   1694    Don't complain about any address space.  */
   1695 
   1696 void
   1697 default_addr_space_diagnose_usage (addr_space_t, location_t)
   1698 {
   1699 }
   1700 
   1701 
   1702 /* The default hook for TARGET_ADDR_SPACE_CONVERT. This hook should never be
   1703    called for targets with only a generic address space.  */
   1704 
   1705 rtx
   1706 default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
   1707 			    tree from_type ATTRIBUTE_UNUSED,
   1708 			    tree to_type ATTRIBUTE_UNUSED)
   1709 {
   1710   gcc_unreachable ();
   1711 }
   1712 
   1713 /* The defualt implementation of TARGET_HARD_REGNO_NREGS.  */
   1714 
   1715 unsigned int
   1716 default_hard_regno_nregs (unsigned int, machine_mode mode)
   1717 {
   1718   /* Targets with variable-sized modes must provide their own definition
   1719      of this hook.  */
   1720   return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
   1721 }
   1722 
   1723 bool
   1724 default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
   1725 {
   1726   return true;
   1727 }
   1728 
   1729 /* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P.  */
   1730 
   1731 bool
   1732 default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
   1733 				  addr_space_t addrspace ATTRIBUTE_UNUSED)
   1734 {
   1735   return false;
   1736 }
   1737 
   1738 extern bool default_new_address_profitable_p (rtx, rtx);
   1739 
   1740 
   1741 /* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P.  */
   1742 
   1743 bool
   1744 default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
   1745 				  rtx_insn *insn ATTRIBUTE_UNUSED,
   1746 				  rtx new_addr ATTRIBUTE_UNUSED)
   1747 {
   1748   return true;
   1749 }
   1750 
   1751 bool
   1752 default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
   1753 					 tree ARG_UNUSED (name),
   1754 					 tree ARG_UNUSED (args),
   1755 					 int ARG_UNUSED (flags))
   1756 {
   1757   warning (OPT_Wattributes,
   1758 	   "target attribute is not supported on this machine");
   1759 
   1760   return false;
   1761 }
   1762 
   1763 bool
   1764 default_target_option_pragma_parse (tree ARG_UNUSED (args),
   1765 				    tree ARG_UNUSED (pop_target))
   1766 {
    1767   /* If args is NULL, the caller is handle_pragma_pop_options ().  In that
    1768      case, emit no warning because "#pragma GCC pop_options" is valid on
    1769      targets that do not have the "target" pragma.  */
   1770   if (args)
   1771     warning (OPT_Wpragmas,
   1772 	     "%<#pragma GCC target%> is not supported for this machine");
   1773 
   1774   return false;
   1775 }
   1776 
   1777 bool
   1778 default_target_can_inline_p (tree caller, tree callee)
   1779 {
   1780   tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
   1781   tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
   1782   if (! callee_opts)
   1783     callee_opts = target_option_default_node;
   1784   if (! caller_opts)
   1785     caller_opts = target_option_default_node;
   1786 
   1787   /* If both caller and callee have attributes, assume that if the
   1788      pointer is different, the two functions have different target
   1789      options since build_target_option_node uses a hash table for the
   1790      options.  */
   1791   return callee_opts == caller_opts;
   1792 }
   1793 
    1794 /* By default, return false: no target information needs to be collected
    1795    for inlining.  Target maintainers should redefine the hook if their
    1796    target wants to take advantage of it.  */
   1797 
   1798 bool
   1799 default_need_ipa_fn_target_info (const_tree, unsigned int &)
   1800 {
   1801   return false;
   1802 }
   1803 
   1804 bool
   1805 default_update_ipa_fn_target_info (unsigned int &, const gimple *)
   1806 {
   1807   return false;
   1808 }
   1809 
   1810 /* If the machine does not have a case insn that compares the bounds,
   1811    this means extra overhead for dispatch tables, which raises the
   1812    threshold for using them.  */
   1813 
   1814 unsigned int
   1815 default_case_values_threshold (void)
   1816 {
   1817   return (targetm.have_casesi () ? 4 : 5);
   1818 }
   1819 
   1820 bool
   1821 default_have_conditional_execution (void)
   1822 {
   1823   return HAVE_conditional_execution;
   1824 }
   1825 
    1826 /* By default we assume that C99 functions are present at run time,
    1827    but sincos is not.  */
   1828 bool
   1829 default_libc_has_function (enum function_class fn_class,
   1830 			   tree type ATTRIBUTE_UNUSED)
   1831 {
   1832   if (fn_class == function_c94
   1833       || fn_class == function_c99_misc
   1834       || fn_class == function_c99_math_complex)
   1835     return true;
   1836 
   1837   return false;
   1838 }
   1839 
    1840 /* By default, assume that libc does not have a fast implementation.  */
   1841 
   1842 bool
   1843 default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
   1844 {
   1845   return false;
   1846 }
   1847 
   1848 bool
   1849 gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
   1850 		       tree type ATTRIBUTE_UNUSED)
   1851 {
   1852   return true;
   1853 }
   1854 
   1855 bool
   1856 no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
   1857 			  tree type ATTRIBUTE_UNUSED)
   1858 {
   1859   return false;
   1860 }
   1861 
    1862 /* Assume some C99 functions are present at run time, including sincos.  */
   1863 bool
   1864 bsd_libc_has_function (enum function_class fn_class,
   1865 		       tree type ATTRIBUTE_UNUSED)
   1866 {
   1867   if (fn_class == function_c94
   1868       || fn_class == function_c99_misc
   1869       || fn_class == function_sincos)
   1870     return true;
   1871 
   1872   return false;
   1873 }
   1874 
   1875 
   1876 tree
   1877 default_builtin_tm_load_store (tree ARG_UNUSED (type))
   1878 {
   1879   return NULL_TREE;
   1880 }
   1881 
   1882 /* Compute cost of moving registers to/from memory.  */
   1883 
   1884 int
   1885 default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
   1886 			  reg_class_t rclass ATTRIBUTE_UNUSED,
   1887 			  bool in ATTRIBUTE_UNUSED)
   1888 {
    1889 #ifndef MEMORY_MOVE_COST
    1890   return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
    1891 #else
    1892   return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
    1893 #endif
   1894 }
   1895 
   1896 /* Compute cost of moving data from a register of class FROM to one of
   1897    TO, using MODE.  */
   1898 
   1899 int
   1900 default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
   1901                             reg_class_t from ATTRIBUTE_UNUSED,
   1902                             reg_class_t to ATTRIBUTE_UNUSED)
   1903 {
   1904 #ifndef REGISTER_MOVE_COST
   1905   return 2;
   1906 #else
   1907   return REGISTER_MOVE_COST (MACRO_MODE (mode),
   1908 			     (enum reg_class) from, (enum reg_class) to);
   1909 #endif
   1910 }
   1911 
   1912 /* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS.  */
   1913 
   1914 bool
   1915 default_slow_unaligned_access (machine_mode, unsigned int)
   1916 {
   1917   return STRICT_ALIGNMENT;
   1918 }
   1919 
   1920 /* The default implementation of TARGET_ESTIMATED_POLY_VALUE.  */
   1921 
   1922 HOST_WIDE_INT
   1923 default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
   1924 {
   1925   return x.coeffs[0];
   1926 }
   1927 
   1928 /* For hooks which use the MOVE_RATIO macro, this gives the legacy default
   1929    behavior.  SPEED_P is true if we are compiling for speed.  */
   1930 
   1931 unsigned int
   1932 get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
   1933 {
   1934   unsigned int move_ratio;
   1935 #ifdef MOVE_RATIO
   1936   move_ratio = (unsigned int) MOVE_RATIO (speed_p);
   1937 #else
   1938 #if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
   1939   move_ratio = 2;
   1940 #else /* No cpymem patterns, pick a default.  */
   1941   move_ratio = ((speed_p) ? 15 : 3);
   1942 #endif
   1943 #endif
   1944   return move_ratio;
   1945 }
   1946 
   1947 /* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
   1948    used; return FALSE if the cpymem/setmem optab should be expanded, or
   1949    a call to memcpy emitted.  */
   1950 
   1951 bool
   1952 default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
   1953 					unsigned int alignment,
   1954 					enum by_pieces_operation op,
   1955 					bool speed_p)
   1956 {
   1957   unsigned int max_size = 0;
   1958   unsigned int ratio = 0;
   1959 
   1960   switch (op)
   1961     {
   1962     case CLEAR_BY_PIECES:
   1963       max_size = STORE_MAX_PIECES;
   1964       ratio = CLEAR_RATIO (speed_p);
   1965       break;
   1966     case MOVE_BY_PIECES:
   1967       max_size = MOVE_MAX_PIECES;
   1968       ratio = get_move_ratio (speed_p);
   1969       break;
   1970     case SET_BY_PIECES:
   1971       max_size = STORE_MAX_PIECES;
   1972       ratio = SET_RATIO (speed_p);
   1973       break;
   1974     case STORE_BY_PIECES:
   1975       max_size = STORE_MAX_PIECES;
   1976       ratio = get_move_ratio (speed_p);
   1977       break;
   1978     case COMPARE_BY_PIECES:
   1979       max_size = COMPARE_MAX_PIECES;
   1980       /* Pick a likely default, just as in get_move_ratio.  */
   1981       ratio = speed_p ? 15 : 3;
   1982       break;
   1983     }
   1984 
   1985   return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
   1986 }
   1987 
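         /* A worked example of the test above (all numbers illustrative, not
            taken from any particular target): for MOVE_BY_PIECES of SIZE == 32
            at ALIGNMENT == 8, with MOVE_MAX_PIECES == 8 and a move ratio of 15
            when optimizing for speed, by_pieces_ninsns returns 4 (four 8-byte
            moves), and 4 < 15, so the by-pieces expansion is used.  For
            SIZE == 256 the count would be 32 >= 15, and the cpymem optab or a
            library call is used instead.  */
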
   1988 /* This hook controls code generation for expanding a memcmp operation by
   1989    pieces.  Return 1 for the normal pattern of compare/jump after each pair
   1990    of loads, or a higher number to reduce the number of branches.  */
   1991 
   1992 int
   1993 default_compare_by_pieces_branch_ratio (machine_mode)
   1994 {
   1995   return 1;
   1996 }
   1997 
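         /* For instance, comparing 32 bytes in 8-byte pieces with the ratio
            of 1 returned above branches after each of the four load pairs; a
            target returning 4 would let the four partial results be combined
            (roughly, IORed together) and tested with a single branch, trading
            branches for extra logical ops.  */
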
   1998 /* Helper for default_print_patchable_function_entry and other
   1999    print_patchable_function_entry hook implementations.  */
   2000 
   2001 void
   2002 default_print_patchable_function_entry_1 (FILE *file,
   2003 					  unsigned HOST_WIDE_INT
   2004 					  patch_area_size,
   2005 					  bool record_p,
   2006 					  unsigned int flags)
   2007 {
   2008   const char *nop_templ = 0;
   2009   int code_num;
   2010   rtx_insn *my_nop = make_insn_raw (gen_nop ());
   2011 
   2012   /* We use the template alone, relying on the (currently sane) assumption
   2013      that the NOP template does not have variable operands.  */
   2014   code_num = recog_memoized (my_nop);
   2015   nop_templ = get_insn_template (code_num, my_nop);
   2016 
   2017   if (record_p && targetm_common.have_named_sections)
   2018     {
   2019       char buf[256];
   2020       static int patch_area_number;
   2021       section *previous_section = in_section;
   2022       const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);
   2023 
   2024       gcc_assert (asm_op != NULL);
   2025       patch_area_number++;
   2026       ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);
   2027 
   2028       switch_to_section (get_section ("__patchable_function_entries",
   2029 				      flags, current_function_decl));
   2030       assemble_align (POINTER_SIZE);
   2031       fputs (asm_op, file);
   2032       assemble_name_raw (file, buf);
   2033       fputc ('\n', file);
   2034 
   2035       switch_to_section (previous_section);
   2036       ASM_OUTPUT_LABEL (file, buf);
   2037     }
   2038 
   2039   unsigned i;
   2040   for (i = 0; i < patch_area_size; ++i)
   2041     output_asm_insn (nop_templ, NULL);
   2042 }
   2043 
   2044 /* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
   2045    entry.  If RECORD_P is true and the target supports named sections,
   2046    the location of the NOPs will be recorded in a special object section
   2047    called "__patchable_function_entries".  This routine may be called
   2048    twice per function to put NOPs before and after the function
   2049    entry.  */
   2050 
   2051 void
   2052 default_print_patchable_function_entry (FILE *file,
   2053 					unsigned HOST_WIDE_INT patch_area_size,
   2054 					bool record_p)
   2055 {
   2056   unsigned int flags = SECTION_WRITE | SECTION_RELRO;
   2057   if (HAVE_GAS_SECTION_LINK_ORDER)
   2058     flags |= SECTION_LINK_ORDER;
   2059   default_print_patchable_function_entry_1 (file, patch_area_size, record_p,
   2060 					    flags);
   2061 }
   2062 
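         /* A rough sketch of the result (assuming an ELF target whose NOP
            template is plain "nop" and a 64-bit POINTER_SIZE), for
            -fpatchable-function-entry=2,1 around a function foo:

                    .section  __patchable_function_entries,"awo",@progbits,foo
                    .quad     .LPFE1
                    .text
            .LPFE1:
                    nop
            foo:
                    nop

            i.e. one NOP before the entry label and one after it, with the
            address of the first NOP recorded for run-time patching tools.  */
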
   2063 bool
   2064 default_profile_before_prologue (void)
   2065 {
   2066 #ifdef PROFILE_BEFORE_PROLOGUE
   2067   return true;
   2068 #else
   2069   return false;
   2070 #endif
   2071 }
   2072 
   2073 /* The default implementation of TARGET_PREFERRED_RELOAD_CLASS.  */
   2074 
   2075 reg_class_t
   2076 default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
   2077 			        reg_class_t rclass)
   2078 {
   2079 #ifdef PREFERRED_RELOAD_CLASS
   2080   return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
   2081 #else
   2082   return rclass;
   2083 #endif
   2084 }
   2085 
   2086 /* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS.  */
   2087 
   2088 reg_class_t
   2089 default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
   2090 				       reg_class_t rclass)
   2091 {
   2092   return rclass;
   2093 }
   2094 
   2095 /* The default implementation of TARGET_PREFERRED_RENAME_CLASS.  */
   2096 reg_class_t
   2097 default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
   2098 {
   2099   return NO_REGS;
   2100 }
   2101 
   2102 /* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P.  */
   2103 
   2104 bool
   2105 default_class_likely_spilled_p (reg_class_t rclass)
   2106 {
   2107   return (reg_class_size[(int) rclass] == 1);
   2108 }
   2109 
   2110 /* The default implementation of TARGET_CLASS_MAX_NREGS.  */
   2111 
   2112 unsigned char
   2113 default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
   2114 			 machine_mode mode ATTRIBUTE_UNUSED)
   2115 {
   2116 #ifdef CLASS_MAX_NREGS
   2117   return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
   2118 					  MACRO_MODE (mode));
   2119 #else
   2120   /* Targets with variable-sized modes must provide their own definition
   2121      of this hook.  */
   2122   unsigned int size = GET_MODE_SIZE (mode).to_constant ();
   2123   return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
   2124 #endif
   2125 }
   2126 
   2127 /* Determine the debugging unwind mechanism for the target.  */
   2128 
   2129 enum unwind_info_type
   2130 default_debug_unwind_info (void)
   2131 {
   2132   /* If the target wants to force the use of dwarf2 unwind info, let it.  */
   2133   /* ??? Change all users to the hook, then poison this.  */
   2134 #ifdef DWARF2_FRAME_INFO
   2135   if (DWARF2_FRAME_INFO)
   2136     return UI_DWARF2;
   2137 #endif
   2138 
   2139   /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
   2140 #ifdef DWARF2_DEBUGGING_INFO
   2141   if (dwarf_debuginfo_p ())
   2142     return UI_DWARF2;
   2143 #endif
   2144 
   2145   return UI_NONE;
   2146 }
   2147 
   2148 /* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   2149    must define this hook.  */
   2150 
   2151 unsigned int
   2152 default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
   2153 {
   2154   gcc_unreachable ();
   2155 }
   2156 
   2157 /* Determine the correct mode for a Dwarf frame register that represents
   2158    register REGNO.  */
   2159 
   2160 machine_mode
   2161 default_dwarf_frame_reg_mode (int regno)
   2162 {
   2163   machine_mode save_mode = reg_raw_mode[regno];
   2164 
   2165   if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
   2166 					      regno, save_mode))
   2167     save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
   2168   return save_mode;
   2169 }
   2170 
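         /* For example (illustrative, not from any particular port): if REGNO
            names a 16-byte vector register of which the EH unwind ABI only
            preserves the low 8 bytes, the raw 16-byte mode is partly
            call-clobbered across EH edges, so choose_hard_reg_mode picks the
            widest fully-preserved mode and the frame info only claims the 8
            bytes that are really saved.  */
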
   2171 /* To be used by targets where reg_raw_mode doesn't return the right
   2172    mode for registers used in apply_builtin_return and apply_builtin_arg.  */
   2173 
   2174 fixed_size_mode
   2175 default_get_reg_raw_mode (int regno)
   2176 {
   2177   /* Targets must override this hook if the underlying register is
   2178      variable-sized.  */
   2179   return as_a <fixed_size_mode> (reg_raw_mode[regno]);
   2180 }
   2181 
   2182 /* Return true if a leaf function should stay leaf even with profiling
   2183    enabled.  */
   2184 
   2185 bool
   2186 default_keep_leaf_when_profiled ()
   2187 {
   2188   return false;
   2189 }
   2190 
   2191 /* Return true if the state of option OPTION should be stored in PCH files
   2192    and checked by default_pch_valid_p.  Store the option's current state
   2193    in STATE if so.  */
   2194 
   2195 static inline bool
   2196 option_affects_pch_p (int option, struct cl_option_state *state)
   2197 {
   2198   if ((cl_options[option].flags & CL_TARGET) == 0)
   2199     return false;
   2200   if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
   2201     return false;
   2202   if (option_flag_var (option, &global_options) == &target_flags)
   2203     if (targetm.check_pch_target_flags)
   2204       return false;
   2205   return get_option_state (&global_options, option, state);
   2206 }
   2207 
   2208 /* Default version of get_pch_validity.
   2209    By default, every flag difference is fatal; that will be mostly right for
   2210    most targets, but completely right for very few.  */
   2211 
   2212 void *
   2213 default_get_pch_validity (size_t *sz)
   2214 {
   2215   struct cl_option_state state;
   2216   size_t i;
   2217   char *result, *r;
   2218 
   2219   *sz = 2;
   2220   if (targetm.check_pch_target_flags)
   2221     *sz += sizeof (target_flags);
   2222   for (i = 0; i < cl_options_count; i++)
   2223     if (option_affects_pch_p (i, &state))
   2224       *sz += state.size;
   2225 
   2226   result = r = XNEWVEC (char, *sz);
   2227   r[0] = flag_pic;
   2228   r[1] = flag_pie;
   2229   r += 2;
   2230   if (targetm.check_pch_target_flags)
   2231     {
   2232       memcpy (r, &target_flags, sizeof (target_flags));
   2233       r += sizeof (target_flags);
   2234     }
   2235 
   2236   for (i = 0; i < cl_options_count; i++)
   2237     if (option_affects_pch_p (i, &state))
   2238       {
   2239 	memcpy (r, state.data, state.size);
   2240 	r += state.size;
   2241       }
   2242 
   2243   return result;
   2244 }
   2245 
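         /* The validity blob built above therefore has this layout, which
            default_pch_valid_p below reads back in the same order:

              byte 0                 flag_pic
              byte 1                 flag_pie
              sizeof (target_flags)  target_flags (only present if the target
                                     defines check_pch_target_flags)
              state.size bytes each  the state of every option for which
                                     option_affects_pch_p is true  */
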
   2246 /* Return a message which says that a PCH file was created with a different
   2247    setting of OPTION.  */
   2248 
   2249 static const char *
   2250 pch_option_mismatch (const char *option)
   2251 {
   2252   return xasprintf (_("created and used with differing settings of '%s'"),
   2253 		    option);
   2254 }
   2255 
   2256 /* Default version of pch_valid_p.  */
   2257 
   2258 const char *
   2259 default_pch_valid_p (const void *data_p, size_t len ATTRIBUTE_UNUSED)
   2260 {
   2261   struct cl_option_state state;
   2262   const char *data = (const char *)data_p;
   2263   size_t i;
   2264 
   2265   /* -fpic and -fpie also usually make a PCH invalid.  */
   2266   if (data[0] != flag_pic)
   2267     return _("created and used with different settings of %<-fpic%>");
   2268   if (data[1] != flag_pie)
   2269     return _("created and used with different settings of %<-fpie%>");
   2270   data += 2;
   2271 
   2272   /* Check target_flags.  */
   2273   if (targetm.check_pch_target_flags)
   2274     {
   2275       int tf;
   2276       const char *r;
   2277 
   2278       memcpy (&tf, data, sizeof (target_flags));
   2279       data += sizeof (target_flags);
   2280       r = targetm.check_pch_target_flags (tf);
   2281       if (r != NULL)
   2282 	return r;
   2283     }
   2284 
   2285   for (i = 0; i < cl_options_count; i++)
   2286     if (option_affects_pch_p (i, &state))
   2287       {
   2288 	if (memcmp (data, state.data, state.size) != 0)
   2289 	  return pch_option_mismatch (cl_options[i].opt_text);
   2290 	data += state.size;
   2291       }
   2292 
   2293   return NULL;
   2294 }
   2295 
   2296 /* Default version of cstore_mode.  */
   2297 
   2298 scalar_int_mode
   2299 default_cstore_mode (enum insn_code icode)
   2300 {
   2301   return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
   2302 }
   2303 
   2304 /* Default version of member_type_forces_blk.  */
   2305 
   2306 bool
   2307 default_member_type_forces_blk (const_tree, machine_mode)
   2308 {
   2309   return false;
   2310 }
   2311 
   2312 /* Default version of canonicalize_comparison.  */
   2313 
   2314 void
   2315 default_canonicalize_comparison (int *, rtx *, rtx *, bool)
   2316 {
   2317 }
   2318 
   2319 /* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */
   2320 
   2321 void
   2322 default_atomic_assign_expand_fenv (tree *, tree *, tree *)
   2323 {
   2324 }
   2325 
   2326 #ifndef PAD_VARARGS_DOWN
   2327 #define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
   2328 #endif
   2329 
   2330 /* Build an indirect-ref expression over the given TREE, which represents a
   2331    piece of a va_arg() expansion.  */
   2332 tree
   2333 build_va_arg_indirect_ref (tree addr)
   2334 {
   2335   addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
   2336   return addr;
   2337 }
   2338 
   2339 /* The "standard" implementation of va_arg: read the value from the
   2340    current (padded) address and increment by the (padded) size.  */
   2341 
   2342 tree
   2343 std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
   2344 			  gimple_seq *post_p)
   2345 {
   2346   tree addr, t, type_size, rounded_size, valist_tmp;
   2347   unsigned HOST_WIDE_INT align, boundary;
   2348   bool indirect;
   2349 
   2350   /* All of the alignment and movement below is for args-grow-up machines.
   2351      As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
   2352      implement their own specialized gimplify_va_arg_expr routines.  */
   2353   if (ARGS_GROW_DOWNWARD)
   2354     gcc_unreachable ();
   2355 
   2356   indirect = pass_va_arg_by_reference (type);
   2357   if (indirect)
   2358     type = build_pointer_type (type);
   2359 
   2360   if (targetm.calls.split_complex_arg
   2361       && TREE_CODE (type) == COMPLEX_TYPE
   2362       && targetm.calls.split_complex_arg (type))
   2363     {
   2364       tree real_part, imag_part;
   2365 
   2366       real_part = std_gimplify_va_arg_expr (valist,
   2367 					    TREE_TYPE (type), pre_p, NULL);
   2368       real_part = get_initialized_tmp_var (real_part, pre_p);
   2369 
   2370       imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
   2371 					    TREE_TYPE (type), pre_p, NULL);
   2372       imag_part = get_initialized_tmp_var (imag_part, pre_p);
   2373 
   2374       return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    2375     }
   2376 
   2377   align = PARM_BOUNDARY / BITS_PER_UNIT;
   2378   boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);
   2379 
    2380   /* When the caller aligns a parameter on the stack, an alignment
    2381      beyond MAX_SUPPORTED_STACK_ALIGNMENT is capped at
    2382      MAX_SUPPORTED_STACK_ALIGNMENT.  Match the caller's behavior here
    2383      in the callee.  */
   2384   if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
   2385     boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
   2386 
   2387   boundary /= BITS_PER_UNIT;
   2388 
   2389   /* Hoist the valist value into a temporary for the moment.  */
   2390   valist_tmp = get_initialized_tmp_var (valist, pre_p);
   2391 
    2392   /* The va_list pointer is aligned to PARM_BOUNDARY.  If the argument
    2393      actually requires greater alignment, we must align it dynamically.  */
   2394   if (boundary > align
   2395       && !TYPE_EMPTY_P (type)
   2396       && !integer_zerop (TYPE_SIZE (type)))
   2397     {
   2398       t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
   2399 		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
   2400       gimplify_and_add (t, pre_p);
   2401 
   2402       t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
   2403 		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
   2404 			       valist_tmp,
   2405 			       build_int_cst (TREE_TYPE (valist), -boundary)));
   2406       gimplify_and_add (t, pre_p);
   2407     }
   2408   else
   2409     boundary = align;
   2410 
   2411   /* If the actual alignment is less than the alignment of the type,
   2412      adjust the type accordingly so that we don't assume strict alignment
   2413      when dereferencing the pointer.  */
   2414   boundary *= BITS_PER_UNIT;
   2415   if (boundary < TYPE_ALIGN (type))
   2416     {
   2417       type = build_variant_type_copy (type);
   2418       SET_TYPE_ALIGN (type, boundary);
   2419     }
   2420 
   2421   /* Compute the rounded size of the type.  */
   2422   type_size = arg_size_in_bytes (type);
   2423   rounded_size = round_up (type_size, align);
   2424 
   2425   /* Reduce rounded_size so it's sharable with the postqueue.  */
   2426   gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);
   2427 
   2428   /* Get AP.  */
   2429   addr = valist_tmp;
   2430   if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
   2431     {
   2432       /* Small args are padded downward.  */
   2433       t = fold_build2_loc (input_location, GT_EXPR, sizetype,
   2434 		       rounded_size, size_int (align));
   2435       t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
   2436 		       size_binop (MINUS_EXPR, rounded_size, type_size));
   2437       addr = fold_build_pointer_plus (addr, t);
   2438     }
   2439 
   2440   /* Compute new value for AP.  */
   2441   t = fold_build_pointer_plus (valist_tmp, rounded_size);
   2442   t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
   2443   gimplify_and_add (t, pre_p);
   2444 
   2445   addr = fold_convert (build_pointer_type (type), addr);
   2446 
   2447   if (indirect)
   2448     addr = build_va_arg_indirect_ref (addr);
   2449 
   2450   return build_va_arg_indirect_ref (addr);
   2451 }
   2452 
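         /* In outline, the sequence gimplified above behaves like this C-ish
            sketch for a type T (ALIGN and BOUNDARY in bytes; the alignment
            step is only emitted when BOUNDARY > ALIGN):

              ap = (ap + boundary - 1) & -boundary;   align the pointer
              addr = ap;
              if (PAD_VARARGS_DOWN && rounded_size <= align)
                addr += rounded_size - size_of_T;     small args pad downward
              ap += rounded_size;                     advance the va_list
              result = *(T *) addr;                   one more indirection if
                                                      T is passed by reference

            where rounded_size is the size of T rounded up to ALIGN.  */
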
   2453 /* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   2454    not support nested low-overhead loops.  */
   2455 
   2456 bool
   2457 can_use_doloop_if_innermost (const widest_int &, const widest_int &,
   2458 			     unsigned int loop_depth, bool)
   2459 {
   2460   return loop_depth == 1;
   2461 }
   2462 
   2463 /* Default implementation of TARGET_OPTAB_SUPPORTED_P.  */
   2464 
   2465 bool
   2466 default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
   2467 {
   2468   return true;
   2469 }
   2470 
   2471 /* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST.  */
   2472 
   2473 unsigned int
   2474 default_max_noce_ifcvt_seq_cost (edge e)
   2475 {
   2476   bool predictable_p = predictable_edge_p (e);
   2477 
   2478   if (predictable_p)
   2479     {
   2480       if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost))
   2481 	return param_max_rtl_if_conversion_predictable_cost;
   2482     }
   2483   else
   2484     {
   2485       if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost))
   2486 	return param_max_rtl_if_conversion_unpredictable_cost;
   2487     }
   2488 
   2489   return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
   2490 }
   2491 
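         /* For example, with the common definition COSTS_N_INSNS (3) == 12
            and a BRANCH_COST of 1 on a predictable edge, the default cap is
            12 units of cost, i.e. roughly three simple instructions; a higher
            BRANCH_COST scales up the budget that if-conversion may spend on
            removing the branch.  */
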
   2492 /* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */
   2493 
   2494 unsigned int
   2495 default_min_arithmetic_precision (void)
   2496 {
   2497   return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
   2498 }
   2499 
   2500 /* Default implementation of TARGET_C_EXCESS_PRECISION.  */
   2501 
   2502 enum flt_eval_method
   2503 default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
   2504 {
   2505   return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
   2506 }
   2507 
    2508 /* Default implementation for
    2509    TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.  */
   2510 HOST_WIDE_INT
   2511 default_stack_clash_protection_alloca_probe_range (void)
   2512 {
   2513   return 0;
   2514 }
   2515 
   2516 /* The default implementation of TARGET_EARLY_REMAT_MODES.  */
   2517 
   2518 void
   2519 default_select_early_remat_modes (sbitmap)
   2520 {
   2521 }
   2522 
   2523 /* The default implementation of TARGET_PREFERRED_ELSE_VALUE.  */
   2524 
   2525 tree
   2526 default_preferred_else_value (unsigned, tree type, unsigned, tree *)
   2527 {
   2528   return build_zero_cst (type);
   2529 }
   2530 
   2531 /* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.  */
   2532 bool
   2533 default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
   2534 {
   2535 #ifdef HAVE_speculation_barrier
   2536   return active ? HAVE_speculation_barrier : true;
   2537 #else
   2538   return false;
   2539 #endif
   2540 }
   2541 /* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   2542    that can be used on targets that never have speculative execution.  */
   2543 bool
   2544 speculation_safe_value_not_needed (bool active)
   2545 {
   2546   return !active;
   2547 }
   2548 
   2549 /* Default implementation of the speculation-safe-load builtin.  This
   2550    implementation simply copies val to result and generates a
   2551    speculation_barrier insn, if such a pattern is defined.  */
   2552 rtx
   2553 default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
   2554 				rtx result, rtx val,
   2555 				rtx failval ATTRIBUTE_UNUSED)
   2556 {
   2557   emit_move_insn (result, val);
   2558 
   2559 #ifdef HAVE_speculation_barrier
   2560   /* Assume the target knows what it is doing: if it defines a
   2561      speculation barrier, but it is not enabled, then assume that one
   2562      isn't needed.  */
   2563   if (HAVE_speculation_barrier)
   2564     emit_insn (gen_speculation_barrier ());
   2565 #endif
   2566 
   2567   return result;
   2568 }
   2569 
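         /* In effect, __builtin_speculation_safe_value (v) expands here to a
            plain copy of V into RESULT, followed by a speculation barrier
            when the target defines (and enables) such a pattern:

              result = v;
              speculation_barrier insn, if HAVE_speculation_barrier

            FAILVAL is ignored by this default; targets that can honor it
            override the hook.  */
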
    2570 /* How many bits to shift in order to access the tag bits.
    2571    The default is to store the tag in the top 8 bits of a 64-bit pointer,
    2572    so shifting right by 56 bits leaves just the tag.  */
   2573 #define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8)
   2574 #define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT)
   2575 
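         /* Concretely, with a 64-bit Pmode, HWASAN_SHIFT is 56 and the
            default hooks below implement the following pointer arithmetic
            (a sketch in C terms):

              tagged   = untagged | ((uint64_t) tag << 56);    set_tag
              tag      = tagged >> 56;                         extract_tag
              untagged = tagged & ((1ULL << 56) - 1);          untagged_pointer  */
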
   2576 bool
   2577 default_memtag_can_tag_addresses ()
   2578 {
   2579   return false;
   2580 }
   2581 
   2582 uint8_t
   2583 default_memtag_tag_size ()
   2584 {
   2585   return 8;
   2586 }
   2587 
   2588 uint8_t
   2589 default_memtag_granule_size ()
   2590 {
   2591   return 16;
   2592 }
   2593 
   2594 /* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG.  */
   2595 rtx
   2596 default_memtag_insert_random_tag (rtx untagged, rtx target)
   2597 {
   2598   gcc_assert (param_hwasan_instrument_stack);
   2599   if (param_hwasan_random_frame_tag)
   2600     {
   2601       rtx fn = init_one_libfunc ("__hwasan_generate_tag");
   2602       rtx new_tag = emit_library_call_value (fn, NULL_RTX, LCT_NORMAL, QImode);
   2603       return targetm.memtag.set_tag (untagged, new_tag, target);
   2604     }
   2605   else
   2606     {
   2607       /* NOTE: The kernel API does not have __hwasan_generate_tag exposed.
    2608 	 In the future we may add the option to emit random tags with inline
   2609 	 instrumentation instead of function calls.  This would be the same
   2610 	 between the kernel and userland.  */
   2611       return untagged;
   2612     }
   2613 }
   2614 
   2615 /* The default implementation of TARGET_MEMTAG_ADD_TAG.  */
   2616 rtx
   2617 default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
   2618 {
   2619   /* Need to look into what the most efficient code sequence is.
   2620      This is a code sequence that would be emitted *many* times, so we
   2621      want it as small as possible.
   2622 
   2623      There are two places where tag overflow is a question:
   2624        - Tagging the shadow stack.
   2625 	  (both tagging and untagging).
   2626        - Tagging addressable pointers.
   2627 
   2628      We need to ensure both behaviors are the same (i.e. that the tag that
   2629      ends up in a pointer after "overflowing" the tag bits with a tag addition
   2630      is the same that ends up in the shadow space).
   2631 
   2632      The aim is that the behavior of tag addition should follow modulo
   2633      wrapping in both instances.
   2634 
   2635      The libhwasan code doesn't have any path that increments a pointer's tag,
   2636      which means it has no opinion on what happens when a tag increment
   2637      overflows (and hence we can choose our own behavior).  */
   2638 
   2639   offset += ((uint64_t)tag_offset << HWASAN_SHIFT);
   2640   return plus_constant (Pmode, base, offset);
   2641 }
   2642 
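         /* For example (again with a 64-bit Pmode), adding TAG_OFFSET 0x20 to
            a pointer whose current tag is 0xF0 computes 0xF0 + 0x20 = 0x110
            in the tag field; the carry out of bit 63 is discarded by the
            modulo-2^64 addition, so the new tag is 0x10, exactly the
            modulo-wrapping behavior described above.  */
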
   2643 /* The default implementation of TARGET_MEMTAG_SET_TAG.  */
   2644 rtx
   2645 default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
   2646 {
   2647   gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
   2648   tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
   2649 			     /* unsignedp = */1, OPTAB_WIDEN);
   2650   rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
   2651 				 /* unsignedp = */1, OPTAB_DIRECT);
   2652   gcc_assert (ret);
   2653   return ret;
   2654 }
   2655 
   2656 /* The default implementation of TARGET_MEMTAG_EXTRACT_TAG.  */
   2657 rtx
   2658 default_memtag_extract_tag (rtx tagged_pointer, rtx target)
   2659 {
   2660   rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
   2661 				 HWASAN_SHIFT_RTX, target,
   2662 				 /* unsignedp = */0,
   2663 				 OPTAB_DIRECT);
   2664   rtx ret = gen_lowpart (QImode, tag);
   2665   gcc_assert (ret);
   2666   return ret;
   2667 }
   2668 
   2669 /* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER.  */
   2670 rtx
   2671 default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
   2672 {
   2673   rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
   2674   rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
   2675 					   tag_mask, target, true,
   2676 					   OPTAB_DIRECT);
   2677   gcc_assert (untagged_base);
   2678   return untagged_base;
   2679 }
   2680 
   2681 /* The default implementation of TARGET_GCOV_TYPE_SIZE.  */
   2682 HOST_WIDE_INT
   2683 default_gcov_type_size (void)
   2684 {
   2685   return TYPE_PRECISION (long_long_integer_type_node) > 32 ? 64 : 32;
   2686 }
   2687 
   2688 #include "gt-targhooks.h"
   2689