/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993-2022 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac (at) cygnus.com).
   Improved by Jim Wilson (wilson (at) cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include <sstream>

#define IN_TARGET_CODE 1

#include "config.h"
#define INCLUDE_VECTOR
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "output.h"
#include "insn-attr.h"
#include "dwarf2.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "intl.h"
#include "sched-int.h"
#include "gimplify.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "regs.h"
#include "toplev.h"

/* This file should be included last.  */
#include "target-def.h"

int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define CONST_OK_FOR_ADD(size) CONST_OK_FOR_I08 (size)
#define GEN_MOV (*(gen_movsi))
#define GEN_ADD3 (*(gen_addsi3))
#define GEN_SUB3 (*(gen_subsi3))

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
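/* That is: for a TYPE_P node use its TYPE_ATTRIBUTES directly; for a decl,
   use its own DECL_ATTRIBUTES if it has any, and otherwise fall back to the
   attributes of the decl's type.  */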

/* Set to true by expand_prologue() when the function is an
   interrupt handler.  */
bool current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;

/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;

/* Unique number for UNSPEC_BBR pattern.  */
static unsigned int unspec_bbr_uid = 1;

/* Provides the class number of the smallest class containing
   reg number.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};

char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;

static void split_branches (rtx_insn *);
static int branch_dest (rtx);
static void print_slot (rtx_sequence *);
static rtx_code_label *add_constant (rtx, machine_mode, rtx);
static void dump_table (rtx_insn *, rtx_insn *);
static bool broken_move (rtx_insn *);
static bool mova_p (rtx_insn *);
static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *);
static bool noncall_uses_reg (rtx, rtx_insn *, rtx *);
static rtx_insn *gen_block_redirect (rtx_insn *, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
static void sh_override_options_after_change (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static rtx_insn* emit_frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET* mask, bool interrupt_handler);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static bool sh_frame_pointer_required (void);
static void sh_emit_mode_set (int, int, int, HARD_REG_SET);
static int sh_mode_needed (int, rtx_insn *);
static int sh_mode_after (int, int, rtx_insn *);
static int sh_mode_entry (int);
static int sh_mode_exit (int);
static int sh_mode_priority (int entity, int n);

static rtx mark_constant_pool_use (rtx);
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree,
						   int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_print_operand (FILE *, rtx, int);
static void sh_print_operand_address (FILE *, machine_mode, rtx);
static bool sh_print_operand_punct_valid_p (unsigned char code);
static bool sh_asm_output_addr_const_extra (FILE *file, rtx x);
static void sh_output_function_epilogue (FILE *);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_register_move_cost (machine_mode, reg_class_t, reg_class_t);
static int sh_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, machine_mode);
static short find_insn_regmode_weight (rtx, machine_mode);
static void find_regmode_weight (basic_block, machine_mode);
static int find_r0_life_regions (basic_block);
static void  sh_md_init_global (FILE *, int, int);
static void  sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx_insn **, int);
static void ready_reorder (rtx_insn **, int);
static bool high_pressure (machine_mode);
static int sh_reorder (FILE *, int, rtx_insn **, int *, int);
static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx_insn *, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_can_follow_jump (const rtx_insn *, const rtx_insn *);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static tree sh_builtin_decl (unsigned, bool);
static rtx sh_expand_builtin (tree, rtx, rtx, machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				HOST_WIDE_INT, tree);
static void sh_file_start (void);
static bool sh_assemble_integer (rtx, unsigned int, int);
static bool flow_dependent_p (rtx_insn *, rtx_insn *);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int and_xor_ior_costs (rtx, int);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx_insn *);
static bool sh_cannot_force_const_mem_p (machine_mode, rtx);
static bool sh_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int sh_address_cost (rtx, machine_mode, addr_space_t, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static reg_class_t sh_preferred_reload_class (rtx, reg_class_t);
static reg_class_t sh_secondary_reload (bool, rtx, reg_class_t,
                                        machine_mode,
                                        struct secondary_reload_info *);
static bool sh_legitimate_address_p (machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, machine_mode);
static rtx sh_delegitimize_address (rtx);
static bool sh_cannot_substitute_mem_equiv_p (rtx);
static bool sh_legitimize_address_displacement (rtx *, rtx *,
						poly_int64, machine_mode);
static int scavenge_reg (HARD_REG_SET *s);

static rtx sh_struct_value_rtx (tree, int);
static rtx sh_function_value (const_tree, const_tree, bool);
static bool sh_function_value_regno_p (const unsigned int);
static rtx sh_libcall_value (machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (cumulative_args_t,
				       const function_arg_info &, int *, int);
static bool sh_strict_argument_naming (cumulative_args_t);
static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool sh_promote_prototypes (const_tree);
static machine_mode sh_promote_function_mode (const_tree type,
						   machine_mode,
						   int *punsignedp,
						   const_tree funtype,
						   int for_return);
static bool sh_pass_by_reference (cumulative_args_t,
				  const function_arg_info &);
static bool sh_callee_copies (cumulative_args_t, const function_arg_info &);
static int sh_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
static void sh_function_arg_advance (cumulative_args_t,
				     const function_arg_info &);
static rtx sh_function_arg (cumulative_args_t, const function_arg_info &);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static bool sh2a_function_vector_p (tree);
static void sh_trampoline_init (rtx, tree, rtx);
static rtx sh_trampoline_adjust_address (rtx);
static void sh_conditional_register_usage (void);
static bool sh_legitimate_constant_p (machine_mode, rtx);
static int mov_insn_size (machine_mode, bool);
static int mov_insn_alignment_mask (machine_mode, bool);
static bool sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
					       unsigned int,
					       enum by_pieces_operation,
					       bool);
static bool sequence_insn_p (rtx_insn *);
static void sh_canonicalize_comparison (int *, rtx *, rtx *, bool);
static void sh_canonicalize_comparison (enum rtx_code&, rtx&, rtx&,
					machine_mode, bool);
static bool sh_legitimate_combined_insn (rtx_insn* insn);

static bool sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2);

static void sh_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
static unsigned int sh_hard_regno_nregs (unsigned int, machine_mode);
static bool sh_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sh_modes_tieable_p (machine_mode, machine_mode);
static bool sh_can_change_mode_class (machine_mode, machine_mode, reg_class_t);

static const struct attribute_spec sh_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt_handler", 0, 0, true,  false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "sp_switch",         1, 1, true,  false, false, false,
    sh_handle_sp_switch_attribute, NULL },
  { "trap_exit",         1, 1, true,  false, false, false,
    sh_handle_trap_exit_attribute, NULL },
  { "renesas",           0, 0, false, true, false, false,
    sh_handle_renesas_attribute, NULL },
  { "trapa_handler",     0, 0, true,  false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "nosave_low_regs",   0, 0, true,  false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "resbank",           0, 0, true,  false, false, false,
    sh_handle_resbank_handler_attribute, NULL },
  { "function_vector",   1, 1, true,  false, false, false,
    sh2a_handle_function_vector_handler_attribute, NULL },
  { NULL,                0, 0, false, false, false, false, NULL, NULL }
};
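/* Illustrative use of these attributes in user code, e.g.:
     void handler (void) __attribute__ ((interrupt_handler));
     void handler2 (void) __attribute__ ((trap_exit (4), sp_switch ("alt_stack")));
   sp_switch, trap_exit and function_vector take exactly one argument,
   matching the min_len/max_len fields above.  */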

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sh_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  sh_override_options_after_change

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sh_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sh_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sh_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA sh_asm_output_addr_const_extra

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
  hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sh_assemble_integer

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sh_register_move_cost

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

/* The next 5 hooks have been implemented to re-enable sched1.  With the
   help of these macros we limit the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with the
   lowest LUID (original insn order); but such an insn might be sitting in
   the stalled queue (Q) instead of the ready queue (R).  To solve this, we
   skip cycles up to a max of 8 cycles so that such insns may move from
   Q -> R.

   The hooks are described below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after the
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks, similar to what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   (Q)->(R).

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with the lowest LUID will
   be issued next.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
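/* The pressure state these hooks operate on lives in the file-scope
   variables regmode_weight, curr_regmode_pressure, r0_life_regions,
   skip_cycles and cached_can_issue_more defined near the top of this
   file.  */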

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sh_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address

#undef TARGET_CAN_FOLLOW_JUMP
#define TARGET_CAN_FOLLOW_JUMP sh_can_follow_jump

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sh_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sh_function_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sh_function_value_regno_p
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sh_libcall_value
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sh_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sh_function_arg_advance

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sh_atomic_assign_expand_fenv

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required

#undef TARGET_MODE_EMIT
#define TARGET_MODE_EMIT sh_emit_mode_set

#undef TARGET_MODE_NEEDED
#define TARGET_MODE_NEEDED sh_mode_needed

#undef TARGET_MODE_AFTER
#define TARGET_MODE_AFTER sh_mode_after

#undef TARGET_MODE_ENTRY
#define TARGET_MODE_ENTRY sh_mode_entry

#undef TARGET_MODE_EXIT
#define TARGET_MODE_EXIT sh_mode_exit

#undef TARGET_MODE_PRIORITY
#define TARGET_MODE_PRIORITY sh_mode_priority

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE)\
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE)\
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	sh_encode_section_info

#undef TARGET_LRA_P
#define TARGET_LRA_P sh_lra_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sh_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sh_conditional_register_usage

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P	sh_legitimate_address_p

#undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P
#define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P sh_cannot_substitute_mem_equiv_p

#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT \
  sh_legitimize_address_displacement

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT		sh_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P	sh_legitimate_constant_p

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON	sh_canonicalize_comparison

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN sh_legitimate_combined_insn

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sh_fixed_condition_code_regs

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  sh_use_by_pieces_infrastructure_p

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

/* The tas.b instruction sets the 7th bit in the byte, i.e. 0x80.  This value
   is used by optabs.cc atomic op expansion code as well as in sync.md.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0x80

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sh_cannot_force_const_mem_p

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS sh_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK sh_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P sh_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS sh_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef  TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;


/* Information on the currently selected atomic model.
   This is initialized in sh_option_override.  */
static sh_atomic_model selected_atomic_model_;

const sh_atomic_model&
selected_atomic_model (void)
{
  return selected_atomic_model_;
}

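/* Parse the atomic model option string (sh_atomic_model_str): a comma
   separated list consisting of the model name ("none", "soft-gusa",
   "hard-llcs", "soft-tcb" or "soft-imask"), optionally followed by
   "strict" and/or "gbr-offset=<n>".  For example (illustrative only):
     soft-tcb,gbr-offset=4,strict  */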
static sh_atomic_model
parse_validate_atomic_model_option (const char* str)
{
  const char* model_names[sh_atomic_model::num_models];
  model_names[sh_atomic_model::none] = "none";
  model_names[sh_atomic_model::soft_gusa] = "soft-gusa";
  model_names[sh_atomic_model::hard_llcs] = "hard-llcs";
  model_names[sh_atomic_model::soft_tcb] = "soft-tcb";
  model_names[sh_atomic_model::soft_imask] = "soft-imask";

  const char* model_cdef_names[sh_atomic_model::num_models];
  model_cdef_names[sh_atomic_model::none] = "NONE";
  model_cdef_names[sh_atomic_model::soft_gusa] = "SOFT_GUSA";
  model_cdef_names[sh_atomic_model::hard_llcs] = "HARD_LLCS";
  model_cdef_names[sh_atomic_model::soft_tcb] = "SOFT_TCB";
  model_cdef_names[sh_atomic_model::soft_imask] = "SOFT_IMASK";

  sh_atomic_model ret;
  ret.type = sh_atomic_model::none;
  ret.name = model_names[sh_atomic_model::none];
  ret.cdef_name = model_cdef_names[sh_atomic_model::none];
  ret.strict = false;
  ret.tcb_gbr_offset = -1;

  /* Handle empty string as 'none'.  */
  if (str == NULL || *str == '\0')
    return ret;

#define err_ret(...) do { error (__VA_ARGS__); return ret; } while (0)

  std::vector<std::string> tokens;
  for (std::stringstream ss (str); ss.good (); )
  {
    tokens.push_back (std::string ());
    std::getline (ss, tokens.back (), ',');
  }

  if (tokens.empty ())
    err_ret ("invalid atomic model option");

  /* The first token must be the atomic model name.  */
  {
    for (size_t i = 0; i < sh_atomic_model::num_models; ++i)
      if (tokens.front () == model_names[i])
	{
	  ret.type = (sh_atomic_model::enum_type)i;
	  ret.name = model_names[i];
	  ret.cdef_name = model_cdef_names[i];
	  goto got_mode_name;
	}

    err_ret ("invalid atomic model name %qs", tokens.front ().c_str ());
got_mode_name:;
  }

  /* Go through the remaining tokens.  */
  for (size_t i = 1; i < tokens.size (); ++i)
    {
      if (tokens[i] == "strict")
	ret.strict = true;
      else if (!tokens[i].compare (0, strlen ("gbr-offset="), "gbr-offset="))
	{
	  std::string offset_str = tokens[i].substr (strlen ("gbr-offset="));
	  ret.tcb_gbr_offset = integral_argument (offset_str.c_str ());
	  if (offset_str.empty () || ret.tcb_gbr_offset == -1)
	    err_ret ("could not parse gbr-offset value %qs in atomic model "
		     "option", offset_str.c_str ());
	}
      else
	err_ret ("unknown parameter %qs in atomic model option",
		 tokens[i].c_str ());
    }

  /* Check that the selection makes sense.  */
  if (ret.type == sh_atomic_model::soft_gusa && !TARGET_SH3)
    err_ret ("atomic model %s is only available on SH3 and SH4 targets",
	     ret.name);

  if (ret.type == sh_atomic_model::hard_llcs && !TARGET_SH4A)
    err_ret ("atomic model %s is only available on SH4A targets", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb && ret.tcb_gbr_offset == -1)
    err_ret ("atomic model %s requires gbr-offset parameter", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb
      && (ret.tcb_gbr_offset < 0 || ret.tcb_gbr_offset > 1020
          || (ret.tcb_gbr_offset & 3) != 0))
    err_ret ("invalid gbr-offset value \"%d\" for atomic model %s; it must be "
	     "a multiple of 4 in the range 0-1020", ret.tcb_gbr_offset,
	     ret.name);

  if (ret.type == sh_atomic_model::soft_imask && TARGET_USERMODE)
    err_ret ("cannot use atomic model %s in user mode", ret.name);

  return ret;

#undef err_ret
}

/* Register SH specific RTL passes.  */
extern opt_pass* make_pass_sh_treg_combine (gcc::context* ctx, bool split_insns,
					    const char* name);
extern opt_pass* make_pass_sh_optimize_sett_clrt (gcc::context* ctx,
						  const char* name);
static void
register_sh_passes (void)
{
/* Running the sh_treg_combine pass after ce1 generates better code when
   comparisons are combined and reg-reg moves are introduced, because
   reg-reg moves will be eliminated afterwards.  However, there are quite
   a few cases where combine will be unable to fold comparison related insns,
   thus for now don't do it.
  register_pass (make_pass_sh_treg_combine (g, false, "sh_treg_combine1"),
		 PASS_POS_INSERT_AFTER, "ce1", 1);
*/

  /* Run sh_treg_combine pass after combine but before register allocation.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine2"),
		 PASS_POS_INSERT_AFTER, "split1", 1);

  /* Run sh_treg_combine pass after register allocation and basic block
     reordering as this sometimes creates new opportunities.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"),
		 PASS_POS_INSERT_AFTER, "split3", 1);

  /* Optimize sett and clrt insns, by e.g. removing them if the T bit value
     is known after a conditional branch.
     This must be done after basic blocks and branch conditions have
     stabilized and won't be changed by further passes.  */
  register_pass (make_pass_sh_optimize_sett_clrt (g, "sh_optimize_sett_clrt"),
		 PASS_POS_INSERT_BEFORE, "sched2", 1);
}

/* Implement TARGET_OPTION_OVERRIDE macro.  Validate and override
   various options, and do some machine dependent initialization.  */
static void
sh_option_override (void)
{
  int regno;

  SUBTARGET_OVERRIDE_OPTIONS;

  sh_cpu = PROCESSOR_SH1;
  assembler_dialect = 0;
  if (TARGET_SH2)
    sh_cpu = PROCESSOR_SH2;
  if (TARGET_SH2E)
    sh_cpu = PROCESSOR_SH2E;
  if (TARGET_SH2A)
    sh_cpu = PROCESSOR_SH2A;
  if (TARGET_SH3)
    sh_cpu = PROCESSOR_SH3;
  if (TARGET_SH3E)
    sh_cpu = PROCESSOR_SH3E;
  if (TARGET_SH4)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4;
    }
  if (TARGET_SH4A)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4A;
    }

  /* User/privileged mode is supported only on SH3* and SH4*.
     Disable it for everything else.  */
  if (!TARGET_SH3 && TARGET_USERMODE)
    TARGET_USERMODE = false;

  if (! strcmp (sh_div_str, "call-div1"))
    sh_div_strategy = SH_DIV_CALL_DIV1;
  else if (! strcmp (sh_div_str, "call-fp") && TARGET_FPU_ANY)
    sh_div_strategy = SH_DIV_CALL_FP;
  else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT)
    sh_div_strategy = SH_DIV_CALL_TABLE;
  else
    {
      /* Pick one that makes most sense for the target in general.
	 It is not much good to use different functions depending on -Os,
	 since then we'll end up with two different functions when some of
	 the code is compiled for size, and some for speed.  */

      /* SH4 tends to emphasize speed.  */
      if (TARGET_HARD_SH4)
	sh_div_strategy = SH_DIV_CALL_TABLE;
      /* These have their own way of doing things.  */
      else if (TARGET_SH2A)
	sh_div_strategy = SH_DIV_INTRINSIC;
      /* SH1 .. SH3 cores often go into small-footprint systems, so
	 default to the smallest implementation available.  */
      else
	sh_div_strategy = SH_DIV_CALL_DIV1;
    }

  if (sh_divsi3_libfunc[0])
    ; /* User supplied - leave it alone.  */
  else if (TARGET_DIVIDE_CALL_FP)
    sh_divsi3_libfunc = "__sdivsi3_i4";
  else if (TARGET_DIVIDE_CALL_TABLE)
    sh_divsi3_libfunc = "__sdivsi3_i4i";
  else
    sh_divsi3_libfunc = "__sdivsi3";

  if (sh_branch_cost == -1)
    {
      /*  The SH1 does not have delay slots, hence we get a pipeline stall
	  at every branch.  The SH4 is superscalar, so the single delay slot
	  is not sufficient to keep both pipelines filled.
	  In any case, set the default branch cost to '2', as it results in
	  slightly overall smaller code and also enables some if conversions
	  that are required for matching special T bit related insns.  */
      sh_branch_cost = 2;
    }

  /* Set -mzdcbranch for SH4 / SH4A if not otherwise specified by the user.  */
  if (! OPTION_SET_P (TARGET_ZDCBRANCH) && TARGET_HARD_SH4)
    TARGET_ZDCBRANCH = 1;

  /* FDPIC code is a special form of PIC, and the vast majority of code
     generation constraints that apply to PIC also apply to FDPIC, so we
     set flag_pic to avoid the need to check TARGET_FDPIC everywhere
     flag_pic is checked. */
  if (TARGET_FDPIC && !flag_pic)
    flag_pic = 2;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      sh_register_names[regno][0] = '\0';

  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
      sh_additional_register_names[regno][0] = '\0';

  if (flag_pic && ! TARGET_PREFERGOT)
    flag_no_function_cse = 1;

  if (targetm.small_register_classes_for_mode_p (VOIDmode))
    {
      /* Never run scheduling before reload, since that can
	 break global alloc, and generates slower code anyway due
	 to the pressure on R0.  */
      /* Enable sched1 for SH4 if the user explicitly requests.
	 When sched1 is enabled, the ready queue will be reordered by
	 the target hooks if pressure is high.  We cannot do this for
	 PIC, SH3 and lower as they give spill failures for R0.  */
      if (!TARGET_HARD_SH4 || flag_pic)
	flag_schedule_insns = 0;
      /* ??? Current exception handling places basic block boundaries
	 after call_insns.  It causes the high pressure on R0 and gives
	 spill failures for R0 in reload.  See PR 22553 and the thread
	 on gcc-patches
	 <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
      else if (flag_exceptions)
	{
	  if (flag_schedule_insns && OPTION_SET_P (flag_schedule_insns))
	    warning (0, "ignoring %<-fschedule-insns%> because of exception "
			"handling bug");
	  flag_schedule_insns = 0;
	}
      else if (flag_schedule_insns
	       && !OPTION_SET_P (flag_schedule_insns))
	flag_schedule_insns = 0;
    }

  /* Unwind info is not correct around the CFG unless either a frame
     pointer is present or M_A_O_A is set.  Fixing this requires rewriting
     unwind info generation to be aware of the CFG and propagating states
     around edges.  */
  if ((flag_unwind_tables || flag_asynchronous_unwind_tables
       || flag_exceptions || flag_non_call_exceptions)
      && flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
    {
      warning (0, "unwind tables currently require either a frame pointer "
	       "or %<-maccumulate-outgoing-args%> for correctness");
      TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
    }

  if (flag_unsafe_math_optimizations)
    {
      /* Enable fsca insn for SH4A if not otherwise specified by the user.  */
      if (OPTION_SET_P (TARGET_FSCA) == 0
	  && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
	TARGET_FSCA = 1;

      /* Enable fsrra insn for SH4A if not otherwise specified by the user.  */
      if (OPTION_SET_P (TARGET_FSRRA) == 0
	  && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
	TARGET_FSRRA = 1;
    }

  /*  Allow fsrra insn only if -funsafe-math-optimizations and
      -ffinite-math-only are enabled.  */
    976  1.1  mrg   TARGET_FSRRA = TARGET_FSRRA
    977  1.1  mrg 		 && flag_unsafe_math_optimizations
    978  1.1  mrg 		 && flag_finite_math_only;
    979  1.1  mrg 
    980  1.1  mrg   /* If the -mieee option was not explicitly set by the user, turn it on
    981  1.1  mrg      unless -ffinite-math-only was specified.  See also PR 33135.  */
    982  1.1  mrg   if (! OPTION_SET_P (TARGET_IEEE))
    983  1.1  mrg     TARGET_IEEE = ! flag_finite_math_only;
    984  1.1  mrg 
    985  1.1  mrg   if (sh_fixed_range_str)
    986  1.1  mrg     sh_fix_range (sh_fixed_range_str);
    987  1.1  mrg 
    988  1.1  mrg   /* This target defaults to strict volatile bitfields.  */
    989  1.1  mrg   if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    990  1.1  mrg     flag_strict_volatile_bitfields = 1;
    991  1.1  mrg 
    992  1.1  mrg   sh_override_options_after_change ();
    993  1.1  mrg 
    994  1.1  mrg   /* Parse atomic model option and make sure it is valid for the current
    995  1.1  mrg      target CPU.  */
    996  1.1  mrg   selected_atomic_model_
    997  1.1  mrg     = parse_validate_atomic_model_option (sh_atomic_model_str);
    998  1.1  mrg 
    999  1.1  mrg   register_sh_passes ();
   1000  1.1  mrg }
   1001  1.1  mrg 
   1002  1.1  mrg /* Implement targetm.override_options_after_change.  */
   1003  1.1  mrg 
   1004  1.1  mrg static void
   1005  1.1  mrg sh_override_options_after_change (void)
   1006  1.1  mrg {
   1007  1.1  mrg   /*  Adjust loop, jump and function alignment values (in bytes), if those
   1008  1.1  mrg       were not specified by the user using -falign-loops, -falign-jumps
   1009  1.1  mrg       and -falign-functions options.
   1010  1.1  mrg       32 bit alignment is better for speed, because instructions can be
   1011  1.1  mrg       fetched as a pair from a longword boundary.  For size use 16 bit
   1012  1.1  mrg       alignment to get more compact code.
   1013  1.1  mrg       Aligning all jumps increases the code size, even if it might
   1014  1.1  mrg       result in slightly faster code.  Thus, it is set to the smallest
   1015  1.1  mrg       alignment possible if not specified by the user.  */
   1016  1.1  mrg   if (flag_align_loops && !str_align_loops)
   1017  1.1  mrg     str_align_loops = optimize_size ? "2" : "4";
   1018  1.1  mrg 
   1019  1.1  mrg   /* Parse values so that we can compare for current value.  */
   1020  1.1  mrg   parse_alignment_opts ();
   1021  1.1  mrg   if (flag_align_jumps && !str_align_jumps)
   1022  1.1  mrg     str_align_jumps = "2";
   1023  1.1  mrg   else if (align_jumps.levels[0].get_value () < 2)
   1024  1.1  mrg     str_align_jumps = "2";
   1025  1.1  mrg 
   1026  1.1  mrg   if (flag_align_functions && !str_align_functions)
   1027  1.1  mrg     str_align_functions = optimize_size ? "2" : "4";
   1028  1.1  mrg 
   1029  1.1  mrg   /* The linker relaxation code breaks when a function contains
   1030  1.1  mrg      alignments that are larger than that at the start of a
   1031  1.1  mrg      compilation unit.  */
   1032  1.1  mrg   if (TARGET_RELAX)
   1033  1.1  mrg     {
   1034  1.1  mrg       /* Parse values so that we can compare against the current values.  */
   1035  1.1  mrg       parse_alignment_opts ();
   1036  1.1  mrg       int min_align = MAX (align_loops.levels[0].get_value (),
   1037  1.1  mrg 			   align_jumps.levels[0].get_value ());
   1038  1.1  mrg 
   1039  1.1  mrg       /* Also take possible .long constants / mova tables into account.	*/
   1040  1.1  mrg       if (min_align < 4)
   1041  1.1  mrg 	min_align = 4;
   1042  1.1  mrg       if (align_functions.levels[0].get_value () < min_align)
   1043  1.1  mrg 	{
   1044  1.1  mrg 	  char *r = XNEWVEC (char, 16);
   1045  1.1  mrg 	  sprintf (r, "%d", min_align);
   1046  1.1  mrg 	  str_align_functions = r;
   1047  1.1  mrg 	}
   1048  1.1  mrg     }
   1049  1.1  mrg }
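/* Illustrative note: under -mrelax the adjustment above raises the function
   alignment to at least the larger of the loop and jump alignments, and to
   no less than 4 bytes; e.g. an explicit -falign-functions=2 together with
   -mrelax effectively becomes -falign-functions=4.  */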
   1050  1.1  mrg 
   1051  1.1  mrg /* Print the operand address in x to the stream.  */
   1053  1.1  mrg static void
   1054  1.1  mrg sh_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x)
   1055  1.1  mrg {
   1056  1.1  mrg   switch (GET_CODE (x))
   1057  1.1  mrg     {
   1058  1.1  mrg     case REG:
   1059  1.1  mrg     case SUBREG:
   1060  1.1  mrg       fprintf (stream, "@%s", reg_names[true_regnum (x)]);
   1061  1.1  mrg       break;
   1062  1.1  mrg 
   1063  1.1  mrg     case PLUS:
   1064  1.1  mrg       {
   1065  1.1  mrg 	rtx base = XEXP (x, 0);
   1066  1.1  mrg 	rtx index = XEXP (x, 1);
   1067  1.1  mrg 
   1068  1.1  mrg 	switch (GET_CODE (index))
   1069  1.1  mrg 	  {
   1070  1.1  mrg 	  case CONST_INT:
   1071  1.1  mrg 	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
   1072  1.1  mrg 		     reg_names[true_regnum (base)]);
   1073  1.1  mrg 	    break;
   1074  1.1  mrg 
   1075  1.1  mrg 	  case REG:
   1076  1.1  mrg 	  case SUBREG:
   1077  1.1  mrg 	    {
   1078  1.1  mrg 	      int base_num = true_regnum (base);
   1079  1.1  mrg 	      int index_num = true_regnum (index);
   1080  1.1  mrg 
   1081  1.1  mrg 	      /* If base or index is R0, make sure that it comes first.
   1082  1.1  mrg 		 Usually one of them will be R0, but the order might be wrong.
   1083  1.1  mrg 		 If neither base nor index is R0, it's an error and we just
   1084  1.1  mrg 		 pass it on to the assembler.  This avoids silent wrong code
   1085  1.1  mrg 		 bugs.  */
   1086  1.1  mrg 	      if (base_num == 0 && index_num != 0)
   1087  1.1  mrg 		std::swap (base_num, index_num);
   1088  1.1  mrg 
   1089  1.1  mrg 	      fprintf (stream, "@(%s,%s)", reg_names[index_num],
   1090  1.1  mrg 					   reg_names[base_num]);
   1091  1.1  mrg 	      break;
   1092  1.1  mrg 	    }
   1093  1.1  mrg 
   1094  1.1  mrg 	  default:
   1095  1.1  mrg 	    gcc_unreachable ();
   1096  1.1  mrg 	  }
   1097  1.1  mrg       }
   1098  1.1  mrg       break;
   1099  1.1  mrg 
   1100  1.1  mrg     case PRE_DEC:
   1101  1.1  mrg       fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
   1102  1.1  mrg       break;
   1103  1.1  mrg 
   1104  1.1  mrg     case POST_INC:
   1105  1.1  mrg       fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
   1106  1.1  mrg       break;
   1107  1.1  mrg 
   1108  1.1  mrg     default:
   1109  1.1  mrg       x = mark_constant_pool_use (x);
   1110  1.1  mrg       output_addr_const (stream, x);
   1111  1.1  mrg       break;
   1112  1.1  mrg     }
   1113  1.1  mrg }
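/* Some illustrative outputs of the address printer above, for hypothetical
   operands:  (reg r4) -> "@r4",  (plus (reg r4) (const_int 8)) -> "@(8,r4)",
   (plus (reg r4) (reg r0)) -> "@(r0,r4)",  (pre_dec (reg r15)) -> "@-r15",
   (post_inc (reg r4)) -> "@r4+".  */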
   1114  1.1  mrg 
   1115  1.1  mrg /* Print operand x (an rtx) in assembler syntax to file stream
   1116  1.1  mrg    according to modifier code.
   1117  1.1  mrg 
   1118  1.1  mrg    '.'  print a .s if insn needs delay slot
   1119  1.1  mrg    ','  print LOCAL_LABEL_PREFIX
   1120  1.1  mrg    '@'  print trap, rte or rts depending upon pragma interruptness
   1121  1.1  mrg    '#'  output a nop if there is nothing to put in the delay slot
   1122  1.1  mrg    '''  print likelihood suffix (/u for unlikely).
   1123  1.1  mrg    '>'  print branch target if -fverbose-asm
   1124  1.1  mrg    'O'  print a constant without the #
   1125  1.1  mrg    'R'  print the LSW of a dp value - changes if in little endian
   1126  1.1  mrg    'S'  print the MSW of a dp value - changes if in little endian
   1127  1.1  mrg    'T'  print the next word of a dp value - same as 'R' in big endian mode.
   1128  1.1  mrg    'M'  print .b / .w / .l / .s / .d suffix if operand is a MEM.
   1129  1.1  mrg    'N'  print 'r63' if the operand is (const_int 0).
   1130  1.1  mrg    'd'  print a V2SF reg as dN instead of fpN.
   1131  1.1  mrg    'm'  print a pair `base,offset' or `base,index', for LD and ST.
   1132  1.1  mrg    'U'  Likewise for {LD,ST}{HI,LO}.
   1133  1.1  mrg    'V'  print the position of a single bit set.
   1134  1.1  mrg    'W'  print the position of a single bit cleared.
   1135  1.1  mrg    't'  print a memory address which is a register.
   1136  1.1  mrg    'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   1137  1.1  mrg    'o'  output an operator.  */
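/* A few illustrative expansions of the modifiers documented above, using
   hypothetical operands: with an SImode MEM operand, '%M' appends ".l";
   with (const_int 0x12345678), '%u' prints "22136" (the low 16 bits,
   0x5678); with (const_int 8), '%V' prints "#3" (the position of the
   single set bit).  */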
   1138  1.1  mrg static void
   1139  1.1  mrg sh_print_operand (FILE *stream, rtx x, int code)
   1140  1.1  mrg {
   1141  1.1  mrg   int regno;
   1142  1.1  mrg   machine_mode mode;
   1143  1.1  mrg 
   1144  1.1  mrg   switch (code)
   1145  1.1  mrg     {
   1146  1.1  mrg       tree trapa_attr;
   1147  1.1  mrg 
   1148  1.1  mrg     case '.':
   1149  1.1  mrg       if (final_sequence
   1150  1.1  mrg 	  && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
   1151  1.1  mrg 	  && get_attr_length (final_sequence->insn (1)))
   1152  1.1  mrg 	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
   1153  1.1  mrg       break;
   1154  1.1  mrg     case ',':
   1155  1.1  mrg       fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
   1156  1.1  mrg       break;
   1157  1.1  mrg     case '@':
   1158  1.1  mrg       trapa_attr = lookup_attribute ("trap_exit",
   1159  1.1  mrg 				      DECL_ATTRIBUTES (current_function_decl));
   1160  1.1  mrg       if (trapa_attr)
   1161  1.1  mrg 	fprintf (stream, "trapa	#%ld",
   1162  1.1  mrg 		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
   1163  1.1  mrg       else if (sh_cfun_interrupt_handler_p ())
   1164  1.1  mrg 	{
   1165  1.1  mrg 	  if (sh_cfun_resbank_handler_p ())
   1166  1.1  mrg 	    fprintf (stream, "resbank\n");
   1167  1.1  mrg 	  fprintf (stream, "rte");
   1168  1.1  mrg 	}
   1169  1.1  mrg       else
   1170  1.1  mrg 	fprintf (stream, "rts");
   1171  1.1  mrg       break;
   1172  1.1  mrg     case '#':
   1173  1.1  mrg       /* Output a nop if there's nothing in the delay slot.  */
   1174  1.1  mrg       if (dbr_sequence_length () == 0)
   1175  1.1  mrg 	fprintf (stream, "\n\tnop");
   1176  1.1  mrg       break;
   1177  1.1  mrg     case '\'':
   1178  1.1  mrg       {
   1179  1.1  mrg 	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
   1180  1.1  mrg 
   1181  1.1  mrg 	if (note
   1182  1.1  mrg 	    && profile_probability::from_reg_br_prob_note (XINT (note, 0))
   1183  1.1  mrg 	       < profile_probability::even ())
   1184  1.1  mrg 	  fputs ("/u", stream);
   1185  1.1  mrg 	break;
   1186  1.1  mrg       }
   1187  1.1  mrg     case '>':
   1188  1.1  mrg       if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
   1189  1.1  mrg 	{
   1190  1.1  mrg 	  fputs ("\t! target: ", stream);
   1191  1.1  mrg 	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
   1192  1.1  mrg 	}
   1193  1.1  mrg       break;
   1194  1.1  mrg     case 'O':
   1195  1.1  mrg       x = mark_constant_pool_use (x);
   1196  1.1  mrg       output_addr_const (stream, x);
   1197  1.1  mrg       break;
   1198  1.1  mrg     /* N.B.: %R / %S / %T adjust memory addresses by four.
   1199  1.1  mrg        While they can be used to access 64 bit parts of a larger value
   1200  1.1  mrg        held in general purpose registers, that won't work with memory,
   1201  1.1  mrg        nor with fp registers, since the frxx names are used.  */
   1202  1.1  mrg     case 'R':
   1203  1.1  mrg       if (REG_P (x) || GET_CODE (x) == SUBREG)
   1204  1.1  mrg 	{
   1205  1.1  mrg 	  regno = true_regnum (x);
   1206  1.1  mrg 	  regno += FP_REGISTER_P (regno) ? 1 : SH_REG_LSW_OFFSET;
   1207  1.1  mrg 	  fputs (reg_names[regno], (stream));
   1208  1.1  mrg 	}
   1209  1.1  mrg       else if (MEM_P (x))
   1210  1.1  mrg 	{
   1211  1.1  mrg 	  x = adjust_address (x, SImode, 4 * SH_REG_LSW_OFFSET);
   1212  1.1  mrg 	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
   1213  1.1  mrg 	}
   1214  1.1  mrg       else
   1215  1.1  mrg 	{
   1216  1.1  mrg 	  rtx sub = NULL_RTX;
   1217  1.1  mrg 
   1218  1.1  mrg 	  mode = GET_MODE (x);
   1219  1.1  mrg 	  if (mode == VOIDmode)
   1220  1.1  mrg 	    mode = DImode;
   1221  1.1  mrg 	  if (GET_MODE_SIZE (mode) >= 8)
   1222  1.1  mrg 	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_LSW_OFFSET);
   1223  1.1  mrg 	  if (sub)
   1224  1.1  mrg 	    sh_print_operand (stream, sub, 0);
   1225  1.1  mrg 	  else
   1226  1.1  mrg 	    output_operand_lossage ("invalid operand to %%R");
   1227  1.1  mrg 	}
   1228  1.1  mrg       break;
   1229  1.1  mrg     case 'S':
   1230  1.1  mrg       if (REG_P (x) || GET_CODE (x) == SUBREG)
   1231  1.1  mrg 	{
   1232  1.1  mrg 	  regno = true_regnum (x);
   1233  1.1  mrg 	  regno += FP_REGISTER_P (regno) ? 0 : SH_REG_MSW_OFFSET;
   1234  1.1  mrg 	  fputs (reg_names[regno], (stream));
   1235  1.1  mrg 	}
   1236  1.1  mrg       else if (MEM_P (x))
   1237  1.1  mrg 	{
   1238  1.1  mrg 	  x = adjust_address (x, SImode, 4 * SH_REG_MSW_OFFSET);
   1239  1.1  mrg 	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
   1240  1.1  mrg 	}
   1241  1.1  mrg       else
   1242  1.1  mrg 	{
   1243  1.1  mrg 	  rtx sub = NULL_RTX;
   1244  1.1  mrg 
   1245  1.1  mrg 	  mode = GET_MODE (x);
   1246  1.1  mrg 	  if (mode == VOIDmode)
   1247  1.1  mrg 	    mode = DImode;
   1248  1.1  mrg 	  if (GET_MODE_SIZE (mode) >= 8)
   1249  1.1  mrg 	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_MSW_OFFSET);
   1250  1.1  mrg 	  if (sub)
   1251  1.1  mrg 	    sh_print_operand (stream, sub, 0);
   1252  1.1  mrg 	  else
   1253  1.1  mrg 	    output_operand_lossage ("invalid operand to %%S");
   1254  1.1  mrg 	}
   1255  1.1  mrg       break;
   1256  1.1  mrg     case 'T':
   1257  1.1  mrg       /* Next word of a double.  */
   1258  1.1  mrg       switch (GET_CODE (x))
   1259  1.1  mrg 	{
   1260  1.1  mrg 	case REG:
   1261  1.1  mrg 	  fputs (reg_names[REGNO (x) + 1], (stream));
   1262  1.1  mrg 	  break;
   1263  1.1  mrg 	case MEM:
   1264  1.1  mrg 	  {
   1265  1.1  mrg 	    machine_mode mode = GET_MODE (x);
   1266  1.1  mrg 	    if (GET_CODE (XEXP (x, 0)) != PRE_DEC
   1267  1.1  mrg 		&& GET_CODE (XEXP (x, 0)) != POST_INC)
   1268  1.1  mrg 	      x = adjust_address (x, SImode, 4);
   1269  1.1  mrg 	    sh_print_operand_address (stream, mode, XEXP (x, 0));
   1270  1.1  mrg 	  }
   1271  1.1  mrg 	  break;
   1272  1.1  mrg 	default:
   1273  1.1  mrg 	  break;
   1274  1.1  mrg 	}
   1275  1.1  mrg       break;
   1276  1.1  mrg 
   1277  1.1  mrg     case 't':
   1278  1.1  mrg       gcc_assert (MEM_P (x));
   1279  1.1  mrg       x = XEXP (x, 0);
   1280  1.1  mrg       switch (GET_CODE (x))
   1281  1.1  mrg 	{
   1282  1.1  mrg 	case REG:
   1283  1.1  mrg 	case SUBREG:
   1284  1.1  mrg 	  sh_print_operand (stream, x, 0);
   1285  1.1  mrg 	  break;
   1286  1.1  mrg 	default:
   1287  1.1  mrg 	  break;
   1288  1.1  mrg 	}
   1289  1.1  mrg       break;
   1290  1.1  mrg 
   1291  1.1  mrg     case 'o':
   1292  1.1  mrg       switch (GET_CODE (x))
   1293  1.1  mrg 	{
   1294  1.1  mrg 	case PLUS:  fputs ("add", stream); break;
   1295  1.1  mrg 	case MINUS: fputs ("sub", stream); break;
   1296  1.1  mrg 	case MULT:  fputs ("mul", stream); break;
   1297  1.1  mrg 	case DIV:   fputs ("div", stream); break;
   1298  1.1  mrg 	case EQ:    fputs ("eq",  stream); break;
   1299  1.1  mrg 	case NE:    fputs ("ne",  stream); break;
   1300  1.1  mrg 	case GT:  case LT:  fputs ("gt",  stream); break;
   1301  1.1  mrg 	case GE:  case LE:  fputs ("ge",  stream); break;
   1302  1.1  mrg 	case GTU: case LTU: fputs ("gtu", stream); break;
   1303  1.1  mrg 	case GEU: case LEU: fputs ("geu", stream); break;
   1304  1.1  mrg 	default:
   1305  1.1  mrg 	  break;
   1306  1.1  mrg 	}
   1307  1.1  mrg       break;
   1308  1.1  mrg     case 'M':
   1309  1.1  mrg       if (MEM_P (x))
   1310  1.1  mrg 	{
   1311  1.1  mrg 	  switch (GET_MODE (x))
   1312  1.1  mrg 	    {
   1313  1.1  mrg 	    case E_QImode: fputs (".b", stream); break;
   1314  1.1  mrg 	    case E_HImode: fputs (".w", stream); break;
   1315  1.1  mrg 	    case E_SImode: fputs (".l", stream); break;
   1316  1.1  mrg 	    case E_SFmode: fputs (".s", stream); break;
   1317  1.1  mrg 	    case E_DFmode: fputs (".d", stream); break;
   1318  1.1  mrg 	    default: gcc_unreachable ();
   1319  1.1  mrg 	    }
   1320  1.1  mrg 	}
   1321  1.1  mrg       break;
   1322  1.1  mrg 
   1323  1.1  mrg     case 'm':
   1324  1.1  mrg       gcc_assert (MEM_P (x));
   1325  1.1  mrg       x = XEXP (x, 0);
   1326  1.1  mrg       /* Fall through.  */
   1327  1.1  mrg     case 'U':
   1328  1.1  mrg       switch (GET_CODE (x))
   1329  1.1  mrg 	{
   1330  1.1  mrg 	case REG:
   1331  1.1  mrg 	case SUBREG:
   1332  1.1  mrg 	  sh_print_operand (stream, x, 0);
   1333  1.1  mrg 	  fputs (", 0", stream);
   1334  1.1  mrg 	  break;
   1335  1.1  mrg 
   1336  1.1  mrg 	case PLUS:
   1337  1.1  mrg 	  sh_print_operand (stream, XEXP (x, 0), 0);
   1338  1.1  mrg 	  fputs (", ", stream);
   1339  1.1  mrg 	  sh_print_operand (stream, XEXP (x, 1), 0);
   1340  1.1  mrg 	  break;
   1341  1.1  mrg 
   1342  1.1  mrg 	default:
   1343  1.1  mrg 	  gcc_unreachable ();
   1344  1.1  mrg 	}
   1345  1.1  mrg       break;
   1346  1.1  mrg 
   1347  1.1  mrg     case 'V':
   1348  1.1  mrg       {
   1349  1.1  mrg 	int num = exact_log2 (INTVAL (x));
   1350  1.1  mrg 	gcc_assert (num >= 0);
   1351  1.1  mrg 	fprintf (stream, "#%d", num);
   1352  1.1  mrg       }
   1353  1.1  mrg       break;
   1354  1.1  mrg 
   1355  1.1  mrg     case 'W':
   1356  1.1  mrg       {
   1357  1.1  mrg 	int num = exact_log2 (~INTVAL (x));
   1358  1.1  mrg 	gcc_assert (num >= 0);
   1359  1.1  mrg 	fprintf (stream, "#%d", num);
   1360  1.1  mrg       }
   1361  1.1  mrg       break;
   1362  1.1  mrg 
   1363  1.1  mrg     case 'd':
   1364  1.1  mrg       gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);
   1365  1.1  mrg 
   1366  1.1  mrg       fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
   1367  1.1  mrg       break;
   1368  1.1  mrg 
   1369  1.1  mrg     case 'N':
   1370  1.1  mrg       if (x == CONST0_RTX (GET_MODE (x)))
   1371  1.1  mrg 	{
   1372  1.1  mrg 	  fprintf ((stream), "r63");
   1373  1.1  mrg 	  break;
   1374  1.1  mrg 	}
   1375  1.1  mrg       goto default_output;
   1376  1.1  mrg     case 'u':
   1377  1.1  mrg       if (CONST_INT_P (x))
   1378  1.1  mrg 	{
   1379  1.1  mrg 	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
   1380  1.1  mrg 	  break;
   1381  1.1  mrg 	}
   1382  1.1  mrg       /* Fall through.  */
   1383  1.1  mrg 
   1384  1.1  mrg     default_output:
   1385  1.1  mrg     default:
   1386  1.1  mrg       regno = 0;
   1387  1.1  mrg       mode = GET_MODE (x);
   1388  1.1  mrg 
   1389  1.1  mrg       switch (GET_CODE (x))
   1390  1.1  mrg 	{
   1391  1.1  mrg 	case TRUNCATE:
   1392  1.1  mrg 	  {
   1393  1.1  mrg 	    rtx inner = XEXP (x, 0);
   1394  1.1  mrg 	    int offset = 0;
   1395  1.1  mrg 	    machine_mode inner_mode;
   1396  1.1  mrg 
   1397  1.1  mrg 	    /* We might see SUBREGs with vector mode registers inside.  */
   1398  1.1  mrg 	    if (GET_CODE (inner) == SUBREG
   1399  1.1  mrg 		&& (GET_MODE_SIZE (GET_MODE (inner))
   1400  1.1  mrg 		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
   1401  1.1  mrg 		&& subreg_lowpart_p (inner))
   1402  1.1  mrg 	      inner = SUBREG_REG (inner);
   1403  1.1  mrg 	    if (CONST_INT_P (inner))
   1404  1.1  mrg 	      {
   1405  1.1  mrg 		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
   1406  1.1  mrg 		goto default_output;
   1407  1.1  mrg 	      }
   1408  1.1  mrg 	    inner_mode = GET_MODE (inner);
   1409  1.1  mrg 	    if (GET_CODE (inner) == SUBREG
   1410  1.1  mrg 		&& (GET_MODE_SIZE (GET_MODE (inner))
   1411  1.1  mrg 		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
   1412  1.1  mrg 		&& REG_P (SUBREG_REG (inner)))
   1413  1.1  mrg 	      {
   1414  1.1  mrg 		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
   1415  1.1  mrg 					      GET_MODE (SUBREG_REG (inner)),
   1416  1.1  mrg 					      SUBREG_BYTE (inner),
   1417  1.1  mrg 					      GET_MODE (inner));
   1418  1.1  mrg 		inner = SUBREG_REG (inner);
   1419  1.1  mrg 	      }
   1420  1.1  mrg 	    if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
   1421  1.1  mrg 	      abort ();
   1422  1.1  mrg 	    /* Floating point register pairs are always big endian;
   1423  1.1  mrg 	       general purpose registers are 64 bit wide.  */
   1424  1.1  mrg 	    regno = REGNO (inner);
   1425  1.1  mrg 	    regno = (hard_regno_nregs (regno, inner_mode)
   1426  1.1  mrg 		     - hard_regno_nregs (regno, mode))
   1427  1.1  mrg 		     + offset;
   1428  1.1  mrg 	    x = inner;
   1429  1.1  mrg 	    goto reg;
   1430  1.1  mrg 	  }
   1431  1.1  mrg 	case SIGN_EXTEND:
   1432  1.1  mrg 	  x = XEXP (x, 0);
   1433  1.1  mrg 	  goto reg;
   1434  1.1  mrg 	case SUBREG:
   1435  1.1  mrg 	  gcc_assert (SUBREG_BYTE (x) == 0
   1436  1.1  mrg 		      && REG_P (SUBREG_REG (x)));
   1437  1.1  mrg 
   1438  1.1  mrg 	  x = SUBREG_REG (x);
   1439  1.1  mrg 	  /* Fall through.  */
   1440  1.1  mrg 
   1441  1.1  mrg 	reg:
   1442  1.1  mrg 	case REG:
   1443  1.1  mrg 	  regno += REGNO (x);
   1444  1.1  mrg 	  if (FP_REGISTER_P (regno)
   1445  1.1  mrg 	      && mode == V16SFmode)
   1446  1.1  mrg 	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
   1447  1.1  mrg 	  else if (FP_REGISTER_P (REGNO (x))
   1448  1.1  mrg 		   && mode == V4SFmode)
   1449  1.1  mrg 	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
   1450  1.1  mrg 	  else if (REG_P (x)
   1451  1.1  mrg 		   && mode == V2SFmode)
   1452  1.1  mrg 	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
   1453  1.1  mrg 	  else if (FP_REGISTER_P (REGNO (x))
   1454  1.1  mrg 		   && GET_MODE_SIZE (mode) > 4)
   1455  1.1  mrg 	    fprintf ((stream), "d%s", reg_names[regno] + 1);
   1456  1.1  mrg 	  else
   1457  1.1  mrg 	    fputs (reg_names[regno], (stream));
   1458  1.1  mrg 	  break;
   1459  1.1  mrg 
   1460  1.1  mrg 	case MEM:
   1461  1.1  mrg 	  output_address (GET_MODE (x), XEXP (x, 0));
   1462  1.1  mrg 	  break;
   1463  1.1  mrg 
   1464  1.1  mrg 	default:
   1465  1.1  mrg 	  fputc ('#', stream);
   1466  1.1  mrg 	  output_addr_const (stream, x);
   1467  1.1  mrg 	  break;
   1468  1.1  mrg 	}
   1469  1.1  mrg       break;
   1470  1.1  mrg     }
   1471  1.1  mrg }
   1472  1.1  mrg 
   1473  1.1  mrg static bool
   1474  1.1  mrg sh_print_operand_punct_valid_p (unsigned char code)
   1475  1.1  mrg {
   1476  1.1  mrg   return (code == '.' || code == '#' || code == '@' || code == ','
   1477  1.1  mrg 	  || code == '$' || code == '\'' || code == '>');
   1478  1.1  mrg }
   1479  1.1  mrg 
   1480  1.1  mrg /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */
   1481  1.1  mrg static bool
   1482  1.1  mrg sh_asm_output_addr_const_extra (FILE *file, rtx x)
   1483  1.1  mrg {
   1484  1.1  mrg   if (GET_CODE (x) == UNSPEC)
   1485  1.1  mrg     {
   1486  1.1  mrg       switch (XINT (x, 1))
   1487  1.1  mrg 	{
   1488  1.1  mrg 	case UNSPEC_PIC:
   1489  1.1  mrg 	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
   1490  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1491  1.1  mrg 	  break;
   1492  1.1  mrg 	case UNSPEC_GOT:
   1493  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1494  1.1  mrg 	  fputs ("@GOT", file);
   1495  1.1  mrg 	  break;
   1496  1.1  mrg 	case UNSPEC_GOTOFF:
   1497  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1498  1.1  mrg 	  fputs ("@GOTOFF", file);
   1499  1.1  mrg 	  break;
   1500  1.1  mrg 	case UNSPEC_PLT:
   1501  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1502  1.1  mrg 	  fputs ("@PLT", file);
   1503  1.1  mrg 	  break;
   1504  1.1  mrg 	case UNSPEC_GOTPLT:
   1505  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1506  1.1  mrg 	  fputs ("@GOTPLT", file);
   1507  1.1  mrg 	  break;
   1508  1.1  mrg 	case UNSPEC_PCREL:
   1509  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1510  1.1  mrg 	  fputs ("@PCREL", file);
   1511  1.1  mrg 	  break;
   1512  1.1  mrg 	case UNSPEC_DTPOFF:
   1513  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1514  1.1  mrg 	  fputs ("@DTPOFF", file);
   1515  1.1  mrg 	  break;
   1516  1.1  mrg 	case UNSPEC_GOTTPOFF:
   1517  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1518  1.1  mrg 	  fputs ("@GOTTPOFF", file);
   1519  1.1  mrg 	  break;
   1520  1.1  mrg 	case UNSPEC_TPOFF:
   1521  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1522  1.1  mrg 	  fputs ("@TPOFF", file);
   1523  1.1  mrg 	  break;
   1524  1.1  mrg 	case UNSPEC_CALLER:
   1525  1.1  mrg 	  {
   1526  1.1  mrg 	    char name[32];
   1527  1.1  mrg 	    /* LPCS stands for Label for PIC Call Site.  */
   1528  1.1  mrg 	    targetm.asm_out.generate_internal_label (name, "LPCS",
   1529  1.1  mrg 						     INTVAL (XVECEXP (x, 0, 0)));
   1530  1.1  mrg 	    assemble_name (file, name);
   1531  1.1  mrg 	  }
   1532  1.1  mrg 	  break;
   1533  1.1  mrg 	case UNSPEC_SYMOFF:
   1534  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1535  1.1  mrg 	  fputc ('-', file);
   1536  1.1  mrg 	  if (GET_CODE (XVECEXP (x, 0, 1)) == CONST)
   1537  1.1  mrg 	    {
   1538  1.1  mrg 	      fputc ('(', file);
   1539  1.1  mrg 	      output_addr_const (file, XVECEXP (x, 0, 1));
   1540  1.1  mrg 	      fputc (')', file);
   1541  1.1  mrg 	    }
   1542  1.1  mrg 	  else
   1543  1.1  mrg 	    output_addr_const (file, XVECEXP (x, 0, 1));
   1544  1.1  mrg 	  break;
   1545  1.1  mrg 	case UNSPEC_PCREL_SYMOFF:
   1546  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1547  1.1  mrg 	  fputs ("-(", file);
   1548  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 1));
   1549  1.1  mrg 	  fputs ("-.)", file);
   1550  1.1  mrg 	  break;
   1551  1.1  mrg 	case UNSPEC_GOTFUNCDESC:
   1552  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1553  1.1  mrg 	  fputs ("@GOTFUNCDESC", file);
   1554  1.1  mrg 	  break;
   1555  1.1  mrg 	case UNSPEC_GOTOFFFUNCDESC:
   1556  1.1  mrg 	  output_addr_const (file, XVECEXP (x, 0, 0));
   1557  1.1  mrg 	  fputs ("@GOTOFFFUNCDESC", file);
   1558  1.1  mrg 	  break;
   1559  1.1  mrg 	default:
   1560  1.1  mrg 	  return false;
   1561  1.1  mrg 	}
   1562  1.1  mrg       return true;
   1563  1.1  mrg     }
   1564  1.1  mrg   else
   1565  1.1  mrg     return false;
   1566  1.1  mrg }
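/* For illustration: an UNSPEC_GOT wrapping a hypothetical symbol "foo" is
   printed by the routine above as "foo@GOT", an UNSPEC_GOTOFF as
   "foo@GOTOFF", and so on for the other relocation suffixes.  */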
   1567  1.1  mrg 
   1568  1.1  mrg /* Encode symbol attributes of a SYMBOL_REF into its
   1570  1.1  mrg    SYMBOL_REF_FLAGS.  */
   1571  1.1  mrg static void
   1572  1.1  mrg sh_encode_section_info (tree decl, rtx rtl, int first)
   1573  1.1  mrg {
   1574  1.1  mrg   default_encode_section_info (decl, rtl, first);
   1575  1.1  mrg 
   1576  1.1  mrg   if (TREE_CODE (decl) == FUNCTION_DECL
   1577  1.1  mrg       && sh2a_function_vector_p (decl) && TARGET_SH2A)
   1578  1.1  mrg     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
   1579  1.1  mrg }
   1580  1.1  mrg 
   1581  1.1  mrg /* Prepare operands for a move define_expand; specifically, one of the
   1582  1.1  mrg    operands must be in a register.  */
   1583  1.1  mrg void
   1584  1.1  mrg prepare_move_operands (rtx operands[], machine_mode mode)
   1585  1.1  mrg {
   1586  1.1  mrg   if ((mode == SImode || mode == DImode)
   1587  1.1  mrg       && flag_pic
   1588  1.1  mrg       && ! ((mode == Pmode || mode == ptr_mode)
   1589  1.1  mrg 	    && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
   1590  1.1  mrg     {
   1591  1.1  mrg       rtx temp;
   1592  1.1  mrg       if (SYMBOLIC_CONST_P (operands[1]))
   1593  1.1  mrg 	{
   1594  1.1  mrg 	  if (MEM_P (operands[0]))
   1595  1.1  mrg 	    operands[1] = force_reg (Pmode, operands[1]);
   1596  1.1  mrg 	  else
   1597  1.1  mrg 	    {
   1598  1.1  mrg 	      temp = (!can_create_pseudo_p ()
   1599  1.1  mrg 		      ? operands[0]
   1600  1.1  mrg 		      : gen_reg_rtx (Pmode));
   1601  1.1  mrg 	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
   1602  1.1  mrg 	    }
   1603  1.1  mrg 	}
   1604  1.1  mrg       else if (GET_CODE (operands[1]) == CONST
   1605  1.1  mrg 	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
   1606  1.1  mrg 	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
   1607  1.1  mrg 	{
   1608  1.1  mrg 	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
   1609  1.1  mrg 	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
   1610  1.1  mrg 					 mode, temp);
   1611  1.1  mrg 	  operands[1] = expand_binop (mode, add_optab, temp,
   1612  1.1  mrg 				      XEXP (XEXP (operands[1], 0), 1),
   1613  1.1  mrg 				      (!can_create_pseudo_p ()
   1614  1.1  mrg 				       ? temp
   1615  1.1  mrg 				       : gen_reg_rtx (Pmode)),
   1616  1.1  mrg 				      0, OPTAB_LIB_WIDEN);
   1617  1.1  mrg 	}
   1618  1.1  mrg     }
   1619  1.1  mrg 
   1620  1.1  mrg   if (! reload_in_progress && ! reload_completed)
   1621  1.1  mrg     {
   1622  1.1  mrg       /* Copy the source to a register if neither operand is a register.  */
   1623  1.1  mrg       if (! register_operand (operands[0], mode)
   1624  1.1  mrg 	  && ! register_operand (operands[1], mode))
   1625  1.1  mrg 	operands[1] = copy_to_mode_reg (mode, operands[1]);
   1626  1.1  mrg 
   1627  1.1  mrg       if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
   1628  1.1  mrg 	{
   1629  1.1  mrg 	  /* This is like change_address_1 (operands[0], mode, 0, 1),
   1630  1.1  mrg 	     except that we can't use that function because it is static.  */
   1631  1.1  mrg 	  rtx new_rtx = change_address (operands[0], mode, 0);
   1632  1.1  mrg 	  MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
   1633  1.1  mrg 	  operands[0] = new_rtx;
   1634  1.1  mrg 	}
   1635  1.1  mrg 
   1636  1.1  mrg       /* This case can happen while generating code to move the result
   1637  1.1  mrg 	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
   1638  1.1  mrg 	 reload will fail to find a spill register for rX, since r0 is already
   1639  1.1  mrg 	 being used for the source.  */
   1640  1.1  mrg       else if (refers_to_regno_p (R0_REG, operands[1])
   1641  1.1  mrg 	       && MEM_P (operands[0])
   1642  1.1  mrg 	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
   1643  1.1  mrg 	       && REG_P (XEXP (XEXP (operands[0], 0), 1)))
   1644  1.1  mrg 	operands[1] = copy_to_mode_reg (mode, operands[1]);
   1645  1.1  mrg 
   1646  1.1  mrg       /* When displacement addressing is used, the RA will assign r0 to
   1647  1.1  mrg 	 the pseudo register operand for the QI/HImode load/store.
   1648  1.1  mrg 	 This tends to make a long live range for R0 and might cause
   1649  1.1  mrg 	 anomalous register spills in some cases with LRA.  See PR
   1650  1.1  mrg 	 target/55212.
   1651  1.1  mrg 	 We split such a load/store into two move insns via r0 so as to
   1652  1.1  mrg 	 shorten R0's live range.  This makes some code worse but wins
   1653  1.1  mrg 	 on average for LRA.
   1654  1.1  mrg 	 Also when base+index addressing is used and the index term is
   1655  1.1  mrg 	 a subreg, LRA assumes that more hard registers can be available
   1656  1.1  mrg 	 in some situations.  That isn't the case for SH in the problematic
   1657  1.1  mrg 	 case.  We can pre-allocate R0 for that index term to avoid
   1658  1.1  mrg 	 the issue.  See PR target/66591.  */
   1659  1.1  mrg       else if (sh_lra_p ()
   1660  1.1  mrg 	       && ! TARGET_SH2A
   1661  1.1  mrg 	       && ((REG_P (operands[0]) && MEM_P (operands[1]))
   1662  1.1  mrg 		   || (REG_P (operands[1]) && MEM_P (operands[0]))))
   1663  1.1  mrg 	{
   1664  1.1  mrg 	  bool load_p = REG_P (operands[0]);
   1665  1.1  mrg 	  rtx reg = operands[load_p ? 0 : 1];
   1666  1.1  mrg 	  rtx adr = XEXP (operands[load_p ? 1 : 0], 0);
   1667  1.1  mrg 
   1668  1.1  mrg 	  if ((mode == QImode || mode == HImode)
   1669  1.1  mrg 	      && REGNO (reg) >= FIRST_PSEUDO_REGISTER
   1670  1.1  mrg 	      && GET_CODE (adr) == PLUS
   1671  1.1  mrg 	      && REG_P (XEXP (adr, 0))
   1672  1.1  mrg 	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
   1673  1.1  mrg 	      && CONST_INT_P (XEXP (adr, 1))
   1674  1.1  mrg 	      && INTVAL (XEXP (adr, 1)) != 0
   1675  1.1  mrg 	      && sh_legitimate_index_p (mode, XEXP (adr, 1), false, true))
   1676  1.1  mrg 	    {
   1677  1.1  mrg 	      rtx r0_rtx = gen_rtx_REG (mode, R0_REG);
   1678  1.1  mrg 	      emit_move_insn (r0_rtx, operands[1]);
   1679  1.1  mrg 	      operands[1] = r0_rtx;
   1680  1.1  mrg 	    }
   1681  1.1  mrg 	  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
   1682  1.1  mrg 	      && GET_CODE (adr) == PLUS
   1683  1.1  mrg 	      && REG_P (XEXP (adr, 0))
   1684  1.1  mrg 	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
   1685  1.1  mrg 	      && SUBREG_P (XEXP (adr, 1))
   1686  1.1  mrg 	      && REG_P (SUBREG_REG (XEXP (adr, 1))))
   1687  1.1  mrg 	    {
   1688  1.1  mrg 	      rtx r0_rtx = gen_rtx_REG (GET_MODE (XEXP (adr, 1)), R0_REG);
   1689  1.1  mrg 	      emit_move_insn (r0_rtx, XEXP (adr, 1));
   1690  1.1  mrg 	      XEXP (adr, 1) = r0_rtx;
   1691  1.1  mrg 	    }
   1692  1.1  mrg 	}
   1693  1.1  mrg     }
   1694  1.1  mrg 
   1695  1.1  mrg   if (mode == Pmode || mode == ptr_mode)
   1696  1.1  mrg     {
   1697  1.1  mrg       rtx op0 = operands[0];
   1698  1.1  mrg       rtx op1 = operands[1];
   1699  1.1  mrg       rtx opc;
   1700  1.1  mrg       if (GET_CODE (op1) == CONST
   1701  1.1  mrg 	  && GET_CODE (XEXP (op1, 0)) == PLUS
   1702  1.1  mrg 	  && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
   1703  1.1  mrg 	      != TLS_MODEL_NONE))
   1704  1.1  mrg 	{
   1705  1.1  mrg 	  opc = XEXP (XEXP (op1, 0), 1);
   1706  1.1  mrg 	  op1 = XEXP (XEXP (op1, 0), 0);
   1707  1.1  mrg 	}
   1708  1.1  mrg       else
   1709  1.1  mrg 	opc = NULL_RTX;
   1710  1.1  mrg 
   1711  1.1  mrg       enum tls_model tls_kind;
   1712  1.1  mrg 
   1713  1.1  mrg       if (! reload_in_progress && ! reload_completed
   1714  1.1  mrg 	  && (tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
   1715  1.1  mrg 	{
   1716  1.1  mrg 	  rtx tga_op1, tga_ret, tmp, tmp2;
   1717  1.1  mrg 
   1718  1.1  mrg 	  if (! flag_pic
   1719  1.1  mrg 	      && (tls_kind == TLS_MODEL_GLOBAL_DYNAMIC
   1720  1.1  mrg 		  || tls_kind == TLS_MODEL_LOCAL_DYNAMIC
   1721  1.1  mrg 		  || tls_kind == TLS_MODEL_INITIAL_EXEC))
   1722  1.1  mrg 	    {
   1723  1.1  mrg 	      static int got_labelno;
   1724  1.1  mrg 	      /* Don't schedule insns for getting GOT address when
   1725  1.1  mrg 		 the first scheduling pass is enabled, to avoid spill
   1726  1.1  mrg 		 failures for R0.  */
   1727  1.1  mrg 	      if (flag_schedule_insns)
   1728  1.1  mrg 		emit_insn (gen_blockage ());
   1729  1.1  mrg 	      emit_insn (gen_GOTaddr2picreg (GEN_INT (++got_labelno)));
   1730  1.1  mrg 	      emit_use (gen_rtx_REG (SImode, PIC_REG));
   1731  1.1  mrg 	      if (flag_schedule_insns)
   1732  1.1  mrg 		emit_insn (gen_blockage ());
   1733  1.1  mrg 	    }
   1734  1.1  mrg 
   1735  1.1  mrg 	  switch (tls_kind)
   1736  1.1  mrg 	    {
   1737  1.1  mrg 	    case TLS_MODEL_GLOBAL_DYNAMIC:
   1738  1.1  mrg 	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
   1739  1.1  mrg 	      if (TARGET_FDPIC)
   1740  1.1  mrg 		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
   1741  1.1  mrg 				sh_get_fdpic_reg_initial_val ());
   1742  1.1  mrg 	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
   1743  1.1  mrg 	      tmp = gen_reg_rtx (Pmode);
   1744  1.1  mrg 	      emit_move_insn (tmp, tga_ret);
   1745  1.1  mrg 	      op1 = tmp;
   1746  1.1  mrg 	      break;
   1747  1.1  mrg 
   1748  1.1  mrg 	    case TLS_MODEL_LOCAL_DYNAMIC:
   1749  1.1  mrg 	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
   1750  1.1  mrg 	      if (TARGET_FDPIC)
   1751  1.1  mrg 		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
   1752  1.1  mrg 				sh_get_fdpic_reg_initial_val ());
   1753  1.1  mrg 	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
   1754  1.1  mrg 
   1755  1.1  mrg 	      tmp = gen_reg_rtx (Pmode);
   1756  1.1  mrg 	      emit_move_insn (tmp, tga_ret);
   1757  1.1  mrg 
   1758  1.1  mrg 	      if (register_operand (op0, Pmode))
   1759  1.1  mrg 		tmp2 = op0;
   1760  1.1  mrg 	      else
   1761  1.1  mrg 		tmp2 = gen_reg_rtx (Pmode);
   1762  1.1  mrg 
   1763  1.1  mrg 	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
   1764  1.1  mrg 	      op1 = tmp2;
   1765  1.1  mrg 	      break;
   1766  1.1  mrg 
   1767  1.1  mrg 	    case TLS_MODEL_INITIAL_EXEC:
   1768  1.1  mrg 	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
   1769  1.1  mrg 	      tmp = gen_sym2GOTTPOFF (op1);
   1770  1.1  mrg 	      if (TARGET_FDPIC)
   1771  1.1  mrg 		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
   1772  1.1  mrg 				sh_get_fdpic_reg_initial_val ());
   1773  1.1  mrg 	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
   1774  1.1  mrg 	      op1 = tga_op1;
   1775  1.1  mrg 	      break;
   1776  1.1  mrg 
   1777  1.1  mrg 	    case TLS_MODEL_LOCAL_EXEC:
   1778  1.1  mrg 	      tmp2 = gen_reg_rtx (Pmode);
   1779  1.1  mrg 	      emit_insn (gen_store_gbr (tmp2));
   1780  1.1  mrg 	      tmp = gen_reg_rtx (Pmode);
   1781  1.1  mrg 	      emit_insn (gen_symTPOFF2reg (tmp, op1));
   1782  1.1  mrg 
   1783  1.1  mrg 	      if (register_operand (op0, Pmode))
   1784  1.1  mrg 		op1 = op0;
   1785  1.1  mrg 	      else
   1786  1.1  mrg 		op1 = gen_reg_rtx (Pmode);
   1787  1.1  mrg 
   1788  1.1  mrg 	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
   1789  1.1  mrg 	      break;
   1790  1.1  mrg 
   1791  1.1  mrg 	    default:
   1792  1.1  mrg 	      gcc_unreachable ();
   1793  1.1  mrg 	    }
   1794  1.1  mrg 	  if (opc)
   1795  1.1  mrg 	    emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
   1796  1.1  mrg 	  operands[1] = op1;
   1797  1.1  mrg 	}
   1798  1.1  mrg     }
   1799  1.1  mrg 
   1800  1.1  mrg   if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
   1801  1.1  mrg     {
   1802  1.1  mrg       rtx base, offset;
   1803  1.1  mrg       split_const (operands[1], &base, &offset);
   1804  1.1  mrg 
   1805  1.1  mrg       if (GET_CODE (base) == SYMBOL_REF
   1806  1.1  mrg 	  && !offset_within_block_p (base, INTVAL (offset)))
   1807  1.1  mrg 	{
   1808  1.1  mrg 	  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (mode) : operands[0];
   1809  1.1  mrg 	  emit_move_insn (tmp, base);
   1810  1.1  mrg 	  if (!arith_operand (offset, mode))
   1811  1.1  mrg 	    offset = force_reg (mode, offset);
   1812  1.1  mrg 	  emit_insn (gen_add3_insn (operands[0], tmp, offset));
   1813  1.1  mrg 	}
   1814  1.1  mrg     }
   1815  1.1  mrg }
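/* Illustrative example for the PIC handling at the top of
   prepare_move_operands: when a hypothetical global "extern int x;" is
   accessed with -fPIC, the symbolic source operand is not moved directly;
   it is first rewritten via legitimize_pic_address into a PIC-legitimate
   (e.g. GOT-relative) address in a register, and only then is the actual
   move emitted.  */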
   1816  1.1  mrg 
   1817  1.1  mrg /* Implement the canonicalize_comparison target hook for the combine
   1818  1.1  mrg    pass.  For the target hook this function is invoked via
   1819  1.1  mrg    sh_canonicalize_comparison.  This function is also re-used to
   1820  1.1  mrg    canonicalize comparisons in cbranch pattern expanders.  */
   1821  1.1  mrg static void
   1822  1.1  mrg sh_canonicalize_comparison (enum rtx_code& cmp, rtx& op0, rtx& op1,
   1823  1.1  mrg 			    machine_mode mode,
   1824  1.1  mrg 			    bool op0_preserve_value)
   1825  1.1  mrg {
   1826  1.1  mrg   /* When invoked from within the combine pass the mode is not specified,
   1827  1.1  mrg      so try to get it from one of the operands.  */
   1828  1.1  mrg   if (mode == VOIDmode)
   1829  1.1  mrg     mode = GET_MODE (op0);
   1830  1.1  mrg   if (mode == VOIDmode)
   1831  1.1  mrg     mode = GET_MODE (op1);
   1832  1.1  mrg 
   1833  1.1  mrg   // We need to have a mode to do something useful here.
   1834  1.1  mrg   if (mode == VOIDmode)
   1835  1.1  mrg     return;
   1836  1.1  mrg 
   1837  1.1  mrg   // Currently, we don't deal with floats here.
   1838  1.1  mrg   if (GET_MODE_CLASS (mode) == MODE_FLOAT)
   1839  1.1  mrg     return;
   1840  1.1  mrg 
   1841  1.1  mrg   // Make sure that the constant operand is the second operand.
   1842  1.1  mrg   if (CONST_INT_P (op0) && !CONST_INT_P (op1))
   1843  1.1  mrg     {
   1844  1.1  mrg       if (op0_preserve_value)
   1845  1.1  mrg 	return;
   1846  1.1  mrg 
   1847  1.1  mrg       std::swap (op0, op1);
   1848  1.1  mrg       cmp = swap_condition (cmp);
   1849  1.1  mrg     }
   1850  1.1  mrg 
   1851  1.1  mrg   if (CONST_INT_P (op1))
   1852  1.1  mrg     {
   1853  1.1  mrg       /* Try to adjust the constant operand in such a way that available
   1854  1.1  mrg 	 comparison insns can be utilized better and the constant can be
   1855  1.1  mrg 	 loaded with a 'mov #imm,Rm' insn.  This avoids a load from the
   1856  1.1  mrg 	 constant pool.  */
   1857  1.1  mrg       const HOST_WIDE_INT val = INTVAL (op1);
   1858  1.1  mrg 
   1859  1.1  mrg       /* x > -1		  --> x >= 0
   1860  1.1  mrg 	 x > 0xFFFFFF7F	  --> x >= 0xFFFFFF80
   1861  1.1  mrg 	 x <= -1	  --> x < 0
   1862  1.1  mrg 	 x <= 0xFFFFFF7F  --> x < 0xFFFFFF80  */
   1863  1.1  mrg       if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE))
   1864  1.1  mrg 	{
   1865  1.1  mrg 	  cmp = cmp == GT ? GE : LT;
   1866  1.1  mrg 	  op1 = gen_int_mode (val + 1, mode);
   1867  1.1  mrg         }
   1868  1.1  mrg 
   1869  1.1  mrg       /* x >= 1     --> x > 0
   1870  1.1  mrg 	 x >= 0x80  --> x > 0x7F
   1871  1.1  mrg 	 x < 1      --> x <= 0
   1872  1.1  mrg 	 x < 0x80   --> x <= 0x7F  */
   1873  1.1  mrg       else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT))
   1874  1.1  mrg 	{
   1875  1.1  mrg 	  cmp = cmp == GE ? GT : LE;
   1876  1.1  mrg 	  op1 = gen_int_mode (val - 1, mode);
   1877  1.1  mrg 	}
   1878  1.1  mrg 
   1879  1.1  mrg       /* unsigned x >= 1  --> x != 0
   1880  1.1  mrg 	 unsigned x < 1   --> x == 0  */
   1881  1.1  mrg       else if (val == 1 && (cmp == GEU || cmp == LTU))
   1882  1.1  mrg 	{
   1883  1.1  mrg 	  cmp = cmp == GEU ? NE : EQ;
   1884  1.1  mrg 	  op1 = CONST0_RTX (mode);
   1885  1.1  mrg 	}
   1886  1.1  mrg 
   1887  1.1  mrg       /* unsigned x >= 0x80  --> unsigned x > 0x7F
   1888  1.1  mrg 	 unsigned x < 0x80   --> unsigned x <= 0x7F  */
   1889  1.1  mrg       else if (val == 0x80 && (cmp == GEU || cmp == LTU))
   1890  1.1  mrg 	{
   1891  1.1  mrg 	  cmp = cmp == GEU ? GTU : LEU;
   1892  1.1  mrg 	  op1 = gen_int_mode (val - 1, mode);
   1893  1.1  mrg 	}
   1894  1.1  mrg 
   1895  1.1  mrg       /* unsigned x > 0   --> x != 0
   1896  1.1  mrg 	 unsigned x <= 0  --> x == 0  */
   1897  1.1  mrg       else if (val == 0 && (cmp == GTU || cmp == LEU))
   1898  1.1  mrg 	cmp = cmp == GTU ? NE : EQ;
   1899  1.1  mrg 
   1900  1.1  mrg       /* unsigned x > 0x7FFFFFFF   --> signed x < 0
   1901  1.1  mrg 	 unsigned x <= 0x7FFFFFFF  --> signed x >= 0  */
   1902  1.1  mrg       else if (mode == SImode && (cmp == GTU || cmp == LEU)
   1903  1.1  mrg 	       && val == 0x7FFFFFFF)
   1904  1.1  mrg 	{
   1905  1.1  mrg 	  cmp = cmp == GTU ? LT : GE;
   1906  1.1  mrg 	  op1 = const0_rtx;
   1907  1.1  mrg 	}
   1908  1.1  mrg 
   1909  1.1  mrg       /* unsigned x >= 0x80000000  --> signed x < 0
   1910  1.1  mrg 	 unsigned x < 0x80000000   --> signed x >= 0  */
   1911  1.1  mrg       else if (mode == SImode && (cmp == GEU || cmp == LTU)
   1912  1.1  mrg 	       && (unsigned HOST_WIDE_INT)val
   1913  1.1  mrg 		   == ((unsigned HOST_WIDE_INT)0x7FFFFFFF + 1))
   1914  1.1  mrg 	{
   1915  1.1  mrg 	  cmp = cmp == GEU ? LT : GE;
   1916  1.1  mrg 	  op1 = const0_rtx;
   1917  1.1  mrg 	}
   1918  1.1  mrg     }
   1919  1.1  mrg }
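/* Worked example for the constant adjustments above, with hypothetical
   values: "unsigned x < 0x80" is rewritten to "unsigned x <= 0x7F", because
   0x80 does not fit the signed 8-bit immediate of "mov #imm,Rn" and would
   need a constant pool load, whereas 0x7F can be loaded with a single mov
   insn.  */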
   1920  1.1  mrg 
   1921  1.1  mrg /* This function implements the canonicalize_comparison target hook.
   1922  1.1  mrg    This wrapper around the internally used sh_canonicalize_comparison
   1923  1.1  mrg    function is needed to do the enum rtx_code <-> int conversion.
   1924  1.1  mrg    Target hooks cannot use enum rtx_code in their definitions.  */
   1925  1.1  mrg static void
   1926  1.1  mrg sh_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
   1927  1.1  mrg 			    bool op0_preserve_value)
   1928  1.1  mrg {
   1929  1.1  mrg   enum rtx_code tmp_code = (enum rtx_code)*code;
   1930  1.1  mrg   sh_canonicalize_comparison (tmp_code, *op0, *op1,
   1931  1.1  mrg 			      VOIDmode, op0_preserve_value);
   1932  1.1  mrg   *code = (int)tmp_code;
   1933  1.1  mrg }
   1934  1.1  mrg 
   1935  1.1  mrg /* This function implements the legitimate_combined_insn target hook,
   1936  1.1  mrg    which the combine pass uses to reject combined insns early, before
   1937  1.1  mrg    it tries to recog the insn and determine its cost.  */
   1938  1.1  mrg static bool
   1939  1.1  mrg sh_legitimate_combined_insn (rtx_insn* insn)
   1940  1.1  mrg {
   1941  1.1  mrg   /* Reject combinations of memory loads and zero extensions, as these
   1942  1.1  mrg      interfere with other combine patterns such as zero extracts and bit
   1943  1.1  mrg      tests.  The SH2A movu.{b|w} insns are formed later in the
   1944  1.1  mrg      'sh_optimize_extu_exts' pass after combine/split1.  */
   1945  1.1  mrg   rtx p = PATTERN (insn);
   1946  1.1  mrg   if (GET_CODE (p) == SET
   1947  1.1  mrg       && REG_P (XEXP (p, 0)) && GET_MODE (XEXP (p, 0)) == SImode
   1948  1.1  mrg       && GET_CODE (XEXP (p, 1)) == ZERO_EXTEND
   1949  1.1  mrg       && MEM_P (XEXP (XEXP (p, 1), 0)))
   1950  1.1  mrg       return false;
   1951  1.1  mrg 
   1952  1.1  mrg   return true;
   1953  1.1  mrg }
   1954  1.1  mrg 
   1955  1.1  mrg bool
   1956  1.1  mrg sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2)
   1957  1.1  mrg {
   1958  1.1  mrg   *p1 = T_REG;
   1959  1.1  mrg   *p2 = INVALID_REGNUM;
   1960  1.1  mrg   return true;
   1961  1.1  mrg }
   1962  1.1  mrg 
   1963  1.1  mrg /* Try to calculate the branch distance of a conditional branch in bytes.
   1964  1.1  mrg 
   1965  1.1  mrg    FIXME: Because of PR 59189 we can't use the CFG here.  Instead just
   1966  1.1  mrg    walk from this insn into the next (fall-through) basic block and see if
   1967  1.1  mrg    we hit the label.  */
   1968  1.1  mrg unsigned int
   1969  1.1  mrg sh_cbranch_distance (rtx_insn* _cbranch_insn, unsigned int max_dist)
   1970  1.1  mrg {
   1971  1.1  mrg   rtx_jump_insn* cbranch_insn = safe_as_a<rtx_jump_insn*> (_cbranch_insn);
   1972  1.1  mrg 
   1973  1.1  mrg   if (dump_file)
   1974  1.1  mrg     {
   1975  1.1  mrg       fprintf (dump_file, "sh_cbranch_distance insn = \n");
   1976  1.1  mrg       print_rtl_single (dump_file, cbranch_insn);
   1977  1.1  mrg     }
   1978  1.1  mrg 
   1979  1.1  mrg   unsigned int dist = 0;
   1980  1.1  mrg 
   1981  1.1  mrg   for (rtx_insn* i = next_nonnote_insn (cbranch_insn);
   1982  1.1  mrg        i != NULL && dist < max_dist; i = next_nonnote_insn (i))
   1983  1.1  mrg     {
   1984  1.1  mrg       const unsigned int i_len = get_attr_length (i);
   1985  1.1  mrg       dist += i_len;
   1986  1.1  mrg 
   1987  1.1  mrg       if (dump_file)
   1988  1.1  mrg 	fprintf (dump_file, "  insn %d  length = %u  dist = %u\n",
   1989  1.1  mrg 		 INSN_UID (i), i_len, dist);
   1990  1.1  mrg 
   1991  1.1  mrg       if (rtx_code_label* l = dyn_cast<rtx_code_label*> (i))
   1992  1.1  mrg 	{
   1993  1.1  mrg 	  if (l == cbranch_insn->jump_target ())
   1994  1.1  mrg 	    {
   1995  1.1  mrg 	      if (dump_file)
   1996  1.1  mrg 		fprintf (dump_file, "  cbranch dist = %u\n", dist);
   1997  1.1  mrg 	      return dist;
   1998  1.1  mrg 	    }
   1999  1.1  mrg 	  break;
   2000  1.1  mrg 	}
   2001  1.1  mrg     }
   2002  1.1  mrg 
   2003  1.1  mrg   if (dump_file)
   2004  1.1  mrg     fprintf (dump_file, "  cbranch dist = unknown\n");
   2005  1.1  mrg 
   2006  1.1  mrg   return unknown_cbranch_distance;
   2007  1.1  mrg }
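/* Illustrative use of sh_cbranch_distance: if the conditional branch is
   followed by three 2-byte insns and then its target label, the function
   returns 6; if the target label is not reached within max_dist bytes, or
   a different code label is hit first, it returns
   unknown_cbranch_distance.  */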
   2008  1.1  mrg 
   2009  1.1  mrg enum rtx_code
   2010  1.1  mrg prepare_cbranch_operands (rtx *operands, machine_mode mode,
   2011  1.1  mrg 			  enum rtx_code comparison)
   2012  1.1  mrg {
   2013  1.1  mrg   gcc_assert (can_create_pseudo_p ());
   2014  1.1  mrg 
   2015  1.1  mrg   if (comparison == LAST_AND_UNUSED_RTX_CODE)
   2016  1.1  mrg     comparison = GET_CODE (operands[0]);
   2017  1.1  mrg 
   2018  1.1  mrg   sh_canonicalize_comparison (comparison, operands[1], operands[2],
   2019  1.1  mrg 			      mode, false);
   2020  1.1  mrg 
   2021  1.1  mrg   rtx op1 = operands[1];
   2022  1.1  mrg   operands[1] = force_reg (mode, op1);
   2023  1.1  mrg 
   2024  1.1  mrg   /* When we are handling DImode comparisons, we want to keep constants so
   2025  1.1  mrg      that we can optimize the component comparisons; however, memory loads
   2026  1.1  mrg      are better issued as a whole so that they can be scheduled well.
   2027  1.1  mrg      SImode equality comparisons allow I08 constants, but only when they
   2028  1.1  mrg      compare r0.  Hence, if operands[1] has to be loaded from somewhere else
   2029  1.1  mrg      into a register, that register might as well be r0, and we allow the
   2030  1.1  mrg      constant.  If it is already in a register, this is likely to be
   2031  1.1  mrg      allocated to a different hard register, thus we load the constant into
   2032  1.1  mrg      a register unless it is zero.  */
   2033  1.1  mrg   if (!REG_P (operands[2])
   2034  1.1  mrg       && (!CONST_INT_P (operands[2])
   2035  1.1  mrg 	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
   2036  1.1  mrg 	      && ((comparison != EQ && comparison != NE)
   2037  1.1  mrg 		  || (REG_P (op1) && REGNO (op1) != R0_REG)
   2038  1.1  mrg 		  || !satisfies_constraint_I08 (operands[2])))))
   2039  1.1  mrg     operands[2] = force_reg (mode, operands[2]);
   2040  1.1  mrg 
   2041  1.1  mrg   return comparison;
   2042  1.1  mrg }
   2043  1.1  mrg 
   2044  1.1  mrg static void
   2045  1.1  mrg expand_cbranchsi4 (rtx *operands, enum rtx_code comparison,
   2046  1.1  mrg 		   profile_probability probability)
   2047  1.1  mrg {
   2048  1.1  mrg   rtx (*branch_expander) (rtx) = gen_branch_true;
   2049  1.1  mrg   comparison = prepare_cbranch_operands (operands, SImode, comparison);
   2050  1.1  mrg   switch (comparison)
   2051  1.1  mrg     {
   2052  1.1  mrg     case NE: case LT: case LE: case LTU: case LEU:
   2053  1.1  mrg       comparison = reverse_condition (comparison);
   2054  1.1  mrg       branch_expander = gen_branch_false;
   2055  1.1  mrg     default: ;
   2056  1.1  mrg     }
   2057  1.1  mrg   emit_insn (gen_rtx_SET (get_t_reg_rtx (),
   2058  1.1  mrg 			  gen_rtx_fmt_ee (comparison, SImode,
   2059  1.1  mrg 					  operands[1], operands[2])));
   2060  1.1  mrg   rtx_insn *jump = emit_jump_insn (branch_expander (operands[3]));
   2061  1.1  mrg   if (probability.initialized_p ())
   2062  1.1  mrg     add_reg_br_prob_note (jump, probability);
   2063  1.1  mrg }
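/* Note (illustrative): the condition reversal above reflects that the SH
   compare instructions only set T for eq/gt/ge/hi/hs style conditions; an
   "x < y" style branch is therefore emitted as the reversed comparison
   followed by a branch-on-false (bf) rather than a branch-on-true (bt).  */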
   2064  1.1  mrg 
   2065  1.1  mrg void
   2066  1.1  mrg expand_cbranchsi4 (rtx *operands, enum rtx_code comparison)
   2067  1.1  mrg {
   2068  1.1  mrg   expand_cbranchsi4 (operands, comparison,
   2069  1.1  mrg 		     profile_probability::uninitialized ());
   2070  1.1  mrg }
   2071  1.1  mrg 
   2072  1.1  mrg /* ??? How should we distribute probabilities when more than one branch
   2073  1.1  mrg    is generated?  So far we only have some ad-hoc observations:
   2074  1.1  mrg    - If the operands are random, they are likely to differ in both parts.
   2075  1.1  mrg    - If comparing items in a hash chain, the operands are random or equal;
   2076  1.1  mrg      operation should be EQ or NE.
   2077  1.1  mrg    - If items are searched in an ordered tree from the root, we can expect
   2078  1.1  mrg      the highpart to be unequal about half of the time; operation should be
   2079  1.1  mrg      an inequality comparison, operands non-constant, and overall probability
   2080  1.1  mrg      about 50%.  Likewise for quicksort.
   2081  1.1  mrg    - Range checks will often be made against constants.  Even if we assume for
   2082  1.1  mrg      simplicity an even distribution of the non-constant operand over a
   2083  1.1  mrg      sub-range here, the same probability could be generated with differently
   2084  1.1  mrg      wide sub-ranges - as long as the ratio of the part of the subrange that
   2085  1.1  mrg      is before the threshold to the part that comes after the threshold stays
   2086  1.1  mrg      the same.  Thus, we can't really tell anything here;
   2087  1.1  mrg      assuming random distribution is at least simple.
   2088  1.1  mrg  */
   2089  1.1  mrg bool
   2090  1.1  mrg expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
   2091  1.1  mrg {
   2092  1.1  mrg   enum rtx_code msw_taken, msw_skip, lsw_taken;
   2093  1.1  mrg   rtx_code_label *skip_label = NULL;
   2094  1.1  mrg   rtx op1h, op1l, op2h, op2l;
   2095  1.1  mrg   int num_branches;
   2096  1.1  mrg   profile_probability prob, rev_prob;
   2097  1.1  mrg   profile_probability msw_taken_prob = profile_probability::uninitialized (),
   2098  1.1  mrg 		      msw_skip_prob = profile_probability::uninitialized (),
   2099  1.1  mrg 		      lsw_taken_prob = profile_probability::uninitialized ();
   2100  1.1  mrg 
   2101  1.1  mrg   comparison = prepare_cbranch_operands (operands, DImode, comparison);
   2102  1.1  mrg   op1h = gen_highpart_mode (SImode, DImode, operands[1]);
   2103  1.1  mrg   op2h = gen_highpart_mode (SImode, DImode, operands[2]);
   2104  1.1  mrg   op1l = gen_lowpart (SImode, operands[1]);
   2105  1.1  mrg   op2l = gen_lowpart (SImode, operands[2]);
   2106  1.1  mrg   msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
   2107  1.1  mrg   prob = split_branch_probability;
   2108  1.1  mrg   rev_prob = prob.invert ();
   2109  1.1  mrg   switch (comparison)
   2110  1.1  mrg     {
   2111  1.1  mrg     case EQ:
   2112  1.1  mrg       msw_skip = NE;
   2113  1.1  mrg       lsw_taken = EQ;
   2114  1.1  mrg       if (prob.initialized_p ())
   2115  1.1  mrg 	{
   2116  1.1  mrg 	  /* FIXME: This is not optimal.  We do not really know the probability
   2117  1.1  mrg 	     that values differ in the MSW only, but we should probably distribute
   2118  1.1  mrg 	     probabilities more evenly.  */
   2119  1.1  mrg 	  msw_skip_prob = rev_prob;
   2120  1.1  mrg 	  lsw_taken_prob = prob > profile_probability::never ()
   2121  1.1  mrg 			   ? profile_probability::guessed_always ()
   2122  1.1  mrg 			   : profile_probability::guessed_never ();
   2123  1.1  mrg 	}
   2124  1.1  mrg       break;
   2125  1.1  mrg     case NE:
   2126  1.1  mrg       msw_taken = NE;
   2127  1.1  mrg       msw_taken_prob = prob;
   2128  1.1  mrg       lsw_taken = NE;
   2129  1.1  mrg       lsw_taken_prob = profile_probability::guessed_never ();
   2130  1.1  mrg       break;
   2131  1.1  mrg     case GTU: case GT:
   2132  1.1  mrg       msw_taken = comparison;
   2133  1.1  mrg       if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
   2134  1.1  mrg 	break;
   2135  1.1  mrg       if (comparison != GTU || op2h != CONST0_RTX (SImode))
   2136  1.1  mrg 	msw_skip = swap_condition (msw_taken);
   2137  1.1  mrg       lsw_taken = GTU;
   2138  1.1  mrg       break;
   2139  1.1  mrg     case GEU: case GE:
   2140  1.1  mrg       if (op2l == CONST0_RTX (SImode))
   2141  1.1  mrg 	msw_taken = comparison;
   2142  1.1  mrg       else
   2143  1.1  mrg 	{
   2144  1.1  mrg 	  msw_taken = comparison == GE ? GT : GTU;
   2145  1.1  mrg 	  msw_skip = swap_condition (msw_taken);
   2146  1.1  mrg 	  lsw_taken = GEU;
   2147  1.1  mrg 	}
   2148  1.1  mrg       break;
   2149  1.1  mrg     case LTU: case LT:
   2150  1.1  mrg       msw_taken = comparison;
   2151  1.1  mrg       if (op2l == CONST0_RTX (SImode))
   2152  1.1  mrg 	break;
   2153  1.1  mrg       msw_skip = swap_condition (msw_taken);
   2154  1.1  mrg       lsw_taken = LTU;
   2155  1.1  mrg       break;
   2156  1.1  mrg     case LEU: case LE:
   2157  1.1  mrg       if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
   2158  1.1  mrg 	msw_taken = comparison;
   2159  1.1  mrg       else
   2160  1.1  mrg 	{
   2161  1.1  mrg 	  lsw_taken = LEU;
   2162  1.1  mrg 	  if (comparison == LE)
   2163  1.1  mrg 	    msw_taken = LT;
   2164  1.1  mrg 	  else if (op2h != CONST0_RTX (SImode))
   2165  1.1  mrg 	    msw_taken = LTU;
   2166  1.1  mrg 	  else
   2167  1.1  mrg 	    {
   2168  1.1  mrg 	      msw_skip = swap_condition (LTU);
   2169  1.1  mrg 	      break;
   2170  1.1  mrg 	    }
   2171  1.1  mrg 	  msw_skip = swap_condition (msw_taken);
   2172  1.1  mrg 	}
   2173  1.1  mrg       break;
   2174  1.1  mrg     default: return false;
   2175  1.1  mrg     }
   2176  1.1  mrg   num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
   2177  1.1  mrg 		  + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
   2178  1.1  mrg 		  + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
   2179  1.1  mrg   if (comparison != EQ && comparison != NE && num_branches > 1)
   2180  1.1  mrg     {
   2181  1.1  mrg       if (!CONSTANT_P (operands[2])
   2182  1.1  mrg 	  && prob.initialized_p ()
   2183  1.1  mrg 	  && prob.to_reg_br_prob_base () >= (int) (REG_BR_PROB_BASE * 3 / 8U)
   2184  1.1  mrg 	  && prob.to_reg_br_prob_base () <= (int) (REG_BR_PROB_BASE * 5 / 8U))
   2185  1.1  mrg 	{
   2186  1.1  mrg 	  msw_taken_prob = prob.apply_scale (1, 2);
   2187  1.1  mrg 	  msw_skip_prob = rev_prob.apply_scale (REG_BR_PROB_BASE,
   2188  1.1  mrg 						rev_prob.to_reg_br_prob_base ()
   2189  1.1  mrg 						+ REG_BR_PROB_BASE);
   2190  1.1  mrg 	  lsw_taken_prob = prob;
   2191  1.1  mrg 	}
   2192  1.1  mrg       else
   2193  1.1  mrg 	{
   2194  1.1  mrg 	  msw_taken_prob = prob;
   2195  1.1  mrg 	  msw_skip_prob = profile_probability::guessed_always ();
   2196  1.1  mrg 	  /* ??? If we have a constant op2h, should we use that when
   2197  1.1  mrg 	     calculating lsw_taken_prob?  */
   2198  1.1  mrg 	  lsw_taken_prob = prob;
   2199  1.1  mrg 	}
   2200  1.1  mrg     }
   2201  1.1  mrg   operands[1] = op1h;
   2202  1.1  mrg   operands[2] = op2h;
   2203  1.1  mrg 
   2204  1.1  mrg   if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
   2205  1.1  mrg     expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
   2206  1.1  mrg   if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
   2207  1.1  mrg     {
   2208  1.1  mrg       rtx taken_label = operands[3];
   2209  1.1  mrg 
   2210  1.1  mrg       /* Operands were possibly modified, but msw_skip doesn't expect this.
   2211  1.1  mrg 	 Always use the original ones.  */
   2212  1.1  mrg       if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
   2213  1.1  mrg 	{
   2214  1.1  mrg 	  operands[1] = op1h;
   2215  1.1  mrg 	  operands[2] = op2h;
   2216  1.1  mrg 	}
   2217  1.1  mrg 
   2218  1.1  mrg       operands[3] = skip_label = gen_label_rtx ();
   2219  1.1  mrg       expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
   2220  1.1  mrg       operands[3] = taken_label;
   2221  1.1  mrg     }
   2222  1.1  mrg   operands[1] = op1l;
   2223  1.1  mrg   operands[2] = op2l;
   2224  1.1  mrg   if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
   2225  1.1  mrg     expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
   2226  1.1  mrg   if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
   2227  1.1  mrg     emit_label (skip_label);
   2228  1.1  mrg   return true;
   2229  1.1  mrg }
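/* Illustrative expansion for a hypothetical DImode "a > b" (GT) branch
   using the logic above: branch to the target if MSW(a) > MSW(b)
   (msw_taken), skip past the low-part test if MSW(a) < MSW(b) (msw_skip),
   and finally branch to the target if LSW(a) > LSW(b) unsigned
   (lsw_taken = GTU).  */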
   2230  1.1  mrg 
   2231  1.1  mrg /* Given an operand, return 1 if the evaluated operand plugged into an
   2232  1.1  mrg    if_then_else will result in a branch_true, 0 if branch_false, or
    2233  1.1  mrg    -1 if neither applies.  The truth table goes like this:
   2234  1.1  mrg 
   2235  1.1  mrg        op   | cmpval |   code  | result
   2236  1.1  mrg    ---------+--------+---------+--------------------
   2237  1.1  mrg       T (0) |   0    |  EQ (1) |  0 = 0 ^ (0 == 1)
   2238  1.1  mrg       T (0) |   1    |  EQ (1) |  1 = 0 ^ (1 == 1)
   2239  1.1  mrg       T (0) |   0    |  NE (0) |  1 = 0 ^ (0 == 0)
   2240  1.1  mrg       T (0) |   1    |  NE (0) |  0 = 0 ^ (1 == 0)
   2241  1.1  mrg      !T (1) |   0    |  EQ (1) |  1 = 1 ^ (0 == 1)
   2242  1.1  mrg      !T (1) |   1    |  EQ (1) |  0 = 1 ^ (1 == 1)
   2243  1.1  mrg      !T (1) |   0    |  NE (0) |  0 = 1 ^ (0 == 0)
   2244  1.1  mrg      !T (1) |   1    |  NE (0) |  1 = 1 ^ (1 == 0)  */
   2245  1.1  mrg int
   2246  1.1  mrg sh_eval_treg_value (rtx op)
   2247  1.1  mrg {
   2248  1.1  mrg   if (t_reg_operand (op, GET_MODE (op)))
   2249  1.1  mrg     return 1;
   2250  1.1  mrg   if (negt_reg_operand (op, GET_MODE (op)))
   2251  1.1  mrg     return 0;
   2252  1.1  mrg 
   2253  1.1  mrg   rtx_code code = GET_CODE (op);
   2254  1.1  mrg   if ((code != EQ && code != NE) || !CONST_INT_P (XEXP (op, 1)))
   2255  1.1  mrg     return -1;
   2256  1.1  mrg 
   2257  1.1  mrg   int cmpop = code == EQ ? 1 : 0;
   2258  1.1  mrg   int cmpval = INTVAL (XEXP (op, 1));
   2259  1.1  mrg   if (cmpval != 0 && cmpval != 1)
   2260  1.1  mrg     return -1;
   2261  1.1  mrg 
   2262  1.1  mrg   int t;
   2263  1.1  mrg   if (t_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
   2264  1.1  mrg     t = 0;
   2265  1.1  mrg   else if (negt_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
   2266  1.1  mrg     t = 1;
   2267  1.1  mrg   else
   2268  1.1  mrg     return -1;
   2269  1.1  mrg 
   2270  1.1  mrg   return t ^ (cmpval == cmpop);
   2271  1.1  mrg }
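
/* Worked example: for op = (eq (reg T) (const_int 0)) we get t = 0,
   cmpval = 0 and cmpop = 1, so the result is 0 ^ (0 == 1) = 0, i.e. the
   operand acts like a branch_false -- the first row of the table above.  */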
   2272  1.1  mrg 
   2273  1.1  mrg /* Emit INSN, possibly in a PARALLEL with an USE/CLOBBER of FPSCR bits in case
   2274  1.1  mrg    of floating-point comparisons.  */
   2275  1.1  mrg static void
   2276  1.1  mrg sh_emit_set_t_insn (rtx insn, machine_mode mode)
   2277  1.1  mrg {
   2278  1.1  mrg   if (TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT
   2279  1.1  mrg       && GET_CODE (insn) != PARALLEL)
   2280  1.1  mrg     {
   2281  1.1  mrg       insn = gen_rtx_PARALLEL (VOIDmode,
   2282  1.1  mrg 	  gen_rtvec (3, insn,
   2283  1.1  mrg 	      gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, FPSCR_STAT_REG)),
   2284  1.1  mrg 	      gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, FPSCR_MODES_REG))));
   2285  1.1  mrg     }
   2286  1.1  mrg   emit_insn (insn);
   2287  1.1  mrg }
   2288  1.1  mrg 
   2289  1.1  mrg /* Prepare the operands for an scc instruction; make sure that the
   2290  1.1  mrg    compare has been done and the result is in T_REG.  */
   2291  1.1  mrg void
   2292  1.1  mrg sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
   2293  1.1  mrg {
   2294  1.1  mrg   rtx t_reg = get_t_reg_rtx ();
   2295  1.1  mrg   enum rtx_code oldcode = code;
   2296  1.1  mrg 
   2297  1.1  mrg   /* First need a compare insn.  */
   2298  1.1  mrg   switch (code)
   2299  1.1  mrg     {
   2300  1.1  mrg     case NE:
   2301  1.1  mrg       /* It isn't possible to handle this case.  */
   2302  1.1  mrg       gcc_unreachable ();
   2303  1.1  mrg     case LT:
   2304  1.1  mrg       code = GT;
   2305  1.1  mrg       break;
   2306  1.1  mrg     case LE:
   2307  1.1  mrg       code = GE;
   2308  1.1  mrg       break;
   2309  1.1  mrg     case LTU:
   2310  1.1  mrg       code = GTU;
   2311  1.1  mrg       break;
   2312  1.1  mrg     case LEU:
   2313  1.1  mrg       code = GEU;
   2314  1.1  mrg       break;
   2315  1.1  mrg     default:
   2316  1.1  mrg       break;
   2317  1.1  mrg     }
   2318  1.1  mrg   if (code != oldcode)
   2319  1.1  mrg     std::swap (op0, op1);
   2320  1.1  mrg 
   2321  1.1  mrg   machine_mode mode = GET_MODE (op0);
   2322  1.1  mrg   if (mode == VOIDmode)
   2323  1.1  mrg     mode = GET_MODE (op1);
   2324  1.1  mrg 
   2325  1.1  mrg   op0 = force_reg (mode, op0);
   2326  1.1  mrg   if ((code != EQ && code != NE
   2327  1.1  mrg        && (op1 != const0_rtx
   2328  1.1  mrg 	   || code == GTU  || code == GEU || code == LTU || code == LEU))
   2329  1.1  mrg       || (mode == DImode && op1 != const0_rtx)
   2330  1.1  mrg       || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
   2331  1.1  mrg     op1 = force_reg (mode, op1);
   2332  1.1  mrg 
   2333  1.1  mrg   sh_emit_set_t_insn (gen_rtx_SET (t_reg,
   2334  1.1  mrg 			           gen_rtx_fmt_ee (code, SImode, op0, op1)),
   2335  1.1  mrg 		      mode);
   2336  1.1  mrg }
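
/* For example, a request for LT is canonicalized to GT with the operands
   swapped, so what is emitted is roughly (sketch)
     (set (reg T) (gt:SI op1 op0))
   i.e. a single compare insn computing op1 > op0 into the T bit, wrapped
   in a PARALLEL with an FPSCR use/clobber for floating-point modes.  */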
   2337  1.1  mrg 
   2338  1.1  mrg /* Called from the md file, set up the operands of a compare instruction.  */
   2339  1.1  mrg void
   2340  1.1  mrg sh_emit_compare_and_branch (rtx *operands, machine_mode mode)
   2341  1.1  mrg {
   2342  1.1  mrg   enum rtx_code code = GET_CODE (operands[0]);
   2343  1.1  mrg   enum rtx_code branch_code;
   2344  1.1  mrg   rtx op0 = operands[1];
   2345  1.1  mrg   rtx op1 = operands[2];
   2346  1.1  mrg   rtx insn;
   2347  1.1  mrg   bool need_ccmpeq = false;
   2348  1.1  mrg 
   2349  1.1  mrg   if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
   2350  1.1  mrg     {
   2351  1.1  mrg       op0 = force_reg (mode, op0);
   2352  1.1  mrg       op1 = force_reg (mode, op1);
   2353  1.1  mrg     }
   2354  1.1  mrg   else
   2355  1.1  mrg     {
   2356  1.1  mrg       if (code != EQ || mode == DImode)
   2357  1.1  mrg 	{
   2358  1.1  mrg 	  /* Force args into regs, since we can't use constants here.  */
   2359  1.1  mrg 	  op0 = force_reg (mode, op0);
   2360  1.1  mrg 	  if (op1 != const0_rtx || code == GTU  || code == GEU)
   2361  1.1  mrg 	    op1 = force_reg (mode, op1);
   2362  1.1  mrg         }
   2363  1.1  mrg     }
   2364  1.1  mrg 
   2365  1.1  mrg   if (GET_MODE_CLASS (mode) == MODE_FLOAT)
   2366  1.1  mrg     {
   2367  1.1  mrg       if (code == LT
   2368  1.1  mrg 	  || (code == LE && TARGET_IEEE && TARGET_SH2E)
   2369  1.1  mrg 	  || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
   2370  1.1  mrg 	{
   2371  1.1  mrg 	  std::swap (op0, op1);
   2372  1.1  mrg 	  code = swap_condition (code);
   2373  1.1  mrg 	}
   2374  1.1  mrg 
   2375  1.1  mrg       /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only.  */
   2376  1.1  mrg       if (code == GE)
   2377  1.1  mrg 	{
   2378  1.1  mrg 	  gcc_assert (TARGET_IEEE && TARGET_SH2E);
   2379  1.1  mrg 	  need_ccmpeq = true;
   2380  1.1  mrg 	  code = GT;
   2381  1.1  mrg 	}
   2382  1.1  mrg 
   2383  1.1  mrg       /* Now we can have EQ, NE, GT, LE.  NE and LE are then transformed
   2384  1.1  mrg 	 to EQ/GT respectively.  */
   2385  1.1  mrg       gcc_assert (code == EQ || code == GT || code == NE || code == LE);
   2386  1.1  mrg     }
   2387  1.1  mrg 
   2388  1.1  mrg   switch (code)
   2389  1.1  mrg     {
   2390  1.1  mrg     case EQ:
   2391  1.1  mrg     case GT:
   2392  1.1  mrg     case GE:
   2393  1.1  mrg     case GTU:
   2394  1.1  mrg     case GEU:
   2395  1.1  mrg       branch_code = code;
   2396  1.1  mrg       break;
   2397  1.1  mrg     case NE:
   2398  1.1  mrg     case LT:
   2399  1.1  mrg     case LE:
   2400  1.1  mrg     case LTU:
   2401  1.1  mrg     case LEU:
   2402  1.1  mrg       branch_code = reverse_condition (code);
   2403  1.1  mrg       break;
   2404  1.1  mrg     default:
   2405  1.1  mrg       gcc_unreachable ();
   2406  1.1  mrg     }
   2407  1.1  mrg 
   2408  1.1  mrg   insn = gen_rtx_SET (get_t_reg_rtx (),
   2409  1.1  mrg 		      gen_rtx_fmt_ee (branch_code, SImode, op0, op1));
   2410  1.1  mrg 
   2411  1.1  mrg   sh_emit_set_t_insn (insn, mode);
   2412  1.1  mrg   if (need_ccmpeq)
   2413  1.1  mrg     sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);
   2414  1.1  mrg 
   2415  1.1  mrg   if (branch_code == code)
   2416  1.1  mrg     emit_jump_insn (gen_branch_true (operands[3]));
   2417  1.1  mrg   else
   2418  1.1  mrg     emit_jump_insn (gen_branch_false (operands[3]));
   2419  1.1  mrg }
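
/* For example, an integer "branch if op0 != op1" arrives here with code NE;
   branch_code becomes EQ (the reversed condition), a compare setting T to
   op0 == op1 is emitted, and because branch_code differs from the original
   code the jump is emitted as a branch_false (bf).  */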
   2420  1.1  mrg 
   2421  1.1  mrg void
   2422  1.1  mrg sh_emit_compare_and_set (rtx *operands, machine_mode mode)
   2423  1.1  mrg {
   2424  1.1  mrg   enum rtx_code code = GET_CODE (operands[1]);
   2425  1.1  mrg   rtx op0 = operands[2];
   2426  1.1  mrg   rtx op1 = operands[3];
   2427  1.1  mrg   rtx_code_label *lab = NULL;
   2428  1.1  mrg   bool invert = false;
   2429  1.1  mrg 
   2430  1.1  mrg   op0 = force_reg (mode, op0);
   2431  1.1  mrg   if ((code != EQ && code != NE
   2432  1.1  mrg        && (op1 != const0_rtx
   2433  1.1  mrg 	   || code == GTU  || code == GEU || code == LTU || code == LEU))
   2434  1.1  mrg       || (mode == DImode && op1 != const0_rtx)
   2435  1.1  mrg       || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
   2436  1.1  mrg     op1 = force_reg (mode, op1);
   2437  1.1  mrg 
   2438  1.1  mrg   if (GET_MODE_CLASS (mode) == MODE_FLOAT)
   2439  1.1  mrg     {
   2440  1.1  mrg       if (code == LT || code == LE)
   2441  1.1  mrg 	{
   2442  1.1  mrg 	  std::swap (op0, op1);
   2443  1.1  mrg 	  code = swap_condition (code);
   2444  1.1  mrg 	}
   2445  1.1  mrg       if (code == GE)
   2446  1.1  mrg 	{
   2447  1.1  mrg 	  if (TARGET_IEEE)
   2448  1.1  mrg 	    {
   2449  1.1  mrg 	      lab = gen_label_rtx ();
   2450  1.1  mrg 	      sh_emit_scc_to_t (EQ, op0, op1);
   2451  1.1  mrg 	      emit_jump_insn (gen_branch_true (lab));
   2452  1.1  mrg 	      code = GT;
    2453  1.1  mrg 	    }
   2454  1.1  mrg 	  else
   2455  1.1  mrg 	    {
   2456  1.1  mrg 	      code = LT;
   2457  1.1  mrg 	      invert = true;
   2458  1.1  mrg 	    }
   2459  1.1  mrg 	}
   2460  1.1  mrg     }
   2461  1.1  mrg 
   2462  1.1  mrg   if (code == NE)
   2463  1.1  mrg     {
   2464  1.1  mrg       code = EQ;
   2465  1.1  mrg       invert = true;
   2466  1.1  mrg     }
   2467  1.1  mrg 
   2468  1.1  mrg   sh_emit_scc_to_t (code, op0, op1);
   2469  1.1  mrg   if (lab)
   2470  1.1  mrg     emit_label (lab);
   2471  1.1  mrg   if (invert)
   2472  1.1  mrg     emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ()));
   2473  1.1  mrg   else
   2474  1.1  mrg     emit_move_insn (operands[0], get_t_reg_rtx ());
   2475  1.1  mrg }
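
/* E.g. for "dest = (op0 != op1)" the comparison is canonicalized to EQ with
   invert set, so T receives op0 == op1 and the final movnegt stores the
   negated T bit into dest.  */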
   2476  1.1  mrg 
   2477  1.1  mrg /* Functions to output assembly code.  */
   2479  1.1  mrg 
    2480  1.1  mrg /* Return a sequence of instructions to perform a DI or DF move.
   2481  1.1  mrg 
   2482  1.1  mrg    Since the SH cannot move a DI or DF in one instruction, we have
   2483  1.1  mrg    to take care when we see overlapping source and dest registers.  */
   2484  1.1  mrg const char *
   2485  1.1  mrg output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
   2486  1.1  mrg 		   machine_mode mode)
   2487  1.1  mrg {
   2488  1.1  mrg   rtx dst = operands[0];
   2489  1.1  mrg   rtx src = operands[1];
   2490  1.1  mrg 
   2491  1.1  mrg   if (MEM_P (dst)
   2492  1.1  mrg       && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
   2493  1.1  mrg     return     "mov.l	%T1,%0"	"\n"
   2494  1.1  mrg 	   "	mov.l	%1,%0";
   2495  1.1  mrg 
   2496  1.1  mrg   if (register_operand (dst, mode)
   2497  1.1  mrg       && register_operand (src, mode))
   2498  1.1  mrg     {
   2499  1.1  mrg       if (REGNO (src) == MACH_REG)
   2500  1.1  mrg 	return         "sts	mach,%S0" "\n"
   2501  1.1  mrg 	       "	sts	macl,%R0";
   2502  1.1  mrg 
   2503  1.1  mrg       /* When mov.d r1,r2 do r2->r3 then r1->r2;
   2504  1.1  mrg          when mov.d r1,r0 do r1->r0 then r2->r1.  */
   2505  1.1  mrg       if (REGNO (src) + 1 == REGNO (dst))
   2506  1.1  mrg 	return         "mov	%T1,%T0" "\n"
   2507  1.1  mrg 	       "	mov	%1,%0";
   2508  1.1  mrg       else
   2509  1.1  mrg 	return         "mov	%1,%0" "\n"
   2510  1.1  mrg 	       "	mov	%T1,%T0";
   2511  1.1  mrg     }
   2512  1.1  mrg   else if (CONST_INT_P (src))
   2513  1.1  mrg     {
   2514  1.1  mrg       if (INTVAL (src) < 0)
   2515  1.1  mrg 	output_asm_insn ("mov	#-1,%S0", operands);
   2516  1.1  mrg       else
   2517  1.1  mrg 	output_asm_insn ("mov	#0,%S0", operands);
   2518  1.1  mrg 
   2519  1.1  mrg       return "mov	%1,%R0";
   2520  1.1  mrg     }
   2521  1.1  mrg   else if (MEM_P (src))
   2522  1.1  mrg     {
   2523  1.1  mrg       int ptrreg = -1;
   2524  1.1  mrg       int dreg = REGNO (dst);
   2525  1.1  mrg       rtx inside = XEXP (src, 0);
   2526  1.1  mrg 
   2527  1.1  mrg       switch (GET_CODE (inside))
   2528  1.1  mrg 	{
   2529  1.1  mrg 	case REG:
   2530  1.1  mrg 	  ptrreg = REGNO (inside);
   2531  1.1  mrg 	  break;
   2532  1.1  mrg 
   2533  1.1  mrg 	case SUBREG:
   2534  1.1  mrg 	  ptrreg = subreg_regno (inside);
   2535  1.1  mrg 	  break;
   2536  1.1  mrg 
   2537  1.1  mrg 	case PLUS:
   2538  1.1  mrg 	  ptrreg = REGNO (XEXP (inside, 0));
   2539  1.1  mrg 	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
   2540  1.1  mrg 	     an offsettable address.  Unfortunately, offsettable addresses use
   2541  1.1  mrg 	     QImode to check the offset, and a QImode offsettable address
   2542  1.1  mrg 	     requires r0 for the other operand, which is not currently
   2543  1.1  mrg 	     supported, so we can't use the 'o' constraint.
   2544  1.1  mrg 	     Thus we must check for and handle r0+REG addresses here.
   2545  1.1  mrg 	     We punt for now, since this is likely very rare.  */
   2546  1.1  mrg 	  gcc_assert (!REG_P (XEXP (inside, 1)));
   2547  1.1  mrg 	  break;
   2548  1.1  mrg 
   2549  1.1  mrg 	case LABEL_REF:
   2550  1.1  mrg 	  return       "mov.l	%1,%0" "\n"
   2551  1.1  mrg 		 "	mov.l	%1+4,%T0";
   2552  1.1  mrg 	case POST_INC:
   2553  1.1  mrg 	  return       "mov.l	%1,%0" "\n"
   2554  1.1  mrg 		 "	mov.l	%1,%T0";
   2555  1.1  mrg 	default:
   2556  1.1  mrg 	  gcc_unreachable ();
   2557  1.1  mrg 	}
   2558  1.1  mrg 
   2559  1.1  mrg       /* Work out the safe way to copy.  Copy into the second half first.  */
   2560  1.1  mrg       if (dreg == ptrreg)
   2561  1.1  mrg 	return         "mov.l	%T1,%T0" "\n"
   2562  1.1  mrg 	       "	mov.l	%1,%0";
   2563  1.1  mrg     }
   2564  1.1  mrg 
   2565  1.1  mrg   return       "mov.l	%1,%0" "\n"
   2566  1.1  mrg 	 "	mov.l	%T1,%T0";
   2567  1.1  mrg }
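
/* For example, for a register-to-register DImode move where the destination
   is the second register of the source pair (the REGNO (src) + 1 ==
   REGNO (dst) case above, e.g. mov.d r1,r2), the second halves are moved
   first:
	mov	r2,r3
	mov	r1,r2
   Otherwise the first halves are moved first.  */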
   2568  1.1  mrg 
   2569  1.1  mrg /* Print an instruction which would have gone into a delay slot after
   2570  1.1  mrg    another instruction, but couldn't because the other instruction expanded
   2571  1.1  mrg    into a sequence where putting the slot insn at the end wouldn't work.  */
   2572  1.1  mrg static void
   2573  1.1  mrg print_slot (rtx_sequence *seq)
   2574  1.1  mrg {
   2575  1.1  mrg   final_scan_insn (seq->insn (1), asm_out_file, optimize, 1, NULL);
   2576  1.1  mrg 
   2577  1.1  mrg   seq->insn (1)->set_deleted ();
   2578  1.1  mrg }
   2579  1.1  mrg 
   2580  1.1  mrg const char *
   2581  1.1  mrg output_far_jump (rtx_insn *insn, rtx op)
   2582  1.1  mrg {
   2583  1.1  mrg   struct { rtx lab, reg, op; } this_jmp;
   2584  1.1  mrg   rtx_code_label *braf_base_lab = NULL;
   2585  1.1  mrg   const char *jump;
   2586  1.1  mrg   int far;
   2587  1.1  mrg   int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
   2588  1.1  mrg   rtx_insn *prev;
   2589  1.1  mrg 
   2590  1.1  mrg   this_jmp.lab = gen_label_rtx ();
   2591  1.1  mrg 
   2592  1.1  mrg   if (TARGET_SH2
   2593  1.1  mrg       && offset >= -32764
   2594  1.1  mrg       && offset - get_attr_length (insn) <= 32766
   2595  1.1  mrg       && ! CROSSING_JUMP_P (insn))
   2596  1.1  mrg     {
   2597  1.1  mrg       far = 0;
   2598  1.1  mrg       jump =   "mov.w	%O0,%1" "\n"
   2599  1.1  mrg 	     "	braf	%1";
   2600  1.1  mrg     }
   2601  1.1  mrg   else
   2602  1.1  mrg     {
   2603  1.1  mrg       far = 1;
   2604  1.1  mrg       if (flag_pic)
   2605  1.1  mrg 	{
   2606  1.1  mrg 	  if (TARGET_SH2)
   2607  1.1  mrg 	    jump =     "mov.l	%O0,%1" "\n"
   2608  1.1  mrg 		   "	braf	%1";
   2609  1.1  mrg 	  else
   2610  1.1  mrg 	    jump =     "mov.l	r0,@-r15"	"\n"
   2611  1.1  mrg 		   "	mova	%O0,r0"		"\n"
   2612  1.1  mrg 		   "	mov.l	@r0,%1"		"\n"
   2613  1.1  mrg 		   "	add	r0,%1"		"\n"
   2614  1.1  mrg 		   "	mov.l	@r15+,r0"	"\n"
   2615  1.1  mrg 		   "	jmp	@%1";
   2616  1.1  mrg 	}
   2617  1.1  mrg       else
   2618  1.1  mrg 	jump =         "mov.l	%O0,%1" "\n"
   2619  1.1  mrg 	       "	jmp	@%1";
   2620  1.1  mrg     }
   2621  1.1  mrg   /* If we have a scratch register available, use it.  */
   2622  1.1  mrg   if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
   2623  1.1  mrg       && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
   2624  1.1  mrg     {
   2625  1.1  mrg       this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
   2626  1.1  mrg       if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
   2627  1.1  mrg 	jump =         "mov.l	r1,@-r15"	"\n"
   2628  1.1  mrg 	       "	mova	%O0,r0"		"\n"
   2629  1.1  mrg 	       "	mov.l	@r0,r1"		"\n"
   2630  1.1  mrg 	       "	add	r1,r0"		"\n"
   2631  1.1  mrg 	       "	mov.l	@r15+,r1"	"\n"
   2632  1.1  mrg 	       "	jmp	@%1";
   2633  1.1  mrg       output_asm_insn (jump, &this_jmp.lab);
   2634  1.1  mrg       if (dbr_sequence_length ())
   2635  1.1  mrg 	print_slot (final_sequence);
   2636  1.1  mrg       else
   2637  1.1  mrg 	output_asm_insn ("nop", 0);
   2638  1.1  mrg     }
   2639  1.1  mrg   else
   2640  1.1  mrg     {
   2641  1.1  mrg       /* Output the delay slot insn first if any.  */
   2642  1.1  mrg       if (dbr_sequence_length ())
   2643  1.1  mrg 	print_slot (final_sequence);
   2644  1.1  mrg 
   2645  1.1  mrg       this_jmp.reg = gen_rtx_REG (SImode, 13);
   2646  1.1  mrg       output_asm_insn ("mov.l	r13,@-r15", 0);
   2647  1.1  mrg       output_asm_insn (jump, &this_jmp.lab);
   2648  1.1  mrg       output_asm_insn ("mov.l	@r15+,r13", 0);
   2649  1.1  mrg     }
   2650  1.1  mrg   if (far && flag_pic && TARGET_SH2)
   2651  1.1  mrg     {
   2652  1.1  mrg       braf_base_lab = gen_label_rtx ();
   2653  1.1  mrg       (*targetm.asm_out.internal_label) (asm_out_file, "L",
   2654  1.1  mrg 				 CODE_LABEL_NUMBER (braf_base_lab));
   2655  1.1  mrg     }
   2656  1.1  mrg   if (far)
   2657  1.1  mrg     output_asm_insn (".align	2", 0);
   2658  1.1  mrg   (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab));
   2659  1.1  mrg   this_jmp.op = op;
   2660  1.1  mrg   if (far && flag_pic)
   2661  1.1  mrg     {
   2662  1.1  mrg       if (TARGET_SH2)
   2663  1.1  mrg 	this_jmp.lab = braf_base_lab;
   2664  1.1  mrg       output_asm_insn (".long	%O2-%O0", &this_jmp.lab);
   2665  1.1  mrg     }
   2666  1.1  mrg   else
   2667  1.1  mrg     output_asm_insn (far ? ".long	%O2" : ".word %O2-%O0", &this_jmp.lab);
   2668  1.1  mrg   return "";
   2669  1.1  mrg }
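
/* A rough sketch of the non-PIC far jump sequence emitted above when no
   scratch register is available (label and register names illustrative):
	mov.l	r13,@-r15
	mov.l	.L100,r13
	jmp	@r13
	mov.l	@r15+,r13	! executes in the jmp delay slot
	.align	2
   .L100:
	.long	<destination>
   When a preceding indirect_jump_scratch insn provides a scratch register,
   the r13 save/restore is not needed.  */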
   2670  1.1  mrg 
   2671  1.1  mrg /* Local label counter, used for constants in the pool and inside
   2672  1.1  mrg    pattern branches.  */
   2673  1.1  mrg static int lf = 100;
   2674  1.1  mrg 
   2675  1.1  mrg /* Output code for ordinary branches.  */
   2676  1.1  mrg const char *
   2677  1.1  mrg output_branch (int logic, rtx_insn *insn, rtx *operands)
   2678  1.1  mrg {
   2679  1.1  mrg   switch (get_attr_length (insn))
   2680  1.1  mrg     {
   2681  1.1  mrg     case 6:
   2682  1.1  mrg       /* This can happen if filling the delay slot has caused a forward
   2683  1.1  mrg 	 branch to exceed its range (we could reverse it, but only
   2684  1.1  mrg 	 when we know we won't overextend other branches; this should
   2685  1.1  mrg 	 best be handled by relaxation).
    2686  1.1  mrg 	 It can also happen when other condbranches hoist delay slot insns
    2687  1.1  mrg 	 from their destinations, thus leading to a code size increase.
   2688  1.1  mrg 	 But the branch will still be in the range -4092..+4098 bytes.  */
   2689  1.1  mrg       if (! TARGET_RELAX)
   2690  1.1  mrg 	{
   2691  1.1  mrg 	  int label = lf++;
   2692  1.1  mrg 	  /* The call to print_slot will clobber the operands.  */
   2693  1.1  mrg 	  rtx op0 = operands[0];
   2694  1.1  mrg 
   2695  1.1  mrg 	  /* If the instruction in the delay slot is annulled (true), then
   2696  1.1  mrg 	     there is no delay slot where we can put it now.  The only safe
   2697  1.1  mrg 	     place for it is after the label.  final will do that by default.  */
   2698  1.1  mrg 
   2699  1.1  mrg 	  if (final_sequence
   2700  1.1  mrg 	      && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
   2701  1.1  mrg 	      && get_attr_length (final_sequence->insn (1)))
   2702  1.1  mrg 	    {
   2703  1.1  mrg 	      asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
   2704  1.1  mrg 	                   ASSEMBLER_DIALECT ? "/" : ".", label);
   2705  1.1  mrg 	      print_slot (final_sequence);
   2706  1.1  mrg 	    }
   2707  1.1  mrg 	  else
   2708  1.1  mrg 	    asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
   2709  1.1  mrg 
   2710  1.1  mrg 	  output_asm_insn ("bra\t%l0", &op0);
   2711  1.1  mrg 	  fprintf (asm_out_file, "\tnop\n");
   2712  1.1  mrg 	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
   2713  1.1  mrg 
   2714  1.1  mrg 	  return "";
   2715  1.1  mrg 	}
   2716  1.1  mrg       /* FALLTHRU */
   2717  1.1  mrg       /* When relaxing, handle this like a short branch.  The linker
   2718  1.1  mrg 	 will fix it up if it still doesn't fit after relaxation.  */
   2719  1.1  mrg     case 2:
   2720  1.1  mrg       return logic ? "bt%.\t%l0" : "bf%.\t%l0";
   2721  1.1  mrg 
   2722  1.1  mrg       /* These are for SH2e, in which we have to account for the
   2723  1.1  mrg 	 extra nop because of the hardware bug in annulled branches.  */
   2724  1.1  mrg     case 8:
   2725  1.1  mrg       if (! TARGET_RELAX)
   2726  1.1  mrg 	{
   2727  1.1  mrg 	  int label = lf++;
   2728  1.1  mrg 
   2729  1.1  mrg 	  gcc_assert (!final_sequence
   2730  1.1  mrg 		      || !(INSN_ANNULLED_BRANCH_P
   2731  1.1  mrg 			   (XVECEXP (final_sequence, 0, 0))));
   2732  1.1  mrg 	  asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
   2733  1.1  mrg 		       logic ? "f" : "t",
   2734  1.1  mrg 		       ASSEMBLER_DIALECT ? "/" : ".", label);
   2735  1.1  mrg 	  fprintf (asm_out_file, "\tnop\n");
   2736  1.1  mrg 	  output_asm_insn ("bra\t%l0", operands);
   2737  1.1  mrg 	  fprintf (asm_out_file, "\tnop\n");
   2738  1.1  mrg 	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
   2739  1.1  mrg 
   2740  1.1  mrg 	  return "";
   2741  1.1  mrg 	}
   2742  1.1  mrg       /* FALLTHRU */
   2743  1.1  mrg     case 4:
   2744  1.1  mrg       {
   2745  1.1  mrg 	char buffer[10];
   2746  1.1  mrg 
   2747  1.1  mrg 	sprintf (buffer, "b%s%ss\t%%l0",
   2748  1.1  mrg 		 logic ? "t" : "f",
   2749  1.1  mrg 		 ASSEMBLER_DIALECT ? "/" : ".");
   2750  1.1  mrg 	output_asm_insn (buffer, &operands[0]);
   2751  1.1  mrg 	return "nop";
   2752  1.1  mrg       }
   2753  1.1  mrg 
   2754  1.1  mrg     default:
   2755  1.1  mrg       /* There should be no longer branches now - that would
   2756  1.1  mrg 	 indicate that something has destroyed the branches set
   2757  1.1  mrg 	 up in machine_dependent_reorg.  */
   2758  1.1  mrg       gcc_unreachable ();
   2759  1.1  mrg     }
   2760  1.1  mrg }
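
/* For instance, the 6 byte case above turns a too-far conditional branch
   into roughly (label name illustrative):
	bf	.LF100		! inverted condition skips the far branch
	bra	<target>
	nop
   .LF100:
   using bf/s with the original delay-slot insn instead of the plain bf when
   a non-annulled delay slot is available.  */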
   2761  1.1  mrg 
   2762  1.1  mrg /* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
    2763  1.1  mrg    fill in operand 9 as a label to the successor insn.
    2764  1.1  mrg    We try to use jump threading where possible.
    2765  1.1  mrg    If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
   2766  1.1  mrg    we assume the jump is taken.  I.e. EQ means follow jmp and bf, NE means
   2767  1.1  mrg    follow jmp and bt, if the address is in range.  */
   2768  1.1  mrg const char *
   2769  1.1  mrg output_branchy_insn (enum rtx_code code, const char *templ,
   2770  1.1  mrg 		     rtx_insn *insn, rtx *operands)
   2771  1.1  mrg {
   2772  1.1  mrg   rtx_insn *next_insn = NEXT_INSN (insn);
   2773  1.1  mrg 
   2774  1.1  mrg   if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
   2775  1.1  mrg     {
   2776  1.1  mrg       rtx src = SET_SRC (PATTERN (next_insn));
   2777  1.1  mrg       if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
   2778  1.1  mrg 	{
   2779  1.1  mrg 	  /* Following branch not taken */
   2780  1.1  mrg 	  rtx_code_label *lab = gen_label_rtx ();
   2781  1.1  mrg 	  emit_label_after (lab, next_insn);
   2782  1.1  mrg 	  INSN_ADDRESSES_NEW (lab,
   2783  1.1  mrg 			      INSN_ADDRESSES (INSN_UID (next_insn))
   2784  1.1  mrg 			      + get_attr_length (next_insn));
   2785  1.1  mrg 	  operands[9] = lab;
   2786  1.1  mrg 	  return templ;
   2787  1.1  mrg 	}
   2788  1.1  mrg       else
   2789  1.1  mrg 	{
   2790  1.1  mrg 	  int offset = (branch_dest (next_insn)
   2791  1.1  mrg 			- INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
   2792  1.1  mrg 	  if (offset >= -252 && offset <= 258)
   2793  1.1  mrg 	    {
   2794  1.1  mrg 	      if (GET_CODE (src) == IF_THEN_ELSE)
   2795  1.1  mrg 		/* branch_true */
   2796  1.1  mrg 		src = XEXP (src, 1);
   2797  1.1  mrg 	      operands[9] = src;
   2798  1.1  mrg 	      return templ;
   2799  1.1  mrg 	    }
   2800  1.1  mrg 	}
   2801  1.1  mrg     }
   2802  1.1  mrg   rtx_code_label *lab = gen_label_rtx ();
   2803  1.1  mrg   emit_label_after (lab, insn);
   2804  1.1  mrg   INSN_ADDRESSES_NEW (lab,
   2805  1.1  mrg 		      INSN_ADDRESSES (INSN_UID (insn))
   2806  1.1  mrg 		      + get_attr_length (insn));
   2807  1.1  mrg   operands[9] = lab;
   2808  1.1  mrg   return templ;
   2809  1.1  mrg }
   2810  1.1  mrg 
   2811  1.1  mrg const char *
   2812  1.1  mrg output_ieee_ccmpeq (rtx_insn *insn, rtx *operands)
   2813  1.1  mrg {
   2814  1.1  mrg   return output_branchy_insn (NE,      "bt	%l9" "\n"
   2815  1.1  mrg 				  "	fcmp/eq	%1,%0",
   2816  1.1  mrg 			      insn, operands);
   2817  1.1  mrg }
   2818  1.1  mrg 
   2819  1.1  mrg /* Output the start of the assembler file.  */
   2821  1.1  mrg static void
   2822  1.1  mrg sh_file_start (void)
   2823  1.1  mrg {
   2824  1.1  mrg   default_file_start ();
   2825  1.1  mrg 
   2826  1.1  mrg   if (TARGET_ELF)
   2827  1.1  mrg     /* We need to show the text section with the proper
   2828  1.1  mrg        attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
    2829  1.1  mrg        emits it without attributes; otherwise GAS
   2830  1.1  mrg        will complain.  We can teach GAS specifically about the
   2831  1.1  mrg        default attributes for our choice of text section, but
   2832  1.1  mrg        then we would have to change GAS again if/when we change
   2833  1.1  mrg        the text section name.  */
   2834  1.1  mrg     fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
   2835  1.1  mrg   else
   2836  1.1  mrg     /* Switch to the data section so that the coffsem symbol
   2837  1.1  mrg        isn't in the text section.  */
   2838  1.1  mrg     switch_to_section (data_section);
   2839  1.1  mrg 
   2840  1.1  mrg   if (TARGET_LITTLE_ENDIAN)
   2841  1.1  mrg     fputs ("\t.little\n", asm_out_file);
   2842  1.1  mrg }
   2843  1.1  mrg 
   2844  1.1  mrg /* Implementation of TARGET_ASM_INTEGER for SH.  Pointers to functions
   2846  1.1  mrg    need to be output as pointers to function descriptors for
   2847  1.1  mrg    FDPIC.  */
   2848  1.1  mrg 
   2849  1.1  mrg static bool
   2850  1.1  mrg sh_assemble_integer (rtx value, unsigned int size, int aligned_p)
   2851  1.1  mrg {
   2852  1.1  mrg   if (TARGET_FDPIC && size == UNITS_PER_WORD
   2853  1.1  mrg       && GET_CODE (value) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (value))
   2854  1.1  mrg     {
   2855  1.1  mrg       fputs ("\t.long\t", asm_out_file);
   2856  1.1  mrg       output_addr_const (asm_out_file, value);
   2857  1.1  mrg       fputs ("@FUNCDESC\n", asm_out_file);
   2858  1.1  mrg       return true;
   2859  1.1  mrg     }
   2860  1.1  mrg   return default_assemble_integer (value, size, aligned_p);
   2861  1.1  mrg }
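
/* For example, under FDPIC a word-sized pointer to a function foo is
   emitted as
	.long	foo@FUNCDESC
   so that the linker supplies the address of foo's function descriptor
   rather than the address of its code.  */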
   2862  1.1  mrg 
   2863  1.1  mrg /* Check if PAT includes UNSPEC_CALLER unspec pattern.  */
   2865  1.1  mrg static bool
   2866  1.1  mrg unspec_caller_rtx_p (rtx pat)
   2867  1.1  mrg {
   2868  1.1  mrg   rtx base, offset;
   2869  1.1  mrg   split_const (pat, &base, &offset);
   2870  1.1  mrg 
   2871  1.1  mrg   if (GET_CODE (base) == UNSPEC)
   2872  1.1  mrg     {
   2873  1.1  mrg       if (XINT (base, 1) == UNSPEC_CALLER)
   2874  1.1  mrg 	return true;
   2875  1.1  mrg       for (int i = 0; i < XVECLEN (base, 0); i++)
   2876  1.1  mrg 	if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
   2877  1.1  mrg 	  return true;
   2878  1.1  mrg     }
   2879  1.1  mrg   return false;
   2880  1.1  mrg }
   2881  1.1  mrg 
    2882  1.1  mrg /* Indicate that INSN cannot be duplicated.  This is true for insns
    2883  1.1  mrg    that generate a unique label.  */
   2884  1.1  mrg static bool
   2885  1.1  mrg sh_cannot_copy_insn_p (rtx_insn *insn)
   2886  1.1  mrg {
   2887  1.1  mrg   if (!reload_completed || !flag_pic)
   2888  1.1  mrg     return false;
   2889  1.1  mrg 
   2890  1.1  mrg   if (!NONJUMP_INSN_P (insn))
   2891  1.1  mrg     return false;
   2892  1.1  mrg   if (asm_noperands (insn) >= 0)
   2893  1.1  mrg     return false;
   2894  1.1  mrg 
   2895  1.1  mrg   rtx pat = PATTERN (insn);
   2896  1.1  mrg 
   2897  1.1  mrg   if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == USE)
   2898  1.1  mrg     return false;
   2899  1.1  mrg 
   2900  1.1  mrg   if (TARGET_FDPIC && GET_CODE (pat) == PARALLEL)
   2901  1.1  mrg     {
   2902  1.1  mrg       rtx t = XVECEXP (pat, 0, XVECLEN (pat, 0) - 1);
   2903  1.1  mrg       if (GET_CODE (t) == USE && unspec_caller_rtx_p (XEXP (t, 0)))
   2904  1.1  mrg 	return true;
   2905  1.1  mrg     }
   2906  1.1  mrg 
   2907  1.1  mrg   if (GET_CODE (pat) != SET)
   2908  1.1  mrg     return false;
   2909  1.1  mrg   pat = SET_SRC (pat);
   2910  1.1  mrg 
   2911  1.1  mrg   if (unspec_caller_rtx_p (pat))
   2912  1.1  mrg     return true;
   2913  1.1  mrg 
   2914  1.1  mrg   return false;
   2915  1.1  mrg }
   2916  1.1  mrg 
   2917  1.1  mrg /* Number of instructions used to make an arithmetic right shift by N.  */
   2919  1.1  mrg static const char ashiftrt_insns[] =
   2920  1.1  mrg   { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
   2921  1.1  mrg 
   2922  1.1  mrg /* Description of a logical left or right shift, when expanded to a sequence
   2923  1.1  mrg    of 1/2/8/16 shifts.
   2924  1.1  mrg    Notice that one bit right shifts clobber the T bit.  One bit left shifts
   2925  1.1  mrg    are done with an 'add Rn,Rm' insn and thus do not clobber the T bit.  */
   2926  1.1  mrg enum
   2927  1.1  mrg {
   2928  1.1  mrg   ASHL_CLOBBERS_T = 1 << 0,
   2929  1.1  mrg   LSHR_CLOBBERS_T = 1 << 1
   2930  1.1  mrg };
   2931  1.1  mrg 
   2932  1.1  mrg struct ashl_lshr_sequence
   2933  1.1  mrg {
   2934  1.1  mrg   char insn_count;
   2935  1.1  mrg   signed char amount[6];
   2936  1.1  mrg   char clobbers_t;
   2937  1.1  mrg };
   2938  1.1  mrg 
   2939  1.1  mrg static const struct ashl_lshr_sequence ashl_lshr_seq[32] =
   2940  1.1  mrg {
   2941  1.1  mrg   { 0, { 0 },		    0 },		// 0
   2942  1.1  mrg   { 1, { 1 },		    LSHR_CLOBBERS_T },
   2943  1.1  mrg   { 1, { 2 },		    0 },
   2944  1.1  mrg   { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
   2945  1.1  mrg   { 2, { 2, 2 },	    0 },		// 4
   2946  1.1  mrg   { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
   2947  1.1  mrg   { 3, { 2, 2, 2 },	    0 },
   2948  1.1  mrg   { 4, { 2, 2, 1, 2 },	    LSHR_CLOBBERS_T },
   2949  1.1  mrg   { 1, { 8 },		    0 },		// 8
   2950  1.1  mrg   { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
   2951  1.1  mrg   { 2, { 8, 2 },	    0 },
   2952  1.1  mrg   { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
   2953  1.1  mrg   { 3, { 8, 2, 2 },	    0 },		// 12
   2954  1.1  mrg   { 4, { 8, 2, 1, 2 },	    LSHR_CLOBBERS_T },
   2955  1.1  mrg   { 3, { 8, -2, 8 },	    0 },
   2956  1.1  mrg   { 3, { 8, -1, 8 },	    ASHL_CLOBBERS_T },
   2957  1.1  mrg   { 1, { 16 },		    0 },		// 16
   2958  1.1  mrg   { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
   2959  1.1  mrg   { 2, { 16, 2 },	    0 },
   2960  1.1  mrg   { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
   2961  1.1  mrg   { 3, { 16, 2, 2 },	    0 },		// 20
   2962  1.1  mrg   { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
   2963  1.1  mrg   { 3, { 16, -2, 8 },	    0 },
   2964  1.1  mrg   { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
   2965  1.1  mrg   { 2, { 16, 8 },	    0 },		// 24
   2966  1.1  mrg   { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
   2967  1.1  mrg   { 3, { 16, 8, 2 },	    0 },
   2968  1.1  mrg   { 4, { 16, 8, 1, 2 },     LSHR_CLOBBERS_T },
   2969  1.1  mrg   { 4, { 16, 8, 2, 2 },	    0 },		// 28
   2970  1.1  mrg   { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
   2971  1.1  mrg   { 3, { 16, -2, 16 },	    0 },
   2972  1.1  mrg 
   2973  1.1  mrg   /* For a right shift by 31 a 2 insn shll-movt sequence can be used.
    2974  1.1  mrg      For a left shift by 31 a 2 insn and-rotl sequence can be used.
   2975  1.1  mrg      However, the shift-and combiner code needs this entry here to be in
   2976  1.1  mrg      terms of real shift insns.  */
   2977  1.1  mrg   { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
   2978  1.1  mrg };
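
/* Reading example: entry 13 above is { 4, { 8, 2, 1, 2 }, LSHR_CLOBBERS_T },
   i.e. a shift by 13 is composed of partial shifts by 8 + 2 + 1 + 2 bits.
   The logical right shift variant clobbers T because of the 1-bit shlr step,
   while the left shift variant does not, since its 1-bit step is done with
   an add (see the comment before the table).  */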
   2979  1.1  mrg 
    2980  1.1  mrg /* Alternative shift sequences for shift amounts < 16, where up to three of
    2981  1.1  mrg    the highmost bits might be clobbered.  This is typically used when
    2982  1.1  mrg    combined with some kind of sign or zero extension.  */
   2983  1.1  mrg static const struct ashl_lshr_sequence ext_ashl_lshr_seq[32] =
   2984  1.1  mrg {
   2985  1.1  mrg   { 0, { 0 },		    0 },		// 0
   2986  1.1  mrg   { 1, { 1 },		    LSHR_CLOBBERS_T },
   2987  1.1  mrg   { 1, { 2 },		    0 },
   2988  1.1  mrg   { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
   2989  1.1  mrg   { 2, { 2, 2 },	    0 },		// 4
   2990  1.1  mrg   { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
   2991  1.1  mrg   { 2, { 8, -2 },	    0 },
   2992  1.1  mrg   { 2, { 8, -1 },	    ASHL_CLOBBERS_T },
   2993  1.1  mrg   { 1, { 8 },		    0 },		// 8
   2994  1.1  mrg   { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
   2995  1.1  mrg   { 2, { 8, 2 },	    0 },
   2996  1.1  mrg   { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
   2997  1.1  mrg   { 3, { 8, 2, 2 },	    0 },		// 12
   2998  1.1  mrg   { 3, { 16, -2, -1 },	    ASHL_CLOBBERS_T },
   2999  1.1  mrg   { 2, { 16, -2 },	    0 },
   3000  1.1  mrg   { 2, { 16, -1 },	    ASHL_CLOBBERS_T },
   3001  1.1  mrg   { 1, { 16 },		    0 },		// 16
   3002  1.1  mrg   { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
   3003  1.1  mrg   { 2, { 16, 2 },	    0 },
   3004  1.1  mrg   { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
   3005  1.1  mrg   { 3, { 16, 2, 2 },	    0 },		// 20
   3006  1.1  mrg   { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
   3007  1.1  mrg   { 3, { 16, -2, 8 },	    0 },
   3008  1.1  mrg   { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
   3009  1.1  mrg   { 2, { 16, 8 },	    0 },		// 24
   3010  1.1  mrg   { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
   3011  1.1  mrg   { 3, { 16, 8, 2 },	    0 },
   3012  1.1  mrg   { 4, { 16, 8, 1, 2 },	    LSHR_CLOBBERS_T },
   3013  1.1  mrg   { 4, { 16, 8, 2, 2 },	    0 },		// 28
   3014  1.1  mrg   { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
   3015  1.1  mrg   { 3, { 16, -2, 16 },	    0 },
   3016  1.1  mrg   { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
   3017  1.1  mrg };
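
/* Reading example: entry 6 here is { 2, { 8, -2 }, 0 }: for a left shift by
   6, instead of three 2-bit shifts the value is shifted left by 8 and then
   right by 2 (negative amounts denote the opposite shift direction).  This
   leaves the two highmost bits of the result wrong, which is harmless when
   a sign or zero extension is applied afterwards.  */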
   3018  1.1  mrg 
   3019  1.1  mrg /* Return true if a shift left consisting of 1/2/8/16 shift instructions
   3020  1.1  mrg    will clobber the T bit.  */
   3021  1.1  mrg bool
   3022  1.1  mrg sh_ashlsi_clobbers_t_reg_p (rtx shift_amount)
   3023  1.1  mrg {
   3024  1.1  mrg   gcc_assert (CONST_INT_P (shift_amount));
   3025  1.1  mrg 
   3026  1.1  mrg   const int shift_amount_i = INTVAL (shift_amount) & 31;
   3027  1.1  mrg 
   3028  1.1  mrg   /* Special case for shift count of 31: use and-rotl sequence.  */
   3029  1.1  mrg   if (shift_amount_i == 31)
   3030  1.1  mrg     return true;
   3031  1.1  mrg 
   3032  1.1  mrg   return (ashl_lshr_seq[shift_amount_i].clobbers_t
   3033  1.1  mrg 	  & ASHL_CLOBBERS_T) != 0;
   3034  1.1  mrg }
   3035  1.1  mrg 
   3036  1.1  mrg /* Return true if a logical right shift consisting of 1/2/8/16 shift
   3037  1.1  mrg    instructions will clobber the T bit.  */
   3038  1.1  mrg bool
   3039  1.1  mrg sh_lshrsi_clobbers_t_reg_p (rtx shift_amount)
   3040  1.1  mrg {
   3041  1.1  mrg   gcc_assert (CONST_INT_P (shift_amount));
   3042  1.1  mrg 
   3043  1.1  mrg   /* For right shifts the constant might be negative.  */
   3044  1.1  mrg   const int shift_amount_i = std::abs (INTVAL (shift_amount)) & 31;
   3045  1.1  mrg 
   3046  1.1  mrg   /* Special case for shift count of 31: use shll-movt sequence.  */
   3047  1.1  mrg   if (shift_amount_i == 31)
   3048  1.1  mrg     return true;
   3049  1.1  mrg 
   3050  1.1  mrg   return (ashl_lshr_seq[shift_amount_i].clobbers_t
   3051  1.1  mrg 	  & LSHR_CLOBBERS_T) != 0;
   3052  1.1  mrg }
   3053  1.1  mrg 
   3054  1.1  mrg /* Return true if it is potentially beneficial to use a dynamic shift
   3055  1.1  mrg    instruction (shad / shar) instead of a combination of 1/2/8/16
   3056  1.1  mrg    shift instructions for the specified shift count.
   3057  1.1  mrg    If dynamic shifts are not available, always return false.  */
   3058  1.1  mrg bool
   3059  1.1  mrg sh_dynamicalize_shift_p (rtx count)
   3060  1.1  mrg {
   3061  1.1  mrg   gcc_assert (CONST_INT_P (count));
   3062  1.1  mrg 
   3063  1.1  mrg   /* For right shifts the constant might be negative.  */
   3064  1.1  mrg   const int shift_amount_i = std::abs (INTVAL (count)) & 31;
   3065  1.1  mrg   int insn_count;
   3066  1.1  mrg 
   3067  1.1  mrg   /* For left and right shifts, there are shorter 2 insn sequences for
   3068  1.1  mrg      shift amounts of 31.  */
   3069  1.1  mrg   if (shift_amount_i == 31)
   3070  1.1  mrg     insn_count = 2;
   3071  1.1  mrg   else
   3072  1.1  mrg     insn_count = ashl_lshr_seq[shift_amount_i].insn_count;
   3073  1.1  mrg 
   3074  1.1  mrg   return TARGET_DYNSHIFT && (insn_count > 1 + SH_DYNAMIC_SHIFT_COST);
   3075  1.1  mrg }
   3076  1.1  mrg 
   3077  1.1  mrg /* Assuming we have a value that has been sign-extended by at least one bit,
    3078  1.1  mrg    can we use the ext_ashl_lshr_seq sequences with the last shift turned to an
   3079  1.1  mrg    arithmetic shift to shift it by N without data loss, and quicker than by
   3080  1.1  mrg    other means?  */
   3081  1.1  mrg #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
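
/* The condition ((n) | 8) == 15 holds exactly for N equal to 7 or 15.  */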
   3082  1.1  mrg 
   3083  1.1  mrg /* Return the cost of a shift.  */
   3084  1.1  mrg static inline int
   3085  1.1  mrg shiftcosts (rtx x)
   3086  1.1  mrg {
   3087  1.1  mrg   if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
   3088  1.1  mrg     {
   3089  1.1  mrg       if (GET_MODE (x) == DImode
   3090  1.1  mrg 	  && CONST_INT_P (XEXP (x, 1))
   3091  1.1  mrg 	  && INTVAL (XEXP (x, 1)) == 1)
   3092  1.1  mrg 	return 2;
   3093  1.1  mrg 
   3094  1.1  mrg       /* Everything else is invalid, because there is no pattern for it.  */
   3095  1.1  mrg       return -1;
   3096  1.1  mrg     }
    3097  1.1  mrg   /* A shift by a non-constant amount will be expensive.  */
   3098  1.1  mrg   if (!CONST_INT_P (XEXP (x, 1)))
   3099  1.1  mrg     return SH_DYNAMIC_SHIFT_COST;
   3100  1.1  mrg 
   3101  1.1  mrg   /* Otherwise, return the true cost in instructions.  Cope with out of range
   3102  1.1  mrg      shift counts more or less arbitrarily.  */
   3103  1.1  mrg   int value = INTVAL (XEXP (x, 1)) & 31;
   3104  1.1  mrg 
   3105  1.1  mrg   if (GET_CODE (x) == ASHIFTRT)
   3106  1.1  mrg     {
   3107  1.1  mrg       int cost = ashiftrt_insns[value];
   3108  1.1  mrg       /* If dynamic shifts are available and profitable in this case, then we
   3109  1.1  mrg 	 put the constant in a reg and use shad.  */
   3110  1.1  mrg       if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
   3111  1.1  mrg 	cost = 1 + SH_DYNAMIC_SHIFT_COST;
   3112  1.1  mrg       return cost;
   3113  1.1  mrg     }
   3114  1.1  mrg   else
   3115  1.1  mrg     return ashl_lshr_seq[value].insn_count;
   3116  1.1  mrg }
   3117  1.1  mrg 
   3118  1.1  mrg /* Return the cost of an AND/XOR/IOR operation.  */
   3119  1.1  mrg static inline int
   3120  1.1  mrg and_xor_ior_costs (rtx x, int code)
   3121  1.1  mrg {
   3122  1.1  mrg   /* On SH1-4 we have only max. SImode operations.
   3123  1.1  mrg      Double the cost for modes > SImode.  */
   3124  1.1  mrg   const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD ? 2 : 1;
   3125  1.1  mrg 
   3126  1.1  mrg   /* A logical operation with two registers is a single cycle
   3127  1.1  mrg      instruction.  */
   3128  1.1  mrg   if (!CONST_INT_P (XEXP (x, 1)))
   3129  1.1  mrg     return 1 * cost_scale;
   3130  1.1  mrg 
   3131  1.1  mrg   int i = INTVAL (XEXP (x, 1));
   3132  1.1  mrg 
   3133  1.1  mrg   /* These constants are single cycle extu.[bw] instructions.  */
   3134  1.1  mrg   if ((i == 0xff || i == 0xffff) && code == AND)
   3135  1.1  mrg     return 1 * cost_scale;
   3136  1.1  mrg   /* Constants that can be used in an instruction as an immediate are
   3137  1.1  mrg      a single cycle, but this requires r0, so make it a little more
   3138  1.1  mrg      expensive.  */
   3139  1.1  mrg   if (CONST_OK_FOR_K08 (i))
   3140  1.1  mrg     return 2 * cost_scale;
   3141  1.1  mrg   /* Constants that can be loaded with a mov immediate need one more cycle.
   3142  1.1  mrg      This case is probably unnecessary.  */
   3143  1.1  mrg   if (CONST_OK_FOR_I08 (i))
   3144  1.1  mrg     return 2 * cost_scale;
   3145  1.1  mrg   /* Any other constant requires an additional 2 cycle pc-relative load.
   3146  1.1  mrg      This case is probably unnecessary.  */
   3147  1.1  mrg   return 3 * cost_scale;
   3148  1.1  mrg }
   3149  1.1  mrg 
   3150  1.1  mrg /* Return the cost of an addition or a subtraction.  */
   3151  1.1  mrg static inline int
   3152  1.1  mrg addsubcosts (rtx x)
   3153  1.1  mrg {
   3154  1.1  mrg   if (GET_MODE (x) == SImode)
   3155  1.1  mrg     {
   3156  1.1  mrg       /* The addc or subc patterns will eventually become one or two
   3157  1.1  mrg 	 instructions.  Below are some costs for some of the patterns
   3158  1.1  mrg 	 which combine would reject because the costs of the individual
   3159  1.1  mrg 	 insns in the patterns are lower.
   3160  1.1  mrg 
   3161  1.1  mrg 	 FIXME: It would be much easier if we had something like insn cost
   3162  1.1  mrg 	 attributes and the cost calculation machinery used those attributes
   3163  1.1  mrg 	 in the first place.  This would eliminate redundant recog-like C
   3164  1.1  mrg 	 code to calculate costs of complex patterns.  */
   3165  1.1  mrg       rtx op0 = XEXP (x, 0);
   3166  1.1  mrg       rtx op1 = XEXP (x, 1);
   3167  1.1  mrg 
   3168  1.1  mrg       if (GET_CODE (x) == PLUS)
   3169  1.1  mrg 	{
   3170  1.1  mrg 	  if (GET_CODE (op0) == AND
   3171  1.1  mrg 	      && XEXP (op0, 1) == const1_rtx
   3172  1.1  mrg 	      && (GET_CODE (op1) == PLUS
   3173  1.1  mrg 		  || (GET_CODE (op1) == MULT && XEXP (op1, 1) == const2_rtx)))
   3174  1.1  mrg 	    return 1;
   3175  1.1  mrg 
   3176  1.1  mrg 	  if (GET_CODE (op0) == MULT && XEXP (op0, 1) == const2_rtx
   3177  1.1  mrg 	      && GET_CODE (op1) == LSHIFTRT
   3178  1.1  mrg 	      && CONST_INT_P (XEXP (op1, 1)) && INTVAL (XEXP (op1, 1)) == 31)
   3179  1.1  mrg 	    return 1;
   3180  1.1  mrg 	}
    3181  1.1  mrg       /* Let's assume that adding the result of an insn that stores into
   3182  1.1  mrg 	 the T bit is cheap.  */
   3183  1.1  mrg       if (treg_set_expr (op1, SImode))
   3184  1.1  mrg 	return 1;
   3185  1.1  mrg       if (treg_set_expr (op0, SImode))
   3186  1.1  mrg 	return 1;
   3187  1.1  mrg     }
   3188  1.1  mrg 
   3189  1.1  mrg   /* On SH1-4 we have only max. SImode operations.
   3190  1.1  mrg      Double the cost for modes > SImode.  */
   3191  1.1  mrg   const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD ? 2 : 1;
   3192  1.1  mrg 
   3193  1.1  mrg   /* Adding a register is a single cycle insn.  */
   3194  1.1  mrg   if (REG_P (XEXP (x, 1))
   3195  1.1  mrg       || GET_CODE (XEXP (x, 1)) == SUBREG)
   3196  1.1  mrg     return 1 * cost_scale;
   3197  1.1  mrg 
   3198  1.1  mrg   /* Likewise for small constants.  */
   3199  1.1  mrg   if (CONST_INT_P (XEXP (x, 1))
   3200  1.1  mrg       && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
   3201  1.1  mrg     return 1 * cost_scale;
   3202  1.1  mrg 
   3203  1.1  mrg   /* Any other constant requires a 2 cycle pc-relative load plus an
   3204  1.1  mrg      addition.  */
   3205  1.1  mrg   return 3 * cost_scale;
   3206  1.1  mrg }
   3207  1.1  mrg 
   3208  1.1  mrg /* Return the cost of a multiply.  */
   3209  1.1  mrg static inline int
   3210  1.1  mrg multcosts (rtx x ATTRIBUTE_UNUSED)
   3211  1.1  mrg {
   3212  1.1  mrg   if (sh_multcost >= 0)
   3213  1.1  mrg     return sh_multcost;
   3214  1.1  mrg 
   3215  1.1  mrg   if (TARGET_SH2)
   3216  1.1  mrg     {
   3217  1.1  mrg       /* We have a mul insn, so we can never take more than the mul and the
   3218  1.1  mrg 	 read of the mac reg, but count more because of the latency and extra
   3219  1.1  mrg 	 reg usage.  */
   3220  1.1  mrg       if (optimize_size)
   3221  1.1  mrg 	return 2;
   3222  1.1  mrg       return 3;
   3223  1.1  mrg     }
   3224  1.1  mrg 
   3225  1.1  mrg   /* If we're aiming at small code, then just count the number of
   3226  1.1  mrg      insns in a multiply call sequence.  */
   3227  1.1  mrg   if (optimize_size)
   3228  1.1  mrg     return 5;
   3229  1.1  mrg 
   3230  1.1  mrg   /* Otherwise count all the insns in the routine we'd be calling too.  */
   3231  1.1  mrg   return 20;
   3232  1.1  mrg }
   3233  1.1  mrg 
   3234  1.1  mrg /* Compute a (partial) cost for rtx X.  Return true if the complete
   3235  1.1  mrg    cost has been computed, and false if subexpressions should be
   3236  1.1  mrg    scanned.  In either case, *TOTAL contains the cost result.  */
   3237  1.1  mrg static bool
   3238  1.1  mrg sh_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
   3239  1.1  mrg 	      int opno ATTRIBUTE_UNUSED,
   3240  1.1  mrg 	      int *total, bool speed ATTRIBUTE_UNUSED)
   3241  1.1  mrg {
   3242  1.1  mrg   int code = GET_CODE (x);
   3243  1.1  mrg 
   3244  1.1  mrg   switch (code)
   3245  1.1  mrg     {
   3246  1.1  mrg       /* The lower-subreg pass decides whether to split multi-word regs
   3247  1.1  mrg 	 into individual regs by looking at the cost for a SET of certain
   3248  1.1  mrg 	 modes with the following patterns:
   3249  1.1  mrg 	   (set (reg) (reg))
   3250  1.1  mrg 	   (set (reg) (const_int 0))
   3251  1.1  mrg 	 On machines that support vector-move operations a multi-word move
    3252  1.1  mrg 	 is the same cost as an individual reg move.  On SH there is no
   3253  1.1  mrg 	 vector-move, so we have to provide the correct cost in the number
   3254  1.1  mrg 	 of move insns to load/store the reg of the mode in question.  */
   3255  1.1  mrg     case SET:
   3256  1.1  mrg       if (sh_movt_set_dest (x) != NULL || sh_movrt_set_dest (x) != NULL)
   3257  1.1  mrg 	{
   3258  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3259  1.1  mrg 	  return true;
   3260  1.1  mrg 	}
   3261  1.1  mrg 
   3262  1.1  mrg       if (register_operand (SET_DEST (x), VOIDmode)
   3263  1.1  mrg 	    && (register_operand (SET_SRC (x), VOIDmode)
   3264  1.1  mrg 		|| satisfies_constraint_Z (SET_SRC (x))))
   3265  1.1  mrg 	{
   3266  1.1  mrg 	  const machine_mode mode = GET_MODE (SET_DEST (x));
   3267  1.1  mrg 	  *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
   3268  1.1  mrg 				  / mov_insn_size (mode, TARGET_SH2A));
   3269  1.1  mrg 	  return true;
   3270  1.1  mrg         }
   3271  1.1  mrg       return false;
   3272  1.1  mrg 
   3273  1.1  mrg     /* The cost of a mem access is mainly the cost of the address mode.  */
   3274  1.1  mrg     case MEM:
   3275  1.1  mrg       *total = sh_address_cost (XEXP (x, 0), GET_MODE (x), MEM_ADDR_SPACE (x),
   3276  1.1  mrg 				true);
   3277  1.1  mrg       return true;
   3278  1.1  mrg 
   3279  1.1  mrg     case IF_THEN_ELSE:
   3280  1.1  mrg       /* This case is required for the if_then_else negc pattern.  */
   3281  1.1  mrg       if (treg_set_expr (XEXP (x, 0), SImode))
   3282  1.1  mrg 	{
   3283  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3284  1.1  mrg 	  return true;
   3285  1.1  mrg 	}
   3286  1.1  mrg       else
   3287  1.1  mrg 	return false;
   3288  1.1  mrg 
   3289  1.1  mrg     /* Zero extracts of single bits are usually combine patterns for the
   3290  1.1  mrg        tst insns.  */
   3291  1.1  mrg     case ZERO_EXTRACT:
   3292  1.1  mrg       if (GET_CODE (XEXP (x, 0)) == XOR
   3293  1.1  mrg 	  && arith_reg_operand (XEXP (XEXP (x, 0), 0), VOIDmode)
   3294  1.1  mrg 	  && XEXP (x, 1) == const1_rtx
   3295  1.1  mrg 	  && CONST_INT_P (XEXP (x, 2))
   3296  1.1  mrg 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
    3297  1.1  mrg 	  /* Check that the xor constant overlaps with the extracted bit.  */
   3298  1.1  mrg 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) & (1LL << INTVAL (XEXP (x, 2)))))
   3299  1.1  mrg 	{
   3300  1.1  mrg 	  *total = 1; //COSTS_N_INSNS (1);
   3301  1.1  mrg 	  return true;
   3302  1.1  mrg 	}
   3303  1.1  mrg 
   3304  1.1  mrg       /* div0s variant.  */
   3305  1.1  mrg       if (GET_CODE (XEXP (x, 0)) == XOR
   3306  1.1  mrg 	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == XOR
   3307  1.1  mrg 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
   3308  1.1  mrg 	{
   3309  1.1  mrg 	  *total = 1;
   3310  1.1  mrg 	  return true;
   3311  1.1  mrg 	}
   3312  1.1  mrg       return false;
   3313  1.1  mrg 
   3314  1.1  mrg     /* The cost of a sign or zero extend depends on whether the source is a
   3315  1.1  mrg        reg or a mem.  In case of a mem take the address into account.  */
   3316  1.1  mrg     case SIGN_EXTEND:
   3317  1.1  mrg       if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
   3318  1.1  mrg 	{
   3319  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3320  1.1  mrg 	  return true;
   3321  1.1  mrg 	}
   3322  1.1  mrg       if (MEM_P (XEXP (x, 0)))
   3323  1.1  mrg 	{
   3324  1.1  mrg 	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
   3325  1.1  mrg 				    GET_MODE (XEXP (x, 0)),
   3326  1.1  mrg 				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
   3327  1.1  mrg 	  return true;
   3328  1.1  mrg 	}
   3329  1.1  mrg       return false;
   3330  1.1  mrg 
   3331  1.1  mrg     case ZERO_EXTEND:
   3332  1.1  mrg       if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
   3333  1.1  mrg 	{
   3334  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3335  1.1  mrg 	  return true;
   3336  1.1  mrg 	}
   3337  1.1  mrg       else if (TARGET_SH2A && MEM_P (XEXP (x, 0))
   3338  1.1  mrg 	       && (GET_MODE (XEXP (x, 0)) == QImode
   3339  1.1  mrg 		   || GET_MODE (XEXP (x, 0)) == HImode))
   3340  1.1  mrg 	{
   3341  1.1  mrg 	  /* Handle SH2A's movu.b and movu.w insn.  */
   3342  1.1  mrg 	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
   3343  1.1  mrg 				    GET_MODE (XEXP (x, 0)),
   3344  1.1  mrg 				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
   3345  1.1  mrg 	  return true;
   3346  1.1  mrg 	}
   3347  1.1  mrg       return false;
   3348  1.1  mrg 
   3349  1.1  mrg     /* mems for SFmode and DFmode can be inside a parallel due to
   3350  1.1  mrg        the way the fpscr is handled.  */
   3351  1.1  mrg     case PARALLEL:
   3352  1.1  mrg       for (int i = 0; i < XVECLEN (x, 0); i++)
   3353  1.1  mrg 	{
   3354  1.1  mrg 	  rtx xx = XVECEXP (x, 0, i);
   3355  1.1  mrg 	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 0)))
   3356  1.1  mrg 	    {
   3357  1.1  mrg 	      *total = sh_address_cost (XEXP (XEXP (xx, 0), 0),
   3358  1.1  mrg 					GET_MODE (XEXP (xx, 0)),
   3359  1.1  mrg 					MEM_ADDR_SPACE (XEXP (xx, 0)), true);
   3360  1.1  mrg 	      return true;
   3361  1.1  mrg 	    }
   3362  1.1  mrg 	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 1)))
   3363  1.1  mrg 	    {
   3364  1.1  mrg 	      *total = sh_address_cost (XEXP (XEXP (xx, 1), 0),
   3365  1.1  mrg 					GET_MODE (XEXP (xx, 1)),
   3366  1.1  mrg 					MEM_ADDR_SPACE (XEXP (xx, 1)), true);
   3367  1.1  mrg 	      return true;
   3368  1.1  mrg 	    }
   3369  1.1  mrg 	}
   3370  1.1  mrg 
   3371  1.1  mrg       if (sh_1el_vec (x, VOIDmode))
   3372  1.1  mrg 	*total = outer_code != SET;
   3373  1.1  mrg       else if (sh_rep_vec (x, VOIDmode))
   3374  1.1  mrg 	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
   3375  1.1  mrg 		  + (outer_code != SET));
   3376  1.1  mrg       else
   3377  1.1  mrg 	*total = COSTS_N_INSNS (3) + (outer_code != SET);
   3378  1.1  mrg       return true;
   3379  1.1  mrg 
   3380  1.1  mrg     case CONST_INT:
   3381  1.1  mrg       if (CONST_OK_FOR_I08 (INTVAL (x)))
   3382  1.1  mrg         *total = 0;
   3383  1.1  mrg       else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
   3384  1.1  mrg 	       && CONST_OK_FOR_K08 (INTVAL (x)))
   3385  1.1  mrg         *total = 1;
    3386  1.1  mrg       /* prepare_cmp_insn will force costly constants into registers before
   3387  1.1  mrg 	 the cbranch[sd]i4 patterns can see them, so preserve potentially
   3388  1.1  mrg 	 interesting ones not covered by I08 above.  */
   3389  1.1  mrg       else if (outer_code == COMPARE
   3390  1.1  mrg 	       && ((unsigned HOST_WIDE_INT) INTVAL (x)
   3391  1.1  mrg 		    == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
   3392  1.1  mrg 		    || INTVAL (x) == 0x7fffffff
   3393  1.1  mrg 		   || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
   3394  1.1  mrg         *total = 1;
   3395  1.1  mrg       else
   3396  1.1  mrg         *total = 8;
   3397  1.1  mrg       return true;
   3398  1.1  mrg 
   3399  1.1  mrg     case EQ:
   3400  1.1  mrg       /* An and with a constant compared against zero is
   3401  1.1  mrg 	 most likely going to be a TST #imm, R0 instruction.  */
   3402  1.1  mrg       if (XEXP (x, 1) == const0_rtx
   3403  1.1  mrg           && ((GET_CODE (XEXP (x, 0)) == AND
   3404  1.1  mrg                || (SUBREG_P (XEXP (x, 0))
   3405  1.1  mrg 		   && GET_CODE (SUBREG_REG (XEXP (x, 0))) == AND))
   3406  1.1  mrg 	      || GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT))
   3407  1.1  mrg 	{
   3408  1.1  mrg 	  *total = 1;
   3409  1.1  mrg 	  return true;
   3410  1.1  mrg 	}
   3411  1.1  mrg 
   3412  1.1  mrg       else if (XEXP (x, 1) == const0_rtx
   3413  1.1  mrg 	       && GET_CODE (XEXP (x, 0)) == AND
   3414  1.1  mrg 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
   3415  1.1  mrg 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == ASHIFT
   3416  1.1  mrg 	       && arith_reg_operand (XEXP (XEXP (XEXP (x, 0), 0), 0), SImode)
   3417  1.1  mrg 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)))
   3418  1.1  mrg 	{
   3419  1.1  mrg 	  *total = 1;
   3420  1.1  mrg 	  return true;
   3421  1.1  mrg 	}
   3422  1.1  mrg       else
   3423  1.1  mrg 	return false;
   3424  1.1  mrg 
   3425  1.1  mrg     case SMIN:
   3426  1.1  mrg     case SMAX:
   3427  1.1  mrg       /* This is most likely a clips.b or clips.w insn that is being made up
   3428  1.1  mrg 	 by combine.  */
   3429  1.1  mrg       if (TARGET_SH2A
   3430  1.1  mrg 	  && (GET_CODE (XEXP (x, 0)) == SMAX || GET_CODE (XEXP (x, 0)) == SMIN)
   3431  1.1  mrg 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
   3432  1.1  mrg 	  && REG_P (XEXP (XEXP (x, 0), 0))
   3433  1.1  mrg 	  && CONST_INT_P (XEXP (x, 1)))
   3434  1.1  mrg 	{
   3435  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3436  1.1  mrg 	  return true;
   3437  1.1  mrg 	}
   3438  1.1  mrg       else
   3439  1.1  mrg 	return false;
   3440  1.1  mrg 
   3441  1.1  mrg     case CONST:
   3442  1.1  mrg     case LABEL_REF:
   3443  1.1  mrg     case SYMBOL_REF:
   3444  1.1  mrg       *total = 5;
   3445  1.1  mrg       return true;
   3446  1.1  mrg 
   3447  1.1  mrg     case CONST_DOUBLE:
    3448  1.1  mrg       /* prepare_cmp_insn will force costly constants into registers before
   3449  1.1  mrg 	 the cbranchdi4 pattern can see them, so preserve potentially
   3450  1.1  mrg 	 interesting ones.  */
   3451  1.1  mrg       if (outer_code == COMPARE && GET_MODE (x) == DImode)
   3452  1.1  mrg 	*total = 1;
   3453  1.1  mrg       else
   3454  1.1  mrg 	*total = 10;
   3455  1.1  mrg       return true;
   3456  1.1  mrg 
   3457  1.1  mrg     case CONST_VECTOR:
    3458  1.1  mrg     /* FIXME: Probably this could be folded with the PARALLEL case.  */
    3459  1.1  mrg       if (x == CONST0_RTX (GET_MODE (x)))
    3460  1.1  mrg 	*total = 0;
    3461  1.1  mrg       else if (sh_1el_vec (x, VOIDmode))
    3462  1.1  mrg 	*total = outer_code != SET;
    3463  1.1  mrg       else if (sh_rep_vec (x, VOIDmode))
    3464  1.1  mrg 	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
    3465  1.1  mrg 		  + (outer_code != SET));
    3466  1.1  mrg       else
    3467  1.1  mrg 	*total = COSTS_N_INSNS (3) + (outer_code != SET);
   3468  1.1  mrg       return true;
   3469  1.1  mrg 
   3470  1.1  mrg     case PLUS:
   3471  1.1  mrg     case MINUS:
   3472  1.1  mrg       *total = COSTS_N_INSNS (addsubcosts (x));
   3473  1.1  mrg       return true;
   3474  1.1  mrg 
   3475  1.1  mrg     case AND:
   3476  1.1  mrg       /* Check for (and (not (reg)) (const_int 1)) which is a tst insn.  */
   3477  1.1  mrg       if (GET_CODE (XEXP (x, 0)) == NOT && XEXP (x, 1) == const1_rtx)
   3478  1.1  mrg 	{
   3479  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3480  1.1  mrg 	  return true;
   3481  1.1  mrg 	}
   3482  1.1  mrg       /* Fall through.  */
   3483  1.1  mrg 
   3484  1.1  mrg     case XOR:
   3485  1.1  mrg     case IOR:
   3486  1.1  mrg       *total = COSTS_N_INSNS (and_xor_ior_costs (x, code));
   3487  1.1  mrg       return true;
   3488  1.1  mrg 
   3489  1.1  mrg     case MULT:
   3490  1.1  mrg       *total = COSTS_N_INSNS (multcosts (x));
   3491  1.1  mrg       return true;
   3492  1.1  mrg 
   3493  1.1  mrg     case LT:
   3494  1.1  mrg     case GE:
   3495  1.1  mrg       /* div0s sign comparison.  */
   3496  1.1  mrg       if (GET_CODE (XEXP (x, 0)) == XOR
   3497  1.1  mrg 	  && REG_P ((XEXP (XEXP (x, 0), 0)))
   3498  1.1  mrg 	  && REG_P ((XEXP (XEXP (x, 0), 1)))
   3499  1.1  mrg 	  && satisfies_constraint_Z (XEXP (x, 1)))
   3500  1.1  mrg 	{
   3501  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3502  1.1  mrg 	  return true;
   3503  1.1  mrg 	}
   3504  1.1  mrg       else
   3505  1.1  mrg 	return false;
   3506  1.1  mrg 
   3507  1.1  mrg     case LSHIFTRT:
   3508  1.1  mrg       /* div0s sign comparison.  */
   3509  1.1  mrg       if (GET_CODE (XEXP (x, 0)) == XOR
   3510  1.1  mrg 	  && REG_P ((XEXP (XEXP (x, 0), 0)))
   3511  1.1  mrg 	  && REG_P ((XEXP (XEXP (x, 0), 1)))
   3512  1.1  mrg 	  && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 31)
   3513  1.1  mrg 	{
   3514  1.1  mrg 	  *total = COSTS_N_INSNS (1);
   3515  1.1  mrg 	  return true;
   3516  1.1  mrg 	}
   3517  1.1  mrg       /* FALLTHRU */
   3518  1.1  mrg     case ASHIFT:
   3519  1.1  mrg     case ASHIFTRT:
   3520  1.1  mrg       {
   3521  1.1  mrg 	int cost = shiftcosts (x);
   3522  1.1  mrg 	if (cost < 0)
   3523  1.1  mrg 	  return false;
   3524  1.1  mrg 	*total = COSTS_N_INSNS (cost);
   3525  1.1  mrg 	return true;
   3526  1.1  mrg       }
   3527  1.1  mrg 
   3528  1.1  mrg     case DIV:
   3529  1.1  mrg     case UDIV:
   3530  1.1  mrg     case MOD:
   3531  1.1  mrg     case UMOD:
   3532  1.1  mrg       *total = COSTS_N_INSNS (20);
   3533  1.1  mrg       return true;
   3534  1.1  mrg 
   3535  1.1  mrg     case FLOAT:
   3536  1.1  mrg     case FIX:
   3537  1.1  mrg       *total = 100;
   3538  1.1  mrg       return true;
   3539  1.1  mrg 
   3540  1.1  mrg     default:
   3541  1.1  mrg       return false;
   3542  1.1  mrg     }
   3543  1.1  mrg }
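
/* Illustration of the CONST_INT classification above (a sketch; I08 is
   assumed to be the signed 8 bit immediate range and K08 the unsigned
   8 bit range):
     (const_int 100)			    -> 0, fits I08
     (and (reg) (const_int 255))	    -> the mask costs 1 via K08
     (compare (reg) (const_int 0x7fffffff)) -> 1, kept for cbranch[sd]i4
     (const_int 0x12345)		    -> 8, typically a constant pool load.  */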
   3544  1.1  mrg 
   3545  1.1  mrg /* Determine the size of the fundamental move insn that will be used
   3546  1.1  mrg    for the specified mode.  */
   3547  1.1  mrg static inline int
   3548  1.1  mrg mov_insn_size (machine_mode mode, bool consider_sh2a)
   3549  1.1  mrg {
   3550  1.1  mrg   const int mode_sz = GET_MODE_SIZE (mode);
   3551  1.1  mrg 
   3552  1.1  mrg   if ((consider_sh2a && TARGET_SH2A_DOUBLE && mode == DFmode)
   3553  1.1  mrg       || (TARGET_FMOVD && mode == DFmode))
   3554  1.1  mrg     return mode_sz;
   3555  1.1  mrg   else
   3556  1.1  mrg     {
   3557  1.1  mrg       /* The max. available mode for actual move insns is SImode.
   3558  1.1  mrg 	 Larger accesses will be split into multiple loads/stores.  */
   3559  1.1  mrg       const int max_mov_sz = GET_MODE_SIZE (SImode);
   3560  1.1  mrg       return mode_sz >= max_mov_sz ? max_mov_sz : mode_sz;
   3561  1.1  mrg     }
   3562  1.1  mrg }
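
/* For illustration (a sketch; the sizes simply follow GET_MODE_SIZE):
     mov_insn_size (QImode, false) -> 1   (mov.b)
     mov_insn_size (HImode, false) -> 2   (mov.w)
     mov_insn_size (SImode, false) -> 4   (mov.l)
     mov_insn_size (DImode, false) -> 4   (split into two mov.l)
     mov_insn_size (DFmode, false) -> 8 with TARGET_FMOVD, otherwise 4.  */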
   3563  1.1  mrg 
   3564  1.1  mrg /* Determine the maximum possible displacement for a move insn for the
   3565  1.1  mrg    specified mode.  */
   3566  1.1  mrg int
   3567  1.1  mrg sh_max_mov_insn_displacement (machine_mode mode, bool consider_sh2a)
   3568  1.1  mrg {
   3569  1.1  mrg   /* The 4 byte displacement move insns are the same as the 2 byte
   3570  1.1  mrg      versions but take a 12 bit displacement.  All we need to do is to
   3571  1.1  mrg      scale the max. displacement value accordingly.  */
   3572  1.1  mrg   const int disp_scale = consider_sh2a ? (4095 / 15) : 1;
   3573  1.1  mrg 
   3574  1.1  mrg   /* SH2A supports FPU move insns with 12 bit displacements.
    3575  1.1  mrg      Other variants do not support any kind of displacements for
   3576  1.1  mrg      FPU move insns.  */
   3577  1.1  mrg   if (! consider_sh2a && TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT)
   3578  1.1  mrg     return 0;
   3579  1.1  mrg   else
   3580  1.1  mrg     {
   3581  1.1  mrg       const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
   3582  1.1  mrg       const int mode_sz = GET_MODE_SIZE (mode);
   3583  1.1  mrg       int r = 15 * mov_insn_sz * disp_scale;
   3584  1.1  mrg 
   3585  1.1  mrg       /* If the mov insn will be split into multiple loads/stores, the
   3586  1.1  mrg 	 maximum possible displacement is a bit smaller.  */
   3587  1.1  mrg       if (mode_sz > mov_insn_sz)
   3588  1.1  mrg 	r -= mode_sz - mov_insn_sz;
   3589  1.1  mrg       return r;
   3590  1.1  mrg     }
   3591  1.1  mrg }
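
/* Worked examples (a sketch, derived from the formula above, with
   consider_sh2a = false unless stated otherwise):
     QImode -> 15 * 1 = 15	HImode -> 15 * 2 = 30
     SImode -> 15 * 4 = 60	DImode -> 15 * 4 - 4 = 56 (split move)
     SImode with consider_sh2a = true -> 15 * 4 * (4095 / 15) = 16380
     SFmode/DFmode with an FPU and consider_sh2a = false -> 0.  */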
   3592  1.1  mrg 
   3593  1.1  mrg /* Determine the alignment mask for a move insn of the
   3594  1.1  mrg    specified mode.  */
   3595  1.1  mrg static inline int
   3596  1.1  mrg mov_insn_alignment_mask (machine_mode mode, bool consider_sh2a)
   3597  1.1  mrg {
   3598  1.1  mrg   const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
   3599  1.1  mrg   return mov_insn_sz > 0 ? (mov_insn_sz - 1) : 0;
   3600  1.1  mrg }
   3601  1.1  mrg 
   3602  1.1  mrg /* Return the displacement value of a displacement address.  */
   3603  1.1  mrg HOST_WIDE_INT
   3604  1.1  mrg sh_disp_addr_displacement (rtx x)
   3605  1.1  mrg {
   3606  1.1  mrg   gcc_assert (satisfies_constraint_Sdd (x));
   3607  1.1  mrg   return INTVAL (XEXP (XEXP (x, 0), 1));
   3608  1.1  mrg }
   3609  1.1  mrg 
   3610  1.1  mrg /* Compute the cost of an address.  */
   3611  1.1  mrg static int
   3612  1.1  mrg sh_address_cost (rtx x, machine_mode mode,
   3613  1.1  mrg 		 addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
   3614  1.1  mrg {
   3615  1.1  mrg   /* 'GBR + 0'.  Account one more because of R0 restriction.  */
   3616  1.1  mrg   if (REG_P (x) && REGNO (x) == GBR_REG)
   3617  1.1  mrg     return 2;
   3618  1.1  mrg 
   3619  1.1  mrg   /* Simple reg, post-inc, pre-dec addressing.  */
   3620  1.1  mrg   if (REG_P (x) || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
   3621  1.1  mrg     return 1;
   3622  1.1  mrg 
   3623  1.1  mrg   /* 'reg + disp' addressing.  */
   3624  1.1  mrg   if (GET_CODE (x) == PLUS
   3625  1.1  mrg       && REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
   3626  1.1  mrg     {
   3627  1.1  mrg       /* 'GBR + disp'.  Account one more because of R0 restriction.  */
   3628  1.1  mrg       if (REGNO (XEXP (x, 0)) == GBR_REG
   3629  1.1  mrg 	  && gbr_displacement (XEXP (x, 1), mode))
   3630  1.1  mrg 	return 2;
   3631  1.1  mrg 
   3632  1.1  mrg       const HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
   3633  1.1  mrg 
   3634  1.1  mrg       if (offset == 0)
   3635  1.1  mrg 	return 1;
   3636  1.1  mrg 
   3637  1.1  mrg       /* The displacement would fit into a 2 byte move insn.
   3638  1.1  mrg 	 HImode and QImode loads/stores with displacement put pressure on
   3639  1.1  mrg 	 R0 which will most likely require another reg copy.  Thus account
   3640  1.1  mrg 	 a higher cost for that.  */
   3641  1.1  mrg       if (offset > 0 && offset <= sh_max_mov_insn_displacement (mode, false))
   3642  1.1  mrg 	return (mode == HImode || mode == QImode) ? 2 : 1;
   3643  1.1  mrg 
   3644  1.1  mrg       /* The displacement would fit into a 4 byte move insn (SH2A).  */
   3645  1.1  mrg       if (TARGET_SH2A
   3646  1.1  mrg 	  && offset > 0 && offset <= sh_max_mov_insn_displacement (mode, true))
   3647  1.1  mrg 	return 2;
   3648  1.1  mrg 
   3649  1.1  mrg       /* The displacement is probably out of range and will require extra
   3650  1.1  mrg 	 calculations.  */
   3651  1.1  mrg       return 3;
   3652  1.1  mrg     }
   3653  1.1  mrg 
   3654  1.1  mrg   /* 'reg + reg' addressing.  Account a slightly higher cost because of
   3655  1.1  mrg      increased pressure on R0.  */
   3656  1.1  mrg   if (GET_CODE (x) == PLUS && ! CONSTANT_P (XEXP (x, 1)))
   3657  1.1  mrg     return 3;
   3658  1.1  mrg 
   3659  1.1  mrg   /* Not sure what it is - probably expensive.  */
   3660  1.1  mrg   return 10;
   3661  1.1  mrg }
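
/* Illustrative address costs from the cases above (a sketch; the mode is
   assumed to be SImode unless stated otherwise):
     @r1			-> 1	@r1+ / @-r1		-> 1
     gbr with zero displacement	-> 2	@(disp,gbr)		-> 2
     @(8,r1)			-> 1	@(8,r1) in QImode	-> 2
     @(4000,r1) on SH2A		-> 2	out-of-range displacement -> 3
     @(r0,r1)			-> 3	anything else		-> 10.  */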
   3662  1.1  mrg 
   3663  1.1  mrg /* Code to expand a shift.  */
   3664  1.1  mrg static void
   3665  1.1  mrg gen_ashift (int type, int n, rtx reg)
   3666  1.1  mrg {
   3667  1.1  mrg   rtx n_rtx;
   3668  1.1  mrg 
   3669  1.1  mrg   /* Negative values here come from the shift_amounts array.  */
   3670  1.1  mrg   if (n < 0)
   3671  1.1  mrg     {
   3672  1.1  mrg       if (type == ASHIFT)
   3673  1.1  mrg 	type = LSHIFTRT;
   3674  1.1  mrg       else
   3675  1.1  mrg 	type = ASHIFT;
   3676  1.1  mrg       n = -n;
   3677  1.1  mrg     }
   3678  1.1  mrg 
   3679  1.1  mrg   n_rtx = GEN_INT (n);
   3680  1.1  mrg   gcc_assert (satisfies_constraint_P27 (n_rtx));
   3681  1.1  mrg 
   3682  1.1  mrg   switch (type)
   3683  1.1  mrg     {
   3684  1.1  mrg     case ASHIFTRT:
   3685  1.1  mrg       emit_insn (gen_ashrsi3_k (reg, reg, n_rtx));
   3686  1.1  mrg       break;
   3687  1.1  mrg     case LSHIFTRT:
   3688  1.1  mrg       if (n == 1)
   3689  1.1  mrg 	emit_insn (gen_shlr (reg, reg));
   3690  1.1  mrg       else
   3691  1.1  mrg 	emit_insn (gen_lshrsi3_k (reg, reg, n_rtx));
   3692  1.1  mrg       break;
   3693  1.1  mrg     case ASHIFT:
   3694  1.1  mrg       emit_insn (gen_ashlsi3_k (reg, reg, n_rtx));
   3695  1.1  mrg       break;
   3696  1.1  mrg     default:
   3697  1.1  mrg       gcc_unreachable ();
   3698  1.1  mrg     }
   3699  1.1  mrg }
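
/* Example (a sketch): gen_ashift (ASHIFT, -2, reg) is rewritten above into
   a logical right shift and in effect emits shlr2, while
   gen_ashift (ASHIFT, 8, reg) emits shll8.  The amount must be a shift
   count that a single insn can handle (satisfying P27, typically 1, 2, 8
   or 16).  */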
   3700  1.1  mrg 
   3701  1.1  mrg /* Code to expand a HImode shift.  */
   3702  1.1  mrg static void
   3703  1.1  mrg gen_ashift_hi (int type, int n, rtx reg)
   3704  1.1  mrg {
   3705  1.1  mrg   /* Negative values here come from the shift_amounts array.  */
   3706  1.1  mrg   if (n < 0)
   3707  1.1  mrg     {
   3708  1.1  mrg       if (type == ASHIFT)
   3709  1.1  mrg 	type = LSHIFTRT;
   3710  1.1  mrg       else
   3711  1.1  mrg 	type = ASHIFT;
   3712  1.1  mrg       n = -n;
   3713  1.1  mrg     }
   3714  1.1  mrg 
   3715  1.1  mrg   switch (type)
   3716  1.1  mrg     {
   3717  1.1  mrg     case ASHIFTRT:
   3718  1.1  mrg     case LSHIFTRT:
   3719  1.1  mrg       /* We don't have HImode right shift operations because using the
   3720  1.1  mrg 	 ordinary 32 bit shift instructions for that doesn't generate proper
   3721  1.1  mrg 	 zero/sign extension.
   3722  1.1  mrg 	 gen_ashift_hi is only called in contexts where we know that the
   3723  1.1  mrg 	 sign extension works out correctly.  */
   3724  1.1  mrg       {
   3725  1.1  mrg 	int offset = 0;
   3726  1.1  mrg 	if (GET_CODE (reg) == SUBREG)
   3727  1.1  mrg 	  {
   3728  1.1  mrg 	    offset = SUBREG_BYTE (reg);
   3729  1.1  mrg 	    reg = SUBREG_REG (reg);
   3730  1.1  mrg 	  }
   3731  1.1  mrg 	gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
   3732  1.1  mrg 	break;
   3733  1.1  mrg       }
   3734  1.1  mrg     case ASHIFT:
   3735  1.1  mrg       emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
   3736  1.1  mrg       break;
   3737  1.1  mrg     }
   3738  1.1  mrg }
   3739  1.1  mrg 
   3740  1.1  mrg /* Output RTL to split a constant shift into its component SH constant
   3741  1.1  mrg    shift instructions.  */
   3742  1.1  mrg void
   3743  1.1  mrg gen_shifty_op (int code, rtx *operands)
   3744  1.1  mrg {
   3745  1.1  mrg   int value = INTVAL (operands[2]);
   3746  1.1  mrg   int max, i;
   3747  1.1  mrg 
   3748  1.1  mrg   /* Truncate the shift count in case it is out of bounds.  */
   3749  1.1  mrg   value = value & 31;
   3750  1.1  mrg 
   3751  1.1  mrg   if (value == 31)
   3752  1.1  mrg     {
   3753  1.1  mrg       if (code == LSHIFTRT)
   3754  1.1  mrg 	{
   3755  1.1  mrg 	  emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
   3756  1.1  mrg 	  emit_insn (gen_movt (operands[0], get_t_reg_rtx ()));
   3757  1.1  mrg 	  return;
   3758  1.1  mrg 	}
   3759  1.1  mrg       else if (code == ASHIFT)
   3760  1.1  mrg 	{
   3761  1.1  mrg 	  /* There is a two instruction sequence for 31 bit left shifts,
   3762  1.1  mrg 	     but it requires r0.  */
   3763  1.1  mrg 	  if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
   3764  1.1  mrg 	    {
   3765  1.1  mrg 	      emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
   3766  1.1  mrg 	      emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
   3767  1.1  mrg 	      return;
   3768  1.1  mrg 	    }
   3769  1.1  mrg 	}
   3770  1.1  mrg     }
   3771  1.1  mrg   else if (value == 0)
   3772  1.1  mrg     {
   3773  1.1  mrg       /* This can happen even when optimizing, if there were subregs before
   3774  1.1  mrg 	 reload.  Don't output a nop here, as this is never optimized away;
   3775  1.1  mrg 	 use a no-op move instead.  */
   3776  1.1  mrg       emit_insn (gen_rtx_SET (operands[0], operands[0]));
   3777  1.1  mrg       return;
   3778  1.1  mrg     }
   3779  1.1  mrg 
   3780  1.1  mrg   max = ashl_lshr_seq[value].insn_count;
   3781  1.1  mrg   for (i = 0; i < max; i++)
   3782  1.1  mrg     gen_ashift (code, ashl_lshr_seq[value].amount[i], operands[0]);
   3783  1.1  mrg }
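
/* Example (a sketch; the exact decomposition comes from ashl_lshr_seq):
   a constant left shift by 10 is typically emitted as two insns,
	shll8	rn
	shll2	rn
   while a logical right shift by 31 uses the special case above,
	rotl	rn
	movt	rn  */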
   3784  1.1  mrg 
   3785  1.1  mrg /* Same as gen_shifty_op, but optimized for values where the topmost bits
   3786  1.1  mrg    don't matter.  */
   3787  1.1  mrg void
   3788  1.1  mrg gen_shifty_hi_op (int code, rtx *operands)
   3789  1.1  mrg {
   3790  1.1  mrg   int value = INTVAL (operands[2]);
   3791  1.1  mrg   int max, i;
   3792  1.1  mrg   void (*gen_fun) (int, int, rtx);
   3793  1.1  mrg 
   3794  1.1  mrg   /* This operation is used by and_shl for SImode values with a few
   3795  1.1  mrg      high bits known to be cleared.  */
   3796  1.1  mrg   value &= 31;
   3797  1.1  mrg   if (value == 0)
   3798  1.1  mrg     {
   3799  1.1  mrg       emit_insn (gen_nop ());
   3800  1.1  mrg       return;
   3801  1.1  mrg     }
   3802  1.1  mrg 
   3803  1.1  mrg   gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
   3804  1.1  mrg   if (code == ASHIFT)
   3805  1.1  mrg     {
   3806  1.1  mrg       max = ext_ashl_lshr_seq[value].insn_count;
   3807  1.1  mrg       for (i = 0; i < max; i++)
   3808  1.1  mrg 	gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
   3809  1.1  mrg     }
   3810  1.1  mrg   else
   3811  1.1  mrg     /* When shifting right, emit the shifts in reverse order, so that
   3812  1.1  mrg        solitary negative values come first.  */
   3813  1.1  mrg     for (i = ext_ashl_lshr_seq[value].insn_count - 1; i >= 0; i--)
   3814  1.1  mrg       gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
   3815  1.1  mrg }
   3816  1.1  mrg 
   3817  1.1  mrg /* Output RTL for an arithmetic right shift.
   3818  1.1  mrg    ??? Rewrite to use super-optimizer sequences.  */
   3819  1.1  mrg bool
   3820  1.1  mrg expand_ashiftrt (rtx *operands)
   3821  1.1  mrg {
   3822  1.1  mrg   rtx wrk;
   3823  1.1  mrg   char func[18];
   3824  1.1  mrg   int value;
   3825  1.1  mrg 
   3826  1.1  mrg   if (TARGET_DYNSHIFT)
   3827  1.1  mrg     {
   3828  1.1  mrg       if (!CONST_INT_P (operands[2]))
   3829  1.1  mrg 	{
   3830  1.1  mrg 	  rtx count = copy_to_mode_reg (SImode, operands[2]);
   3831  1.1  mrg 	  emit_insn (gen_negsi2 (count, count));
   3832  1.1  mrg 	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
   3833  1.1  mrg 	  return true;
   3834  1.1  mrg 	}
   3835  1.1  mrg       else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
   3836  1.1  mrg 	       > 1 + SH_DYNAMIC_SHIFT_COST)
   3837  1.1  mrg 	{
   3838  1.1  mrg 	  rtx count
   3839  1.1  mrg 	    = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
   3840  1.1  mrg 	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
   3841  1.1  mrg 	  return true;
   3842  1.1  mrg 	}
   3843  1.1  mrg     }
   3844  1.1  mrg   if (!CONST_INT_P (operands[2]))
   3845  1.1  mrg     return false;
   3846  1.1  mrg 
   3847  1.1  mrg   value = INTVAL (operands[2]) & 31;
   3848  1.1  mrg 
   3849  1.1  mrg   if (value == 31)
   3850  1.1  mrg     {
    3851  1.1  mrg       /* If we are called from abs expansion, arrange things so that we
    3852  1.1  mrg 	 can use a single MT instruction that doesn't clobber the source,
   3853  1.1  mrg 	 if LICM can hoist out the load of the constant zero.  */
   3854  1.1  mrg       if (currently_expanding_to_rtl)
   3855  1.1  mrg 	{
   3856  1.1  mrg 	  emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
   3857  1.1  mrg 				    operands[1]));
   3858  1.1  mrg 	  emit_insn (gen_mov_neg_si_t (operands[0], get_t_reg_rtx ()));
   3859  1.1  mrg 	  return true;
   3860  1.1  mrg 	}
   3861  1.1  mrg       emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
   3862  1.1  mrg       return true;
   3863  1.1  mrg     }
   3864  1.1  mrg   else if (value >= 16 && value <= 19)
   3865  1.1  mrg     {
   3866  1.1  mrg       wrk = gen_reg_rtx (SImode);
   3867  1.1  mrg       emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
   3868  1.1  mrg       value -= 16;
   3869  1.1  mrg       while (value--)
   3870  1.1  mrg 	gen_ashift (ASHIFTRT, 1, wrk);
   3871  1.1  mrg       emit_move_insn (operands[0], wrk);
   3872  1.1  mrg       return true;
   3873  1.1  mrg     }
    3874  1.1  mrg   /* Expand a short sequence inline; for longer ones, call a helper routine.  */
   3875  1.1  mrg   else if (value <= 5)
   3876  1.1  mrg     {
   3877  1.1  mrg       wrk = gen_reg_rtx (SImode);
   3878  1.1  mrg       emit_move_insn (wrk, operands[1]);
   3879  1.1  mrg       while (value--)
   3880  1.1  mrg 	gen_ashift (ASHIFTRT, 1, wrk);
   3881  1.1  mrg       emit_move_insn (operands[0], wrk);
   3882  1.1  mrg       return true;
   3883  1.1  mrg     }
   3884  1.1  mrg 
   3885  1.1  mrg   wrk = gen_reg_rtx (Pmode);
   3886  1.1  mrg 
   3887  1.1  mrg   /* Load the value into an arg reg and call a helper.  */
   3888  1.1  mrg   emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
   3889  1.1  mrg   sprintf (func, "__ashiftrt_r4_%d", value);
   3890  1.1  mrg   rtx lab = function_symbol (wrk, func, SFUNC_STATIC).lab;
   3891  1.1  mrg   emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk, lab));
   3892  1.1  mrg   emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
   3893  1.1  mrg   return true;
   3894  1.1  mrg }
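
/* Summary of the strategies above, for illustration (a sketch):
     shift by 31     -> compare against zero and materialize 0 / -1 from T
     shift by 16..19 -> ashrsi2_16 (likely swap.w + exts.w) plus 0..3 shar
     shift by 1..5   -> that many shar insns inline
     anything else   -> load r4 and call the __ashiftrt_r4_<n> helper;
   with TARGET_DYNSHIFT, a negated count and ashrsi3_d may be used instead
   when that is cheaper.  */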
   3895  1.1  mrg 
   3896  1.1  mrg /* Try to find a good way to implement the combiner pattern
   3897  1.1  mrg   [(set (match_operand:SI 0 "register_operand" "r")
   3898  1.1  mrg         (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
   3899  1.1  mrg                            (match_operand:SI 2 "const_int_operand" "n"))
   3900  1.1  mrg                 (match_operand:SI 3 "const_int_operand" "n"))) .
   3901  1.1  mrg   LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
   3902  1.1  mrg   return 0 for simple right / left or left/right shift combination.
   3903  1.1  mrg   return 1 for a combination of shifts with zero_extend.
   3904  1.1  mrg   return 2 for a combination of shifts with an AND that needs r0.
   3905  1.1  mrg   return 3 for a combination of shifts with an AND that needs an extra
   3906  1.1  mrg     scratch register, when the three highmost bits of the AND mask are clear.
   3907  1.1  mrg   return 4 for a combination of shifts with an AND that needs an extra
   3908  1.1  mrg     scratch register, when any of the three highmost bits of the AND mask
   3909  1.1  mrg     is set.
   3910  1.1  mrg   If ATTRP is set, store an initial right shift width in ATTRP[0],
   3911  1.1  mrg   and the instruction length in ATTRP[1] .  These values are not valid
   3912  1.1  mrg   when returning 0.
   3913  1.1  mrg   When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
   3914  1.1  mrg   shift_amounts for the last shift value that is to be used before the
   3915  1.1  mrg   sign extend.  */
   3916  1.1  mrg int
   3917  1.1  mrg shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
   3918  1.1  mrg {
   3919  1.1  mrg   unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
   3920  1.1  mrg   int left = INTVAL (left_rtx), right;
   3921  1.1  mrg   int best = 0;
   3922  1.1  mrg   int cost, best_cost = 10000;
   3923  1.1  mrg   int best_right = 0, best_len = 0;
   3924  1.1  mrg   int i;
   3925  1.1  mrg   int can_ext;
   3926  1.1  mrg 
   3927  1.1  mrg   if (left < 0 || left > 31)
   3928  1.1  mrg     return 0;
   3929  1.1  mrg   if (CONST_INT_P (mask_rtx))
   3930  1.1  mrg     mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
   3931  1.1  mrg   else
   3932  1.1  mrg     mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
   3933  1.1  mrg   /* Can this be expressed as a right shift / left shift pair?  */
   3934  1.1  mrg   lsb = ((mask ^ (mask - 1)) >> 1) + 1;
   3935  1.1  mrg   right = exact_log2 (lsb);
   3936  1.1  mrg   mask2 = ~(mask + lsb - 1);
   3937  1.1  mrg   lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
   3938  1.1  mrg   /* mask has no zeroes but trailing zeroes <==> ! mask2 */
   3939  1.1  mrg   if (! mask2)
   3940  1.1  mrg     best_cost = ashl_lshr_seq[right].insn_count
   3941  1.1  mrg 		+ ashl_lshr_seq[right + left].insn_count;
   3942  1.1  mrg   /* mask has no trailing zeroes <==> ! right */
   3943  1.1  mrg   else if (! right && mask2 == ~(lsb2 - 1))
   3944  1.1  mrg     {
   3945  1.1  mrg       int late_right = exact_log2 (lsb2);
   3946  1.1  mrg       best_cost = ashl_lshr_seq[left + late_right].insn_count
   3947  1.1  mrg 		  + ashl_lshr_seq[late_right].insn_count;
   3948  1.1  mrg     }
   3949  1.1  mrg   /* Try to use zero extend.  */
   3950  1.1  mrg   if (mask2 == ~(lsb2 - 1))
   3951  1.1  mrg     {
   3952  1.1  mrg       int width, first;
   3953  1.1  mrg 
   3954  1.1  mrg       for (width = 8; width <= 16; width += 8)
   3955  1.1  mrg 	{
   3956  1.1  mrg 	  /* Can we zero-extend right away?  */
   3957  1.1  mrg 	  if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
   3958  1.1  mrg 	    {
   3959  1.1  mrg 	      cost = 1 + ext_ashl_lshr_seq[right].insn_count
   3960  1.1  mrg 		       + ext_ashl_lshr_seq[left + right].insn_count;
   3961  1.1  mrg 	      if (cost < best_cost)
   3962  1.1  mrg 		{
   3963  1.1  mrg 		  best = 1;
   3964  1.1  mrg 		  best_cost = cost;
   3965  1.1  mrg 		  best_right = right;
   3966  1.1  mrg 		  best_len = cost;
   3967  1.1  mrg 		  if (attrp)
   3968  1.1  mrg 		    attrp[2] = -1;
   3969  1.1  mrg 		}
   3970  1.1  mrg 	      continue;
   3971  1.1  mrg 	    }
   3972  1.1  mrg 	  /* ??? Could try to put zero extend into initial right shift,
   3973  1.1  mrg 	     or even shift a bit left before the right shift.  */
   3974  1.1  mrg 	  /* Determine value of first part of left shift, to get to the
   3975  1.1  mrg 	     zero extend cut-off point.  */
   3976  1.1  mrg 	  first = width - exact_log2 (lsb2) + right;
   3977  1.1  mrg 	  if (first >= 0 && right + left - first >= 0)
   3978  1.1  mrg 	    {
   3979  1.1  mrg 	      cost = ext_ashl_lshr_seq[right].insn_count
   3980  1.1  mrg 		     + ext_ashl_lshr_seq[first].insn_count + 1
   3981  1.1  mrg 		     + ext_ashl_lshr_seq[right + left - first].insn_count;
   3982  1.1  mrg 
   3983  1.1  mrg 	      if (cost < best_cost)
   3984  1.1  mrg 		{
   3985  1.1  mrg 		  best = 1;
   3986  1.1  mrg 		  best_cost = cost;
   3987  1.1  mrg 		  best_right = right;
   3988  1.1  mrg 		  best_len = cost;
   3989  1.1  mrg 		  if (attrp)
   3990  1.1  mrg 		    attrp[2] = first;
   3991  1.1  mrg 		}
   3992  1.1  mrg 	    }
   3993  1.1  mrg 	}
   3994  1.1  mrg     }
   3995  1.1  mrg   /* Try to use r0 AND pattern */
   3996  1.1  mrg   for (i = 0; i <= 2; i++)
   3997  1.1  mrg     {
   3998  1.1  mrg       if (i > right)
   3999  1.1  mrg 	break;
   4000  1.1  mrg       if (! CONST_OK_FOR_K08 (mask >> i))
   4001  1.1  mrg 	continue;
   4002  1.1  mrg       cost = (i != 0) + 2 + ext_ashl_lshr_seq[left + i].insn_count;
   4003  1.1  mrg       if (cost < best_cost)
   4004  1.1  mrg 	{
   4005  1.1  mrg 	  best = 2;
   4006  1.1  mrg 	  best_cost = cost;
   4007  1.1  mrg 	  best_right = i;
   4008  1.1  mrg 	  best_len = cost - 1;
   4009  1.1  mrg 	}
   4010  1.1  mrg     }
   4011  1.1  mrg   /* Try to use a scratch register to hold the AND operand.  */
   4012  1.1  mrg   can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
   4013  1.1  mrg   for (i = 0; i <= 2; i++)
   4014  1.1  mrg     {
   4015  1.1  mrg       if (i > right)
   4016  1.1  mrg 	break;
   4017  1.1  mrg       cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
   4018  1.1  mrg 	     + (can_ext
   4019  1.1  mrg 		? ext_ashl_lshr_seq
   4020  1.1  mrg 		: ashl_lshr_seq)[left + i].insn_count;
   4021  1.1  mrg       if (cost < best_cost)
   4022  1.1  mrg 	{
   4023  1.1  mrg 	  best = 4 - can_ext;
   4024  1.1  mrg 	  best_cost = cost;
   4025  1.1  mrg 	  best_right = i;
   4026  1.1  mrg 	  best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
   4027  1.1  mrg 	}
   4028  1.1  mrg     }
   4029  1.1  mrg 
   4030  1.1  mrg   if (attrp)
   4031  1.1  mrg     {
   4032  1.1  mrg       attrp[0] = best_right;
   4033  1.1  mrg       attrp[1] = best_len;
   4034  1.1  mrg     }
   4035  1.1  mrg   return best;
   4036  1.1  mrg }
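
/* Worked example (a sketch, assuming single-insn table entries for a shift
   count of 2): for LEFT_RTX = 2 and MASK_RTX = 0x3fc the shifted-down mask
   is 0xff, so the zero-extend alternative wins with a cost of 2 and
   shl_and_kind returns 1; gen_shl_and below then expands this to roughly
   extu.b + shll2.  */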
   4037  1.1  mrg 
   4038  1.1  mrg /* This is used in length attributes of the unnamed instructions
   4039  1.1  mrg    corresponding to shl_and_kind return values of 1 and 2.  */
   4040  1.1  mrg int
   4041  1.1  mrg shl_and_length (rtx insn)
   4042  1.1  mrg {
   4043  1.1  mrg   rtx set_src, left_rtx, mask_rtx;
   4044  1.1  mrg   int attributes[3];
   4045  1.1  mrg 
   4046  1.1  mrg   set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
   4047  1.1  mrg   left_rtx = XEXP (XEXP (set_src, 0), 1);
   4048  1.1  mrg   mask_rtx = XEXP (set_src, 1);
   4049  1.1  mrg   shl_and_kind (left_rtx, mask_rtx, attributes);
   4050  1.1  mrg   return attributes[1];
   4051  1.1  mrg }
   4052  1.1  mrg 
   4053  1.1  mrg /* This is used in length attribute of the and_shl_scratch instruction.  */
   4054  1.1  mrg int
   4055  1.1  mrg shl_and_scr_length (rtx insn)
   4056  1.1  mrg {
   4057  1.1  mrg   rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
   4058  1.1  mrg   int len = ashl_lshr_seq[INTVAL (XEXP (set_src, 1)) & 31].insn_count;
   4059  1.1  mrg   rtx op = XEXP (set_src, 0);
   4060  1.1  mrg   len += ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count + 1;
   4061  1.1  mrg   op = XEXP (XEXP (op, 0), 0);
   4062  1.1  mrg   return len + ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count;
   4063  1.1  mrg }
   4064  1.1  mrg 
   4065  1.1  mrg /* Generate rtl for instructions for which shl_and_kind advised a particular
    4066  1.1  mrg    method of generating them, i.e. returned a nonzero value.  */
   4067  1.1  mrg bool
   4068  1.1  mrg gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
   4069  1.1  mrg {
   4070  1.1  mrg   int attributes[3];
   4071  1.1  mrg   unsigned HOST_WIDE_INT mask;
   4072  1.1  mrg   int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
   4073  1.1  mrg   int right, total_shift;
   4074  1.1  mrg   void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
   4075  1.1  mrg 
   4076  1.1  mrg   right = attributes[0];
   4077  1.1  mrg   total_shift = INTVAL (left_rtx) + right;
   4078  1.1  mrg   mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
   4079  1.1  mrg   switch (kind)
   4080  1.1  mrg     {
   4081  1.1  mrg     default:
   4082  1.1  mrg       return true;
   4083  1.1  mrg     case 1:
   4084  1.1  mrg       {
   4085  1.1  mrg 	int first = attributes[2];
   4086  1.1  mrg 	rtx operands[3];
   4087  1.1  mrg 
   4088  1.1  mrg 	if (first < 0)
   4089  1.1  mrg 	  {
   4090  1.1  mrg 	    emit_insn ((mask << right) <= 0xff
   4091  1.1  mrg 		       ? gen_zero_extendqisi2 (dest,
   4092  1.1  mrg 					       gen_lowpart (QImode, source))
   4093  1.1  mrg 		       : gen_zero_extendhisi2 (dest,
   4094  1.1  mrg 					       gen_lowpart (HImode, source)));
   4095  1.1  mrg 	    source = dest;
   4096  1.1  mrg 	  }
   4097  1.1  mrg 	if (source != dest)
   4098  1.1  mrg 	  emit_insn (gen_movsi (dest, source));
   4099  1.1  mrg 	operands[0] = dest;
   4100  1.1  mrg 	if (right)
   4101  1.1  mrg 	  {
   4102  1.1  mrg 	    operands[2] = GEN_INT (right);
   4103  1.1  mrg 	    gen_shifty_hi_op (LSHIFTRT, operands);
   4104  1.1  mrg 	  }
   4105  1.1  mrg 	if (first > 0)
   4106  1.1  mrg 	  {
   4107  1.1  mrg 	    operands[2] = GEN_INT (first);
   4108  1.1  mrg 	    gen_shifty_hi_op (ASHIFT, operands);
   4109  1.1  mrg 	    total_shift -= first;
   4110  1.1  mrg 	    mask <<= first;
   4111  1.1  mrg 	  }
   4112  1.1  mrg 	if (first >= 0)
   4113  1.1  mrg 	  emit_insn (mask <= 0xff
   4114  1.1  mrg 		     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
   4115  1.1  mrg 		     : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
   4116  1.1  mrg 	if (total_shift > 0)
   4117  1.1  mrg 	  {
   4118  1.1  mrg 	    operands[2] = GEN_INT (total_shift);
   4119  1.1  mrg 	    gen_shifty_hi_op (ASHIFT, operands);
   4120  1.1  mrg 	  }
   4121  1.1  mrg 	break;
   4122  1.1  mrg       }
   4123  1.1  mrg     case 4:
   4124  1.1  mrg       shift_gen_fun = gen_shifty_op;
   4125  1.1  mrg       /* FALLTHRU */
   4126  1.1  mrg     case 3:
   4127  1.1  mrg       /* If the topmost bit that matters is set, set the topmost bits
   4128  1.1  mrg 	 that don't matter.  This way, we might be able to get a shorter
   4129  1.1  mrg 	 signed constant.  */
   4130  1.1  mrg       if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
   4131  1.1  mrg 	mask |= (HOST_WIDE_INT) ((HOST_WIDE_INT_M1U) << (31 - total_shift));
   4132  1.1  mrg       /* FALLTHRU */
   4133  1.1  mrg     case 2:
   4134  1.1  mrg       /* Don't expand fine-grained when combining, because that will
   4135  1.1  mrg          make the pattern fail.  */
   4136  1.1  mrg       if (currently_expanding_to_rtl
   4137  1.1  mrg 	  || reload_in_progress || reload_completed)
   4138  1.1  mrg 	{
   4139  1.1  mrg 	  rtx operands[3];
   4140  1.1  mrg 
   4141  1.1  mrg 	  /* Cases 3 and 4 should be handled by this split
   4142  1.1  mrg 	     only while combining  */
   4143  1.1  mrg 	  gcc_assert (kind <= 2);
   4144  1.1  mrg 	  if (right)
   4145  1.1  mrg 	    {
   4146  1.1  mrg 	      emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
   4147  1.1  mrg 	      source = dest;
   4148  1.1  mrg 	    }
   4149  1.1  mrg 	  emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
   4150  1.1  mrg 	  if (total_shift)
   4151  1.1  mrg 	    {
   4152  1.1  mrg 	      operands[0] = dest;
   4153  1.1  mrg 	      operands[1] = dest;
   4154  1.1  mrg 	      operands[2] = GEN_INT (total_shift);
   4155  1.1  mrg 	      shift_gen_fun (ASHIFT, operands);
   4156  1.1  mrg 	    }
   4157  1.1  mrg 	  break;
   4158  1.1  mrg 	}
   4159  1.1  mrg       else
   4160  1.1  mrg 	{
   4161  1.1  mrg 	  int neg = 0;
   4162  1.1  mrg 	  if (kind != 4 && total_shift < 16)
   4163  1.1  mrg 	    {
   4164  1.1  mrg 	      neg = -ext_ashl_lshr_seq[total_shift].amount[1];
   4165  1.1  mrg 	      if (neg > 0)
   4166  1.1  mrg 		neg -= ext_ashl_lshr_seq[total_shift].amount[2];
   4167  1.1  mrg 	      else
   4168  1.1  mrg 		neg = 0;
   4169  1.1  mrg 	    }
   4170  1.1  mrg 	  emit_insn (gen_and_shl_scratch (dest, source,
   4171  1.1  mrg 					  GEN_INT (right),
   4172  1.1  mrg 					  GEN_INT (mask),
   4173  1.1  mrg 					  GEN_INT (total_shift + neg),
   4174  1.1  mrg 					  GEN_INT (neg)));
   4175  1.1  mrg 	  emit_insn (gen_movsi (dest, dest));
   4176  1.1  mrg 	  break;
   4177  1.1  mrg 	}
   4178  1.1  mrg     }
   4179  1.1  mrg   return false;
   4180  1.1  mrg }
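
/* Continuing the example from shl_and_kind (a sketch): for kind 1 with
   LEFT_RTX = 2 and MASK_RTX = 0x3fc, the code above emits a QImode zero
   extension of SOURCE into DEST followed by gen_shifty_hi_op (ASHIFT, 2),
   i.e. roughly
	extu.b	rm,rn
	shll2	rn  */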
   4181  1.1  mrg 
   4182  1.1  mrg /* Try to find a good way to implement the combiner pattern
   4183  1.1  mrg   [(set (match_operand:SI 0 "register_operand" "=r")
   4184  1.1  mrg         (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
    4185  1.1  mrg                                     (match_operand:SI 2 "const_int_operand" "n"))
   4186  1.1  mrg                          (match_operand:SI 3 "const_int_operand" "n")
   4187  1.1  mrg                          (const_int 0)))
   4188  1.1  mrg    (clobber (reg:SI T_REG))]
   4189  1.1  mrg   LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
   4190  1.1  mrg   return 0 for simple left / right shift combination.
   4191  1.1  mrg   return 1 for left shift / 8 bit sign extend / left shift.
   4192  1.1  mrg   return 2 for left shift / 16 bit sign extend / left shift.
   4193  1.1  mrg   return 3 for left shift / 8 bit sign extend / shift / sign extend.
   4194  1.1  mrg   return 4 for left shift / 16 bit sign extend / shift / sign extend.
    4195  1.1  mrg   return 5 for left shift / 16 bit sign extend / right shift.
   4196  1.1  mrg   return 6 for < 8 bit sign extend / left shift.
   4197  1.1  mrg   return 7 for < 8 bit sign extend / left shift / single right shift.
   4198  1.1  mrg   If COSTP is nonzero, assign the calculated cost to *COSTP.  */
   4199  1.1  mrg int
   4200  1.1  mrg shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
   4201  1.1  mrg {
   4202  1.1  mrg   int left, size, insize, ext;
   4203  1.1  mrg   int cost = 0, best_cost;
   4204  1.1  mrg   int kind;
   4205  1.1  mrg 
   4206  1.1  mrg   left = INTVAL (left_rtx);
   4207  1.1  mrg   size = INTVAL (size_rtx);
   4208  1.1  mrg   insize = size - left;
   4209  1.1  mrg   gcc_assert (insize > 0);
   4210  1.1  mrg   /* Default to left / right shift.  */
   4211  1.1  mrg   kind = 0;
   4212  1.1  mrg   best_cost = ashl_lshr_seq[32 - insize].insn_count
   4213  1.1  mrg 	      + ashl_lshr_seq[32 - size].insn_count;
   4214  1.1  mrg   if (size <= 16)
   4215  1.1  mrg     {
   4216  1.1  mrg       /* 16 bit shift / sign extend / 16 bit shift */
   4217  1.1  mrg       cost = ashl_lshr_seq[16 - insize].insn_count + 1
   4218  1.1  mrg 	     + ashl_lshr_seq[16 - size].insn_count;
   4219  1.1  mrg       /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
   4220  1.1  mrg 	 below, by alternative 3 or something even better.  */
   4221  1.1  mrg       if (cost < best_cost)
   4222  1.1  mrg 	{
   4223  1.1  mrg 	  kind = 5;
   4224  1.1  mrg 	  best_cost = cost;
   4225  1.1  mrg 	}
   4226  1.1  mrg     }
   4227  1.1  mrg   /* Try a plain sign extend between two shifts.  */
   4228  1.1  mrg   for (ext = 16; ext >= insize; ext -= 8)
   4229  1.1  mrg     {
   4230  1.1  mrg       if (ext <= size)
   4231  1.1  mrg 	{
   4232  1.1  mrg 	  cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
   4233  1.1  mrg 		 + ashl_lshr_seq[size - ext].insn_count;
   4234  1.1  mrg 	  if (cost < best_cost)
   4235  1.1  mrg 	    {
   4236  1.1  mrg 	      kind = ext / (unsigned) 8;
   4237  1.1  mrg 	      best_cost = cost;
   4238  1.1  mrg 	    }
   4239  1.1  mrg 	}
   4240  1.1  mrg       /* Check if we can do a sloppy shift with a final signed shift
   4241  1.1  mrg 	 restoring the sign.  */
   4242  1.1  mrg       if (EXT_SHIFT_SIGNED (size - ext))
   4243  1.1  mrg 	cost = ext_ashl_lshr_seq[ext - insize].insn_count
   4244  1.1  mrg 	       + ext_ashl_lshr_seq[size - ext].insn_count + 1;
   4245  1.1  mrg       /* If not, maybe it's still cheaper to do the second shift sloppy,
   4246  1.1  mrg 	 and do a final sign extend?  */
   4247  1.1  mrg       else if (size <= 16)
   4248  1.1  mrg 	cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
   4249  1.1  mrg 	  + ext_ashl_lshr_seq[size > ext ? size - ext : ext - size].insn_count
   4250  1.1  mrg 	  + 1;
   4251  1.1  mrg       else
   4252  1.1  mrg 	continue;
   4253  1.1  mrg       if (cost < best_cost)
   4254  1.1  mrg 	{
   4255  1.1  mrg 	  kind = ext / (unsigned) 8 + 2;
   4256  1.1  mrg 	  best_cost = cost;
   4257  1.1  mrg 	}
   4258  1.1  mrg     }
   4259  1.1  mrg   /* Check if we can sign extend in r0 */
   4260  1.1  mrg   if (insize < 8)
   4261  1.1  mrg     {
   4262  1.1  mrg       cost = 3 + ashl_lshr_seq[left].insn_count;
   4263  1.1  mrg       if (cost < best_cost)
   4264  1.1  mrg 	{
   4265  1.1  mrg 	  kind = 6;
   4266  1.1  mrg 	  best_cost = cost;
   4267  1.1  mrg 	}
   4268  1.1  mrg       /* Try the same with a final signed shift.  */
   4269  1.1  mrg       if (left < 31)
   4270  1.1  mrg 	{
   4271  1.1  mrg 	  cost = 3 + ext_ashl_lshr_seq[left + 1].insn_count + 1;
   4272  1.1  mrg 	  if (cost < best_cost)
   4273  1.1  mrg 	    {
   4274  1.1  mrg 	      kind = 7;
   4275  1.1  mrg 	      best_cost = cost;
   4276  1.1  mrg 	    }
   4277  1.1  mrg 	}
   4278  1.1  mrg     }
   4279  1.1  mrg   if (TARGET_DYNSHIFT)
   4280  1.1  mrg     {
   4281  1.1  mrg       /* Try to use a dynamic shift.  */
   4282  1.1  mrg       cost = ashl_lshr_seq[32 - insize].insn_count + 1 + SH_DYNAMIC_SHIFT_COST;
   4283  1.1  mrg       if (cost < best_cost)
   4284  1.1  mrg 	{
   4285  1.1  mrg 	  kind = 0;
   4286  1.1  mrg 	  best_cost = cost;
   4287  1.1  mrg 	}
   4288  1.1  mrg     }
   4289  1.1  mrg   if (costp)
   4290  1.1  mrg     *costp = cost;
   4291  1.1  mrg   return kind;
   4292  1.1  mrg }
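
/* Worked example (a sketch, assuming the shift tables give 0 and 1 insns
   for shift counts 0 and 2): for LEFT_RTX = 2 and SIZE_RTX = 10, INSIZE is
   8, so the plain sign extend between two shifts wins with a cost of 2
   (no pre-shift, an 8 bit sign extend, then a left shift by 2) and the
   function returns kind 1.  */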
   4293  1.1  mrg 
   4294  1.1  mrg /* Function to be used in the length attribute of the instructions
   4295  1.1  mrg    implementing this pattern.  */
   4296  1.1  mrg int
   4297  1.1  mrg shl_sext_length (rtx insn)
   4298  1.1  mrg {
   4299  1.1  mrg   rtx set_src, left_rtx, size_rtx;
   4300  1.1  mrg   int cost;
   4301  1.1  mrg 
   4302  1.1  mrg   set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
   4303  1.1  mrg   left_rtx = XEXP (XEXP (set_src, 0), 1);
   4304  1.1  mrg   size_rtx = XEXP (set_src, 1);
   4305  1.1  mrg   shl_sext_kind (left_rtx, size_rtx, &cost);
   4306  1.1  mrg   return cost;
   4307  1.1  mrg }
   4308  1.1  mrg 
   4309  1.1  mrg /* Generate rtl for this pattern */
   4310  1.1  mrg bool
   4311  1.1  mrg gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
   4312  1.1  mrg {
   4313  1.1  mrg   int kind;
   4314  1.1  mrg   int left, size, insize, cost;
   4315  1.1  mrg   rtx operands[3];
   4316  1.1  mrg 
   4317  1.1  mrg   kind = shl_sext_kind (left_rtx, size_rtx, &cost);
   4318  1.1  mrg   left = INTVAL (left_rtx);
   4319  1.1  mrg   size = INTVAL (size_rtx);
   4320  1.1  mrg   insize = size - left;
   4321  1.1  mrg   switch (kind)
   4322  1.1  mrg     {
   4323  1.1  mrg     case 1:
   4324  1.1  mrg     case 2:
   4325  1.1  mrg     case 3:
   4326  1.1  mrg     case 4:
   4327  1.1  mrg       {
   4328  1.1  mrg 	int ext = kind & 1 ? 8 : 16;
   4329  1.1  mrg 	int shift2 = size - ext;
   4330  1.1  mrg 
   4331  1.1  mrg 	/* Don't expand fine-grained when combining, because that will
   4332  1.1  mrg 	   make the pattern fail.  */
   4333  1.1  mrg 	if (! currently_expanding_to_rtl
   4334  1.1  mrg 	    && ! reload_in_progress && ! reload_completed)
   4335  1.1  mrg 	  {
   4336  1.1  mrg 	    emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
   4337  1.1  mrg 	    emit_insn (gen_movsi (dest, source));
   4338  1.1  mrg 	    break;
   4339  1.1  mrg 	  }
   4340  1.1  mrg 	if (dest != source)
   4341  1.1  mrg 	  emit_insn (gen_movsi (dest, source));
   4342  1.1  mrg 	operands[0] = dest;
   4343  1.1  mrg 	if (ext - insize)
   4344  1.1  mrg 	  {
   4345  1.1  mrg 	    operands[2] = GEN_INT (ext - insize);
   4346  1.1  mrg 	    gen_shifty_hi_op (ASHIFT, operands);
   4347  1.1  mrg 	  }
   4348  1.1  mrg 	emit_insn (kind & 1
   4349  1.1  mrg 		   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
   4350  1.1  mrg 		   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
   4351  1.1  mrg 	if (kind <= 2)
   4352  1.1  mrg 	  {
   4353  1.1  mrg 	    if (shift2)
   4354  1.1  mrg 	      {
   4355  1.1  mrg 		operands[2] = GEN_INT (shift2);
   4356  1.1  mrg 		gen_shifty_op (ASHIFT, operands);
   4357  1.1  mrg 	      }
   4358  1.1  mrg 	  }
   4359  1.1  mrg 	else
   4360  1.1  mrg 	  {
   4361  1.1  mrg 	    if (shift2 > 0)
   4362  1.1  mrg 	      {
   4363  1.1  mrg 		if (EXT_SHIFT_SIGNED (shift2))
   4364  1.1  mrg 		  {
   4365  1.1  mrg 		    operands[2] = GEN_INT (shift2 + 1);
   4366  1.1  mrg 		    gen_shifty_op (ASHIFT, operands);
   4367  1.1  mrg 		    operands[2] = const1_rtx;
   4368  1.1  mrg 		    gen_shifty_op (ASHIFTRT, operands);
   4369  1.1  mrg 		    break;
   4370  1.1  mrg 		  }
   4371  1.1  mrg 		operands[2] = GEN_INT (shift2);
   4372  1.1  mrg 		gen_shifty_hi_op (ASHIFT, operands);
   4373  1.1  mrg 	      }
   4374  1.1  mrg 	    else if (shift2)
   4375  1.1  mrg 	      {
   4376  1.1  mrg 		operands[2] = GEN_INT (-shift2);
   4377  1.1  mrg 		gen_shifty_hi_op (LSHIFTRT, operands);
   4378  1.1  mrg 	      }
   4379  1.1  mrg 	    emit_insn (size <= 8
   4380  1.1  mrg 		       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
   4381  1.1  mrg 		       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
   4382  1.1  mrg 	  }
   4383  1.1  mrg 	break;
   4384  1.1  mrg       }
   4385  1.1  mrg     case 5:
   4386  1.1  mrg       {
   4387  1.1  mrg 	int i = 16 - size;
   4388  1.1  mrg 	if (! currently_expanding_to_rtl
   4389  1.1  mrg 	    && ! reload_in_progress && ! reload_completed)
   4390  1.1  mrg 	  emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
   4391  1.1  mrg 	else
   4392  1.1  mrg 	  {
   4393  1.1  mrg 	    operands[0] = dest;
   4394  1.1  mrg 	    operands[2] = GEN_INT (16 - insize);
   4395  1.1  mrg 	    gen_shifty_hi_op (ASHIFT, operands);
   4396  1.1  mrg 	    emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
   4397  1.1  mrg 	  }
   4398  1.1  mrg 	/* Don't use gen_ashrsi3 because it generates new pseudos.  */
   4399  1.1  mrg 	while (--i >= 0)
   4400  1.1  mrg 	  gen_ashift (ASHIFTRT, 1, dest);
   4401  1.1  mrg 	break;
   4402  1.1  mrg       }
   4403  1.1  mrg     case 6:
   4404  1.1  mrg     case 7:
   4405  1.1  mrg       /* Don't expand fine-grained when combining, because that will
   4406  1.1  mrg 	 make the pattern fail.  */
   4407  1.1  mrg       if (! currently_expanding_to_rtl
   4408  1.1  mrg 	  && ! reload_in_progress && ! reload_completed)
   4409  1.1  mrg 	{
   4410  1.1  mrg 	  emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
   4411  1.1  mrg 	  emit_insn (gen_movsi (dest, source));
   4412  1.1  mrg 	  break;
   4413  1.1  mrg 	}
   4414  1.1  mrg       emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
   4415  1.1  mrg       emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
   4416  1.1  mrg       emit_insn (gen_addsi3 (dest, dest, GEN_INT (HOST_WIDE_INT_M1U << (insize - 1))));
   4417  1.1  mrg       operands[0] = dest;
   4418  1.1  mrg       operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
   4419  1.1  mrg       gen_shifty_op (ASHIFT, operands);
   4420  1.1  mrg       if (kind == 7)
   4421  1.1  mrg 	emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
   4422  1.1  mrg       break;
   4423  1.1  mrg     default:
   4424  1.1  mrg       return true;
   4425  1.1  mrg     }
   4426  1.1  mrg   return false;
   4427  1.1  mrg }
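
/* The kind 6/7 expansion above uses the usual mask/xor/add identity for
   sign extending an INSIZE bit field v (shown here for illustration):
	((v & ((1 << insize) - 1)) ^ (1 << (insize - 1))) - (1 << (insize - 1))
   which equals the sign extended value of the low INSIZE bits and is then
   shifted left into place (with one extra arithmetic right shift for
   kind 7).  */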
   4428  1.1  mrg 
   4429  1.1  mrg typedef struct label_ref_list_d
   4430  1.1  mrg {
   4431  1.1  mrg   rtx_code_label *label;
   4432  1.1  mrg   struct label_ref_list_d *next;
   4433  1.1  mrg } *label_ref_list_t;
   4434  1.1  mrg 
   4435  1.1  mrg static object_allocator<label_ref_list_d> label_ref_list_d_pool
   4436  1.1  mrg   ("label references list");
   4437  1.1  mrg 
    4438  1.1  mrg /* The SH cannot load a large constant into a register; constants have to
   4439  1.1  mrg    come from a pc relative load.  The reference of a pc relative load
   4440  1.1  mrg    instruction must be less than 1k in front of the instruction.  This
   4441  1.1  mrg    means that we often have to dump a constant inside a function, and
   4442  1.1  mrg    generate code to branch around it.
   4443  1.1  mrg 
   4444  1.1  mrg    It is important to minimize this, since the branches will slow things
   4445  1.1  mrg    down and make things bigger.
   4446  1.1  mrg 
   4447  1.1  mrg    Worst case code looks like:
   4448  1.1  mrg 
   4449  1.1  mrg    mov.l L1,rn
   4450  1.1  mrg    bra   L2
   4451  1.1  mrg    nop
   4452  1.1  mrg    align
   4453  1.1  mrg    L1:   .long value
   4454  1.1  mrg    L2:
   4455  1.1  mrg    ..
   4456  1.1  mrg 
   4457  1.1  mrg    mov.l L3,rn
   4458  1.1  mrg    bra   L4
   4459  1.1  mrg    nop
   4460  1.1  mrg    align
   4461  1.1  mrg    L3:   .long value
   4462  1.1  mrg    L4:
   4463  1.1  mrg    ..
   4464  1.1  mrg 
   4465  1.1  mrg    We fix this by performing a scan before scheduling, which notices which
   4466  1.1  mrg    instructions need to have their operands fetched from the constant table
   4467  1.1  mrg    and builds the table.
   4468  1.1  mrg 
   4469  1.1  mrg    The algorithm is:
   4470  1.1  mrg 
   4471  1.1  mrg    scan, find an instruction which needs a pcrel move.  Look forward, find the
   4472  1.1  mrg    last barrier which is within MAX_COUNT bytes of the requirement.
   4473  1.1  mrg    If there isn't one, make one.  Process all the instructions between
   4474  1.1  mrg    the find and the barrier.
   4475  1.1  mrg 
   4476  1.1  mrg    In the above example, we can tell that L3 is within 1k of L1, so
   4477  1.1  mrg    the first move can be shrunk from the 3 insn+constant sequence into
   4478  1.1  mrg    just 1 insn, and the constant moved to L3 to make:
   4479  1.1  mrg 
   4480  1.1  mrg    mov.l        L1,rn
   4481  1.1  mrg    ..
   4482  1.1  mrg    mov.l        L3,rn
   4483  1.1  mrg    bra          L4
   4484  1.1  mrg    nop
   4485  1.1  mrg    align
   4486  1.1  mrg    L3:.long value
   4487  1.1  mrg    L4:.long value
   4488  1.1  mrg 
   4489  1.1  mrg    Then the second move becomes the target for the shortening process.  */
   4490  1.1  mrg 
   4491  1.1  mrg typedef struct
   4492  1.1  mrg {
   4493  1.1  mrg   rtx value;			/* Value in table.  */
   4494  1.1  mrg   rtx_code_label *label;	/* Label of value.  */
   4495  1.1  mrg   label_ref_list_t wend;	/* End of window.  */
   4496  1.1  mrg   machine_mode mode;	/* Mode of value.  */
   4497  1.1  mrg 
   4498  1.1  mrg   /* True if this constant is accessed as part of a post-increment
   4499  1.1  mrg      sequence.  Note that HImode constants are never accessed in this way.  */
   4500  1.1  mrg   bool part_of_sequence_p;
   4501  1.1  mrg } pool_node;
   4502  1.1  mrg 
   4503  1.1  mrg /* The maximum number of constants that can fit into one pool, since
   4504  1.1  mrg    constants in the range 0..510 are at least 2 bytes long, and in the
   4505  1.1  mrg    range from there to 1018 at least 4 bytes.  */
   4506  1.1  mrg 
   4507  1.1  mrg #define MAX_POOL_SIZE 372
   4508  1.1  mrg static pool_node pool_vector[MAX_POOL_SIZE];
   4509  1.1  mrg static int pool_size;
   4510  1.1  mrg static rtx_code_label *pool_window_label;
   4511  1.1  mrg static int pool_window_last;
   4512  1.1  mrg 
   4513  1.1  mrg static int max_labelno_before_reorg;
   4514  1.1  mrg 
   4515  1.1  mrg /* ??? If we need a constant in HImode which is the truncated value of a
   4516  1.1  mrg    constant we need in SImode, we could combine the two entries thus saving
    4517  1.1  mrg    constant we need in SImode, we could combine the two entries, thus saving
   4518  1.1  mrg    it?  */
   4519  1.1  mrg 
   4520  1.1  mrg /* ??? This stuff should be done at the same time that we shorten branches.
   4521  1.1  mrg    As it is now, we must assume that all branches are the maximum size, and
   4522  1.1  mrg    this causes us to almost always output constant pools sooner than
   4523  1.1  mrg    necessary.  */
   4524  1.1  mrg 
   4525  1.1  mrg /* Add a constant to the pool and return its label.  */
   4526  1.1  mrg static rtx_code_label *
   4527  1.1  mrg add_constant (rtx x, machine_mode mode, rtx last_value)
   4528  1.1  mrg {
   4529  1.1  mrg   rtx_code_label *lab, *new_rtx;
   4530  1.1  mrg   label_ref_list_t ref, newref;
   4531  1.1  mrg 
   4532  1.1  mrg   /* First see if we've already got it.  */
   4533  1.1  mrg   for (int i = 0; i < pool_size; i++)
   4534  1.1  mrg     {
   4535  1.1  mrg       if (x->code == pool_vector[i].value->code
   4536  1.1  mrg 	  && mode == pool_vector[i].mode)
   4537  1.1  mrg 	{
   4538  1.1  mrg 	  if (x->code == CODE_LABEL)
   4539  1.1  mrg 	    {
   4540  1.1  mrg 	      if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
   4541  1.1  mrg 		continue;
   4542  1.1  mrg 	    }
   4543  1.1  mrg 	  if (rtx_equal_p (x, pool_vector[i].value))
   4544  1.1  mrg 	    {
   4545  1.1  mrg 	      lab = new_rtx = 0;
   4546  1.1  mrg 	      if (! last_value
   4547  1.1  mrg 		  || ! i
   4548  1.1  mrg 		  || ! rtx_equal_p (last_value, pool_vector[i-1].value))
   4549  1.1  mrg 		{
   4550  1.1  mrg 		  new_rtx = gen_label_rtx ();
   4551  1.1  mrg 		  LABEL_REFS (new_rtx) = pool_vector[i].label;
   4552  1.1  mrg 		  pool_vector[i].label = lab = new_rtx;
   4553  1.1  mrg 		}
   4554  1.1  mrg 	      if (lab && pool_window_label)
   4555  1.1  mrg 		{
   4556  1.1  mrg 		  newref = label_ref_list_d_pool.allocate ();
   4557  1.1  mrg 		  newref->label = pool_window_label;
   4558  1.1  mrg 		  ref = pool_vector[pool_window_last].wend;
   4559  1.1  mrg 		  newref->next = ref;
   4560  1.1  mrg 		  pool_vector[pool_window_last].wend = newref;
   4561  1.1  mrg 		}
   4562  1.1  mrg 	      if (new_rtx)
   4563  1.1  mrg 		pool_window_label = new_rtx;
   4564  1.1  mrg 	      pool_window_last = i;
   4565  1.1  mrg 	      return lab;
   4566  1.1  mrg 	    }
   4567  1.1  mrg 	}
   4568  1.1  mrg     }
   4569  1.1  mrg 
   4570  1.1  mrg   /* Need a new one.  */
   4571  1.1  mrg   pool_vector[pool_size].value = x;
   4572  1.1  mrg   if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
   4573  1.1  mrg     {
   4574  1.1  mrg       lab = 0;
   4575  1.1  mrg       pool_vector[pool_size - 1].part_of_sequence_p = true;
   4576  1.1  mrg     }
   4577  1.1  mrg   else
   4578  1.1  mrg     lab = gen_label_rtx ();
   4579  1.1  mrg   pool_vector[pool_size].mode = mode;
   4580  1.1  mrg   pool_vector[pool_size].label = lab;
   4581  1.1  mrg   pool_vector[pool_size].wend = NULL;
   4582  1.1  mrg   pool_vector[pool_size].part_of_sequence_p = (lab == 0);
   4583  1.1  mrg   if (lab && pool_window_label)
   4584  1.1  mrg     {
   4585  1.1  mrg       newref = label_ref_list_d_pool.allocate ();
   4586  1.1  mrg       newref->label = pool_window_label;
   4587  1.1  mrg       ref = pool_vector[pool_window_last].wend;
   4588  1.1  mrg       newref->next = ref;
   4589  1.1  mrg       pool_vector[pool_window_last].wend = newref;
   4590  1.1  mrg     }
   4591  1.1  mrg   if (lab)
   4592  1.1  mrg     pool_window_label = lab;
   4593  1.1  mrg   pool_window_last = pool_size;
   4594  1.1  mrg   pool_size++;
   4595  1.1  mrg   return lab;
   4596  1.1  mrg }
   4597  1.1  mrg 
   4598  1.1  mrg /* Output the literal table.  START, if nonzero, is the first instruction
   4599  1.1  mrg    this table is needed for, and also indicates that there is at least one
    4600  1.1  mrg    casesi_worker_2 instruction; we have to emit the operand3 labels from
    4601  1.1  mrg    these insns at a 4-byte aligned position.  BARRIER is the barrier
   4602  1.1  mrg    after which we are to place the table.  */
   4603  1.1  mrg static void
   4604  1.1  mrg dump_table (rtx_insn *start, rtx_insn *barrier)
   4605  1.1  mrg {
   4606  1.1  mrg   rtx_insn *scan = barrier;
   4607  1.1  mrg   bool need_align = true;
   4608  1.1  mrg   rtx_code_label *lab;
   4609  1.1  mrg   label_ref_list_t ref;
   4610  1.1  mrg   bool have_df = false;
   4611  1.1  mrg 
    4612  1.1  mrg   /* Do two passes; the first time, dump out the HI sized constants.  */
   4613  1.1  mrg 
   4614  1.1  mrg   for (int i = 0; i < pool_size; i++)
   4615  1.1  mrg     {
   4616  1.1  mrg       pool_node *p = &pool_vector[i];
   4617  1.1  mrg 
   4618  1.1  mrg       if (p->mode == HImode)
   4619  1.1  mrg 	{
   4620  1.1  mrg 	  if (need_align)
   4621  1.1  mrg 	    {
   4622  1.1  mrg 	      scan = emit_insn_after (gen_align_2 (), scan);
   4623  1.1  mrg 	      need_align = false;
   4624  1.1  mrg 	    }
   4625  1.1  mrg 	  for (lab = p->label; lab;
   4626  1.1  mrg 	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4627  1.1  mrg 	    scan = emit_label_after (lab, scan);
   4628  1.1  mrg 	  scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
   4629  1.1  mrg 				  scan);
   4630  1.1  mrg 	  for (ref = p->wend; ref; ref = ref->next)
   4631  1.1  mrg 	    {
   4632  1.1  mrg 	      lab = ref->label;
   4633  1.1  mrg 	      scan = emit_insn_after (gen_consttable_window_end (lab), scan);
   4634  1.1  mrg 	    }
   4635  1.1  mrg 	}
   4636  1.1  mrg       else if (p->mode == DFmode)
   4637  1.1  mrg 	have_df = true;
   4638  1.1  mrg     }
   4639  1.1  mrg 
   4640  1.1  mrg   need_align = true;
   4641  1.1  mrg 
   4642  1.1  mrg   if (start)
   4643  1.1  mrg     {
   4644  1.1  mrg       scan = emit_insn_after (gen_align_4 (), scan);
   4645  1.1  mrg       need_align = false;
   4646  1.1  mrg       for (; start != barrier; start = NEXT_INSN (start))
   4647  1.1  mrg 	if (NONJUMP_INSN_P (start)
   4648  1.1  mrg 	    && recog_memoized (start) == CODE_FOR_casesi_worker_2)
   4649  1.1  mrg 	  {
   4650  1.1  mrg 	    rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
   4651  1.1  mrg 	    rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
   4652  1.1  mrg 
   4653  1.1  mrg 	    scan = emit_label_after (as_a <rtx_insn *> (lab), scan);
   4654  1.1  mrg 	  }
   4655  1.1  mrg     }
   4656  1.1  mrg   if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
   4657  1.1  mrg     {
   4658  1.1  mrg       rtx_insn *align_insn = NULL;
   4659  1.1  mrg 
   4660  1.1  mrg       scan = emit_label_after (gen_label_rtx (), scan);
   4661  1.1  mrg       scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
   4662  1.1  mrg       need_align = false;
   4663  1.1  mrg 
   4664  1.1  mrg       for (int i = 0; i < pool_size; i++)
   4665  1.1  mrg 	{
   4666  1.1  mrg 	  pool_node *p = &pool_vector[i];
   4667  1.1  mrg 
   4668  1.1  mrg 	  switch (p->mode)
   4669  1.1  mrg 	    {
   4670  1.1  mrg 	    case E_HImode:
   4671  1.1  mrg 	      break;
   4672  1.1  mrg 	    case E_SImode:
   4673  1.1  mrg 	    case E_SFmode:
   4674  1.1  mrg 	      if (align_insn && !p->part_of_sequence_p)
   4675  1.1  mrg 		{
   4676  1.1  mrg 		  for (lab = p->label; lab;
   4677  1.1  mrg 		       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4678  1.1  mrg 		    emit_label_before (lab, align_insn);
   4679  1.1  mrg 		  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
   4680  1.1  mrg 				    align_insn);
   4681  1.1  mrg 		  for (ref = p->wend; ref; ref = ref->next)
   4682  1.1  mrg 		    {
   4683  1.1  mrg 		      lab = ref->label;
   4684  1.1  mrg 		      emit_insn_before (gen_consttable_window_end (lab),
   4685  1.1  mrg 					align_insn);
   4686  1.1  mrg 		    }
   4687  1.1  mrg 		  delete_insn (align_insn);
   4688  1.1  mrg 		  align_insn = NULL;
   4689  1.1  mrg 		  continue;
   4690  1.1  mrg 		}
   4691  1.1  mrg 	      else
   4692  1.1  mrg 		{
   4693  1.1  mrg 		  for (lab = p->label; lab;
   4694  1.1  mrg 		       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4695  1.1  mrg 		    scan = emit_label_after (lab, scan);
   4696  1.1  mrg 		  scan = emit_insn_after (gen_consttable_4 (p->value,
   4697  1.1  mrg 							    const0_rtx), scan);
   4698  1.1  mrg 		  need_align = ! need_align;
   4699  1.1  mrg 		}
   4700  1.1  mrg 	      break;
   4701  1.1  mrg 	    case E_DFmode:
   4702  1.1  mrg 	      if (need_align)
   4703  1.1  mrg 		{
   4704  1.1  mrg 		  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
   4705  1.1  mrg 		  align_insn = scan;
   4706  1.1  mrg 		  need_align = false;
   4707  1.1  mrg 		}
   4708  1.1  mrg 	      /* FALLTHRU */
   4709  1.1  mrg 	    case E_DImode:
   4710  1.1  mrg 	      for (lab = p->label; lab;
   4711  1.1  mrg 		   lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4712  1.1  mrg 		scan = emit_label_after (lab, scan);
   4713  1.1  mrg 	      scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
   4714  1.1  mrg 				      scan);
   4715  1.1  mrg 	      break;
   4716  1.1  mrg 	    default:
   4717  1.1  mrg 	      gcc_unreachable ();
   4718  1.1  mrg 	    }
   4719  1.1  mrg 
   4720  1.1  mrg 	  if (p->mode != HImode)
   4721  1.1  mrg 	    {
   4722  1.1  mrg 	      for (ref = p->wend; ref; ref = ref->next)
   4723  1.1  mrg 		{
   4724  1.1  mrg 		  lab = ref->label;
   4725  1.1  mrg 		  scan = emit_insn_after (gen_consttable_window_end (lab),
   4726  1.1  mrg 					  scan);
   4727  1.1  mrg 		}
   4728  1.1  mrg 	    }
   4729  1.1  mrg 	}
   4730  1.1  mrg 
   4731  1.1  mrg       pool_size = 0;
   4732  1.1  mrg     }
   4733  1.1  mrg 
   4734  1.1  mrg   for (int i = 0; i < pool_size; i++)
   4735  1.1  mrg     {
   4736  1.1  mrg       pool_node *p = &pool_vector[i];
   4737  1.1  mrg 
   4738  1.1  mrg       switch (p->mode)
   4739  1.1  mrg 	{
   4740  1.1  mrg 	case E_HImode:
   4741  1.1  mrg 	  break;
   4742  1.1  mrg 	case E_SImode:
   4743  1.1  mrg 	case E_SFmode:
   4744  1.1  mrg 	  if (need_align)
   4745  1.1  mrg 	    {
   4746  1.1  mrg 	      need_align = false;
   4747  1.1  mrg 	      scan = emit_label_after (gen_label_rtx (), scan);
   4748  1.1  mrg 	      scan = emit_insn_after (gen_align_4 (), scan);
   4749  1.1  mrg 	    }
   4750  1.1  mrg 	  for (lab = p->label; lab;
   4751  1.1  mrg 	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4752  1.1  mrg 	    scan = emit_label_after (lab, scan);
   4753  1.1  mrg 	  scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
   4754  1.1  mrg 				  scan);
   4755  1.1  mrg 	  break;
   4756  1.1  mrg 	case E_DFmode:
   4757  1.1  mrg 	case E_DImode:
   4758  1.1  mrg 	  if (need_align)
   4759  1.1  mrg 	    {
   4760  1.1  mrg 	      need_align = false;
   4761  1.1  mrg 	      scan = emit_label_after (gen_label_rtx (), scan);
   4762  1.1  mrg 	      scan = emit_insn_after (gen_align_4 (), scan);
   4763  1.1  mrg 	    }
   4764  1.1  mrg 	  for (lab = p->label; lab;
   4765  1.1  mrg 	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
   4766  1.1  mrg 	    scan = emit_label_after (lab, scan);
   4767  1.1  mrg 	  scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
   4768  1.1  mrg 				  scan);
   4769  1.1  mrg 	  break;
   4770  1.1  mrg 	default:
   4771  1.1  mrg 	  gcc_unreachable ();
   4772  1.1  mrg 	}
   4773  1.1  mrg 
   4774  1.1  mrg       if (p->mode != HImode)
   4775  1.1  mrg 	{
   4776  1.1  mrg 	  for (ref = p->wend; ref; ref = ref->next)
   4777  1.1  mrg 	    {
   4778  1.1  mrg 	      lab = ref->label;
   4779  1.1  mrg 	      scan = emit_insn_after (gen_consttable_window_end (lab), scan);
   4780  1.1  mrg 	    }
   4781  1.1  mrg 	}
   4782  1.1  mrg     }
   4783  1.1  mrg 
   4784  1.1  mrg   scan = emit_insn_after (gen_consttable_end (), scan);
   4785  1.1  mrg   scan = emit_barrier_after (scan);
   4786  1.1  mrg   pool_size = 0;
   4787  1.1  mrg   pool_window_label = NULL;
   4788  1.1  mrg   pool_window_last = 0;
   4789  1.1  mrg }
   4790  1.1  mrg 
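/* The first operand of the UNSPEC in a mova's source.  For a genuine mova
   this is a LABEL_REF; for mova_const it is a CONST (see mova_p below).  */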
   4791  1.1  mrg #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
   4792  1.1  mrg 
   4793  1.1  mrg /* Nonzero if the insn is a move instruction which needs to be fixed.  */
   4794  1.1  mrg 
   4795  1.1  mrg /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
   4796  1.1  mrg    CONST_DOUBLE input value is CONST_OK_FOR_I08.  For an SFmode move, we don't
   4797  1.1  mrg    need to fix it if the input value is CONST_OK_FOR_I08.  */
   4798  1.1  mrg static bool
   4799  1.1  mrg broken_move (rtx_insn *insn)
   4800  1.1  mrg {
   4801  1.1  mrg   if (NONJUMP_INSN_P (insn))
   4802  1.1  mrg     {
   4803  1.1  mrg       rtx pat = PATTERN (insn);
   4804  1.1  mrg       if (GET_CODE (pat) == PARALLEL)
   4805  1.1  mrg 	pat = XVECEXP (pat, 0, 0);
   4806  1.1  mrg       if (GET_CODE (pat) == SET
   4807  1.1  mrg 	  /* We can load any 8-bit value if we don't care what the high
   4808  1.1  mrg 	     order bits end up as.  */
   4809  1.1  mrg 	  && GET_MODE (SET_DEST (pat)) != QImode
   4810  1.1  mrg 	  && (CONSTANT_P (SET_SRC (pat))
   4811  1.1  mrg 	      || (GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
   4812  1.1  mrg 		  && XINT (SET_SRC (pat), 1) ==  UNSPECV_SP_SWITCH_B)
   4813  1.1  mrg 	      /* Match mova_const.  */
   4814  1.1  mrg 	      || (GET_CODE (SET_SRC (pat)) == UNSPEC
   4815  1.1  mrg 		  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
   4816  1.1  mrg 		  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
   4817  1.1  mrg 	  && ! (TARGET_SH2E
   4818  1.1  mrg 		&& GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
   4819  1.1  mrg 		&& (fp_zero_operand (SET_SRC (pat))
   4820  1.1  mrg 		    || fp_one_operand (SET_SRC (pat)))
   4821  1.1  mrg 		/* In general we don't know the current setting of fpscr, so
   4822  1.1  mrg 		   disable fldi.
   4823  1.1  mrg 		   There is an exception if this was a register-register move
   4824  1.1  mrg 		   before reload - and hence it was ascertained that we have
   4825  1.1  mrg 		   single precision setting - and in a post-reload optimization
   4826  1.1  mrg 		   we changed this to do a constant load.  In that case
   4827  1.1  mrg 		   we don't have an r0 clobber, hence we must use fldi.  */
   4828  1.1  mrg 		&& (TARGET_FMOVD
   4829  1.1  mrg 		    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
   4830  1.1  mrg 			== SCRATCH))
   4831  1.1  mrg 		&& REG_P (SET_DEST (pat))
   4832  1.1  mrg 		&& FP_REGISTER_P (REGNO (SET_DEST (pat))))
   4833  1.1  mrg 	  && ! (TARGET_SH2A
   4834  1.1  mrg 		&& GET_MODE (SET_DEST (pat)) == SImode
   4835  1.1  mrg 		&& (satisfies_constraint_I20 (SET_SRC (pat))
   4836  1.1  mrg 		   || satisfies_constraint_I28 (SET_SRC (pat))))
   4837  1.1  mrg 	  && ! satisfies_constraint_I08 (SET_SRC (pat)))
   4838  1.1  mrg 	return true;
   4839  1.1  mrg     }
   4840  1.1  mrg 
   4841  1.1  mrg   return false;
   4842  1.1  mrg }
   4843  1.1  mrg 
   4844  1.1  mrg /* Return true if the specified insn is a mova insn.  */
   4845  1.1  mrg static bool
   4846  1.1  mrg mova_p (rtx_insn *insn)
   4847  1.1  mrg {
   4848  1.1  mrg   return (NONJUMP_INSN_P (insn)
   4849  1.1  mrg 	  && GET_CODE (PATTERN (insn)) == SET
   4850  1.1  mrg 	  && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
   4851  1.1  mrg 	  && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
   4852  1.1  mrg 	  /* Don't match mova_const.  */
   4853  1.1  mrg 	  && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
   4854  1.1  mrg }
   4855  1.1  mrg 
   4856  1.1  mrg /* Fix up a mova from a switch that went out of range.  */
   4857  1.1  mrg static void
   4858  1.1  mrg fixup_mova (rtx_insn *mova)
   4859  1.1  mrg {
   4860  1.1  mrg   PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
   4861  1.1  mrg   if (! flag_pic)
   4862  1.1  mrg     {
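      /* Turn the mova into a plain load of the label address; broken_move
	 will then return true for it and it will be put into a constant
	 pool.  */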
   4863  1.1  mrg       SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
   4864  1.1  mrg       INSN_CODE (mova) = -1;
   4865  1.1  mrg     }
   4866  1.1  mrg   else
   4867  1.1  mrg     {
   4868  1.1  mrg       rtx_insn *worker = mova;
   4869  1.1  mrg       rtx_code_label *lab = gen_label_rtx ();
   4870  1.1  mrg       rtx wpat, wpat0, wpat1, wsrc, target, base, diff;
   4871  1.1  mrg 
   4872  1.1  mrg       do
   4873  1.1  mrg 	{
   4874  1.1  mrg 	  worker = NEXT_INSN (worker);
   4875  1.1  mrg 	  gcc_assert (worker
   4876  1.1  mrg 		      && !LABEL_P (worker)
   4877  1.1  mrg 		      && !JUMP_P (worker));
   4878  1.1  mrg 	} while (NOTE_P (worker)
   4879  1.1  mrg 		 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
   4880  1.1  mrg       wpat = PATTERN (worker);
   4881  1.1  mrg       wpat0 = XVECEXP (wpat, 0, 0);
   4882  1.1  mrg       wpat1 = XVECEXP (wpat, 0, 1);
   4883  1.1  mrg       wsrc = SET_SRC (wpat0);
   4884  1.1  mrg       PATTERN (worker) = (gen_casesi_worker_2
   4885  1.1  mrg 			  (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
   4886  1.1  mrg 			   XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
   4887  1.1  mrg 			   XEXP (wpat1, 0)));
   4888  1.1  mrg       INSN_CODE (worker) = -1;
   4889  1.1  mrg       target = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
   4890  1.1  mrg       base = gen_rtx_LABEL_REF (Pmode, lab);
   4891  1.1  mrg       diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, target, base), UNSPEC_SYMOFF);
   4892  1.1  mrg       SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
   4893  1.1  mrg       INSN_CODE (mova) = -1;
   4894  1.1  mrg     }
   4895  1.1  mrg }
   4896  1.1  mrg 
   4897  1.1  mrg /* NEW_MOVA is a mova we've just encountered while scanning forward.  Update
   4898  1.1  mrg    *num_mova, and check that the new mova is not nested within the first one.
   4899  1.1  mrg    Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
   4900  1.1  mrg    2 if new_mova has been assigned to *first_mova, -1 otherwise.  */
   4901  1.1  mrg static int
   4902  1.1  mrg untangle_mova (int *num_mova, rtx_insn **first_mova, rtx_insn *new_mova)
   4903  1.1  mrg {
   4904  1.1  mrg   int n_addr = 0; /* Initialization to shut up spurious warning.  */
   4905  1.1  mrg   int f_target, n_target = 0; /* Likewise.  */
   4906  1.1  mrg 
   4907  1.1  mrg   if (optimize)
   4908  1.1  mrg     {
   4909  1.1  mrg       /* If NEW_MOVA has no address yet, it will be handled later.  */
   4910  1.1  mrg       if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
   4911  1.1  mrg 	return -1;
   4912  1.1  mrg 
   4913  1.1  mrg       n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
   4914  1.1  mrg       n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
   4915  1.1  mrg       if (n_addr > n_target || n_addr + 1022 < n_target)
   4916  1.1  mrg 	{
   4917  1.1  mrg 	  /* Change the mova into a load.
   4918  1.1  mrg 	     broken_move will then return true for it.  */
   4919  1.1  mrg 	  fixup_mova (new_mova);
   4920  1.1  mrg 	  return 1;
   4921  1.1  mrg 	}
   4922  1.1  mrg     }
   4923  1.1  mrg   if (!(*num_mova)++)
   4924  1.1  mrg     {
   4925  1.1  mrg       *first_mova = new_mova;
   4926  1.1  mrg       return 2;
   4927  1.1  mrg     }
   4928  1.1  mrg   if (!optimize
   4929  1.1  mrg       || ((f_target
   4930  1.1  mrg 	   = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
   4931  1.1  mrg 	  >= n_target))
   4932  1.1  mrg     return -1;
   4933  1.1  mrg 
   4934  1.1  mrg   (*num_mova)--;
   4935  1.1  mrg   if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
   4936  1.1  mrg       > n_target - n_addr)
   4937  1.1  mrg     {
   4938  1.1  mrg       fixup_mova (*first_mova);
   4939  1.1  mrg       return 0;
   4940  1.1  mrg     }
   4941  1.1  mrg   else
   4942  1.1  mrg     {
   4943  1.1  mrg       fixup_mova (new_mova);
   4944  1.1  mrg       return 1;
   4945  1.1  mrg     }
   4946  1.1  mrg }
   4947  1.1  mrg 
   4948  1.1  mrg /* Find the last barrier from insn FROM which is close enough to hold the
   4949  1.1  mrg    constant pool.  If we can't find one, then create one near the end of
   4950  1.1  mrg    the range.  */
   4951  1.1  mrg static rtx_insn *
   4952  1.1  mrg find_barrier (int num_mova, rtx_insn *mova, rtx_insn *from)
   4953  1.1  mrg {
   4954  1.1  mrg   int count_si = 0;
   4955  1.1  mrg   int count_hi = 0;
   4956  1.1  mrg   int found_hi = 0;
   4957  1.1  mrg   int found_si = 0;
   4958  1.1  mrg   int hi_align = 2;
   4959  1.1  mrg   int si_align = 2;
   4960  1.1  mrg   int leading_mova = num_mova;
   4961  1.1  mrg   rtx_insn *barrier_before_mova = NULL;
   4962  1.1  mrg   rtx_insn *found_barrier = NULL;
   4963  1.1  mrg   rtx_insn *good_barrier = NULL;
   4964  1.1  mrg   int si_limit;
   4965  1.1  mrg   int hi_limit;
   4966  1.1  mrg   rtx_insn *orig = from;
   4967  1.1  mrg   rtx_insn *last_got = NULL;
   4968  1.1  mrg   rtx_insn *last_symoff = NULL;
   4969  1.1  mrg 
   4970  1.1  mrg   /* For HImode: range is 510, add 4 because pc counts from address of
   4971  1.1  mrg      second instruction after this one, subtract 2 for the jump instruction
   4972  1.1  mrg      that we may need to emit before the table, subtract 2 for the instruction
   4973  1.1  mrg      that fills the jump delay slot (in very rare cases, reorg will take an
   4974  1.1  mrg      instruction from after the constant pool or will leave the delay slot
   4975  1.1  mrg      empty).  This gives 510.
   4976  1.1  mrg      For SImode: range is 1020, add 4 because pc counts from address of
   4977  1.1  mrg      second instruction after this one, subtract 2 in case pc is 2 byte
   4978  1.1  mrg      aligned, subtract 2 for the jump instruction that we may need to emit
   4979  1.1  mrg      before the table, subtract 2 for the instruction that fills the jump
   4980  1.1  mrg      delay slot.  This gives 1018.  */
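  /* In other words, hi_limit = 510 + 4 - 2 - 2 = 510 and
     si_limit = 1020 + 4 - 2 - 2 - 2 = 1018.  */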
   4981  1.1  mrg 
   4982  1.1  mrg   /* The branch will always be shortened now that the reference address for
   4983  1.1  mrg      forward branches is the successor address, thus we need no longer make
   4984  1.1  mrg      adjustments to the [sh]i_limit for -O0.  */
   4985  1.1  mrg 
   4986  1.1  mrg   si_limit = 1018;
   4987  1.1  mrg   hi_limit = 510;
   4988  1.1  mrg 
   4989  1.1  mrg   while (from && count_si < si_limit && count_hi < hi_limit)
   4990  1.1  mrg     {
   4991  1.1  mrg       int inc = get_attr_length (from);
   4992  1.1  mrg       int new_align = 1;
   4993  1.1  mrg 
   4994  1.1  mrg       /* If this is a label that existed at the time of the compute_alignments
   4995  1.1  mrg 	 call, determine the alignment.  N.B.  When find_barrier recurses for
   4996  1.1  mrg 	 an out-of-reach mova, we might see labels at the start of previously
   4997  1.1  mrg 	 inserted constant tables.  */
   4998  1.1  mrg       if (LABEL_P (from)
   4999  1.1  mrg 	  && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
   5000  1.1  mrg 	{
   5001  1.1  mrg 	  if (optimize)
   5002  1.1  mrg 	    new_align = 1 << label_to_alignment (from).levels[0].log;
   5003  1.1  mrg 	  else if (BARRIER_P (prev_nonnote_insn (from)))
   5004  1.1  mrg 	    new_align = 1 << barrier_align (from);
   5005  1.1  mrg 	  else
   5006  1.1  mrg 	    new_align = 1;
   5007  1.1  mrg 	  inc = 0;
   5008  1.1  mrg 	}
   5009  1.1  mrg       /* In case we are scanning a constant table because of recursion, check
   5010  1.1  mrg 	 for explicit alignments.  If the table is long, we might be forced
   5011  1.1  mrg 	 to emit the new table in front of it; the length of the alignment
   5012  1.1  mrg 	 might be the last straw.  */
   5013  1.1  mrg       else if (NONJUMP_INSN_P (from)
   5014  1.1  mrg 	       && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
   5015  1.1  mrg 	       && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
   5016  1.1  mrg 	new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
   5017  1.1  mrg       /* When we find the end of a constant table, paste the new constant
   5018  1.1  mrg 	 at the end.  That is better than putting it in front because
   5019  1.1  mrg 	 this way, we don't need extra alignment for adding a 4-byte-aligned
   5020  1.1  mrg 	 mov(a) label to a 2/4 or 8/4 byte aligned table.  */
   5021  1.1  mrg       else if (NONJUMP_INSN_P (from)
   5022  1.1  mrg 	       && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
   5023  1.1  mrg 	       && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
   5024  1.1  mrg 	return from;
   5025  1.1  mrg 
   5026  1.1  mrg       if (BARRIER_P (from))
   5027  1.1  mrg 	{
   5028  1.1  mrg 	  rtx_insn *next;
   5029  1.1  mrg 
   5030  1.1  mrg 	  found_barrier = from;
   5031  1.1  mrg 
   5032  1.1  mrg 	  /* If we are at the end of the function, or in front of an alignment
   5033  1.1  mrg 	     instruction, we need not insert an extra alignment.  We prefer
   5034  1.1  mrg 	     this kind of barrier.  */
   5035  1.1  mrg 	  if (barrier_align (from) > 2)
   5036  1.1  mrg 	    good_barrier = from;
   5037  1.1  mrg 
   5038  1.1  mrg 	  /* If we are at the end of a hot/cold block, dump the constants
   5039  1.1  mrg 	     here.  */
   5040  1.1  mrg 	  next = NEXT_INSN (from);
   5041  1.1  mrg 	  if (next
   5042  1.1  mrg 	      && NOTE_P (next)
   5043  1.1  mrg 	      && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
   5044  1.1  mrg 	    break;
   5045  1.1  mrg 	}
   5046  1.1  mrg 
   5047  1.1  mrg       if (broken_move (from))
   5048  1.1  mrg 	{
   5049  1.1  mrg 	  rtx pat, src, dst;
   5050  1.1  mrg 	  machine_mode mode;
   5051  1.1  mrg 
   5052  1.1  mrg 	  pat = PATTERN (from);
   5053  1.1  mrg 	  if (GET_CODE (pat) == PARALLEL)
   5054  1.1  mrg 	    pat = XVECEXP (pat, 0, 0);
   5055  1.1  mrg 	  src = SET_SRC (pat);
   5056  1.1  mrg 	  dst = SET_DEST (pat);
   5057  1.1  mrg 	  mode = GET_MODE (dst);
   5058  1.1  mrg 
   5059  1.1  mrg 	  /* A GOT pc-relative setting comes in a pair of
   5060  1.1  mrg 	     mova	.L8,r0
   5061  1.1  mrg 	     mov.l	.L8,r12
   5062  1.1  mrg 	     instructions (plus an add r0,r12).
   5063  1.1  mrg 	     Remember if we see one without the other.  */
   5064  1.1  mrg 	  if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
   5065  1.1  mrg 	    last_got = last_got ? NULL : from;
   5066  1.1  mrg 	  else if (PIC_ADDR_P (src))
   5067  1.1  mrg 	    last_got = last_got ? NULL : from;
   5068  1.1  mrg 
   5069  1.1  mrg 	  /* We must explicitly check the mode, because sometimes the
   5070  1.1  mrg 	     front end will generate code to load unsigned constants into
   5071  1.1  mrg 	     HImode targets without properly sign extending them.  */
   5072  1.1  mrg 	  if (mode == HImode
   5073  1.1  mrg 	      || (mode == SImode && satisfies_constraint_I16 (src)
   5074  1.1  mrg 		  && REGNO (dst) != FPUL_REG))
   5075  1.1  mrg 	    {
   5076  1.1  mrg 	      found_hi += 2;
   5077  1.1  mrg 	      /* We put the short constants before the long constants, so
   5078  1.1  mrg 		 we must count the length of short constants in the range
   5079  1.1  mrg 		 for the long constants.  */
   5080  1.1  mrg 	      /* ??? This isn't optimal, but is easy to do.  */
   5081  1.1  mrg 	      si_limit -= 2;
   5082  1.1  mrg 	    }
   5083  1.1  mrg 	  else
   5084  1.1  mrg 	    {
   5085  1.1  mrg 	      /* We dump DF/DI constants before SF/SI ones, because
   5086  1.1  mrg 		 the limit is the same, but the alignment requirements
   5087  1.1  mrg 		 are higher.  We may waste up to 4 additional bytes
   5088  1.1  mrg 		 for alignment, and the DF/DI constant may have
   5089  1.1  mrg 		 another SF/SI constant placed before it.  */
   5090  1.1  mrg 	      while (si_align > 2 && found_si + si_align - 2 > count_si)
   5091  1.1  mrg 		si_align >>= 1;
   5092  1.1  mrg 	      if (found_si > count_si)
   5093  1.1  mrg 		count_si = found_si;
   5094  1.1  mrg 	      found_si += GET_MODE_SIZE (mode);
   5095  1.1  mrg 	      if (num_mova)
   5096  1.1  mrg 		si_limit -= GET_MODE_SIZE (mode);
   5097  1.1  mrg 	    }
   5098  1.1  mrg 	}
   5099  1.1  mrg 
   5100  1.1  mrg       if (mova_p (from))
   5101  1.1  mrg 	{
   5102  1.1  mrg 	  switch (untangle_mova (&num_mova, &mova, from))
   5103  1.1  mrg 	    {
   5104  1.1  mrg 	      case 1:
   5105  1.1  mrg 		if (flag_pic)
   5106  1.1  mrg 		  {
   5107  1.1  mrg 		    rtx src = SET_SRC (PATTERN (from));
   5108  1.1  mrg 		    if (GET_CODE (src) == CONST
   5109  1.1  mrg 			&& GET_CODE (XEXP (src, 0)) == UNSPEC
   5110  1.1  mrg 			&& XINT (XEXP (src, 0), 1) == UNSPEC_SYMOFF)
   5111  1.1  mrg 		      last_symoff = from;
   5112  1.1  mrg 		  }
   5113  1.1  mrg 		break;
   5114  1.1  mrg 	      case 0:	return find_barrier (0, 0, mova);
   5115  1.1  mrg 	      case 2:
   5116  1.1  mrg 		{
   5117  1.1  mrg 		  leading_mova = 0;
   5118  1.1  mrg 		  barrier_before_mova
   5119  1.1  mrg 		    = good_barrier ? good_barrier : found_barrier;
   5120  1.1  mrg 		}
   5121  1.1  mrg 	      default:	break;
   5122  1.1  mrg 	    }
   5123  1.1  mrg 	  if (found_si > count_si)
   5124  1.1  mrg 	    count_si = found_si;
   5125  1.1  mrg 	}
   5126  1.1  mrg       else if (JUMP_TABLE_DATA_P (from)
   5127  1.1  mrg 	       && GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC)
   5128  1.1  mrg 	{
   5129  1.1  mrg 	  if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
   5130  1.1  mrg 	      || (num_mova
   5131  1.1  mrg 		  && (prev_nonnote_insn (from)
   5132  1.1  mrg 		      == XEXP (MOVA_LABELREF (mova), 0))))
   5133  1.1  mrg 	    num_mova--;
   5134  1.1  mrg 	  if (barrier_align (next_real_insn (from)) == align_jumps.levels[0].log)
   5135  1.1  mrg 	    {
   5136  1.1  mrg 	      /* We have just passed the barrier in front of the
   5137  1.1  mrg 		 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
   5138  1.1  mrg 		 the ADDR_DIFF_VEC is accessed as data, just like our pool
   5139  1.1  mrg 		 constants, this is a good opportunity to accommodate what
   5140  1.1  mrg 		 we have gathered so far.
   5141  1.1  mrg 		 If we waited any longer, we could end up at a barrier in
   5142  1.1  mrg 		 front of code, which gives worse cache usage for separated
   5143  1.1  mrg 		 instruction / data caches.  */
   5144  1.1  mrg 	      good_barrier = found_barrier;
   5145  1.1  mrg 	      break;
   5146  1.1  mrg 	    }
   5147  1.1  mrg 	  else
   5148  1.1  mrg 	    {
   5149  1.1  mrg 	      rtx body = PATTERN (from);
   5150  1.1  mrg 	      inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
   5151  1.1  mrg 	    }
   5152  1.1  mrg 	}
   5153  1.1  mrg       /* For the SH1, we generate alignments even after jumps-around-jumps.  */
   5154  1.1  mrg       else if (JUMP_P (from)
   5155  1.1  mrg 	       && ! TARGET_SH2
   5156  1.1  mrg 	       && ! optimize_size)
   5157  1.1  mrg 	new_align = 4;
   5158  1.1  mrg 
   5159  1.1  mrg       /* There is a possibility that a bf is transformed into a bf/s by the
   5160  1.1  mrg 	 delay slot scheduler.  */
   5161  1.1  mrg       if (JUMP_P (from)
   5162  1.1  mrg 	  && get_attr_type (from) == TYPE_CBRANCH
   5163  1.1  mrg 	  && ! sequence_insn_p (from))
   5164  1.1  mrg 	inc += 2;
   5165  1.1  mrg 
   5166  1.1  mrg       if (found_si)
   5167  1.1  mrg 	{
   5168  1.1  mrg 	  count_si += inc;
   5169  1.1  mrg 	  if (new_align > si_align)
   5170  1.1  mrg 	    {
   5171  1.1  mrg 	      si_limit -= (count_si - 1) & (new_align - si_align);
   5172  1.1  mrg 	      si_align = new_align;
   5173  1.1  mrg 	    }
   5174  1.1  mrg 	  count_si = (count_si + new_align - 1) & -new_align;
   5175  1.1  mrg 	}
   5176  1.1  mrg       if (found_hi)
   5177  1.1  mrg 	{
   5178  1.1  mrg 	  count_hi += inc;
   5179  1.1  mrg 	  if (new_align > hi_align)
   5180  1.1  mrg 	    {
   5181  1.1  mrg 	      hi_limit -= (count_hi - 1) & (new_align - hi_align);
   5182  1.1  mrg 	      hi_align = new_align;
   5183  1.1  mrg 	    }
   5184  1.1  mrg 	  count_hi = (count_hi + new_align - 1) & -new_align;
   5185  1.1  mrg 	}
   5186  1.1  mrg       from = NEXT_INSN (from);
   5187  1.1  mrg     }
   5188  1.1  mrg 
   5189  1.1  mrg   if (num_mova)
   5190  1.1  mrg     {
   5191  1.1  mrg       if (leading_mova)
   5192  1.1  mrg 	{
   5193  1.1  mrg 	  /* Try as we might, the leading mova is out of range.  Change
   5194  1.1  mrg 	     it into a load (which will become a pcload) and retry.  */
   5195  1.1  mrg 	  fixup_mova (mova);
   5196  1.1  mrg 	  return find_barrier (0, 0, mova);
   5197  1.1  mrg 	}
   5198  1.1  mrg       else
   5199  1.1  mrg 	{
   5200  1.1  mrg 	  /* Insert the constant pool table before the mova instruction,
   5201  1.1  mrg 	     to prevent the mova label reference from going out of range.  */
   5202  1.1  mrg 	  from = mova;
   5203  1.1  mrg 	  good_barrier = found_barrier = barrier_before_mova;
   5204  1.1  mrg 	}
   5205  1.1  mrg     }
   5206  1.1  mrg 
   5207  1.1  mrg   if (found_barrier)
   5208  1.1  mrg     {
   5209  1.1  mrg       if (good_barrier && next_real_insn (found_barrier))
   5210  1.1  mrg 	found_barrier = good_barrier;
   5211  1.1  mrg     }
   5212  1.1  mrg   else
   5213  1.1  mrg     {
   5214  1.1  mrg       /* We didn't find a barrier in time to dump our stuff,
   5215  1.1  mrg 	 so we'll make one.  */
   5216  1.1  mrg       rtx_code_label *label = gen_label_rtx ();
   5217  1.1  mrg 
   5218  1.1  mrg       /* Don't emit a constant table in the middle of insns for
   5219  1.1  mrg 	 casesi_worker_2.  This is a bit of overkill, but it is enough
   5220  1.1  mrg 	 because casesi_worker_2 doesn't appear very frequently.  */
   5221  1.1  mrg       if (last_symoff)
   5222  1.1  mrg 	from = last_symoff;
   5223  1.1  mrg 
   5224  1.1  mrg       /* If we exceeded the range, then we must back up over the last
   5225  1.1  mrg 	 instruction we looked at.  Otherwise, we just need to undo the
   5226  1.1  mrg 	 NEXT_INSN at the end of the loop.  */
   5227  1.1  mrg       if (PREV_INSN (from) != orig
   5228  1.1  mrg 	  && (count_hi > hi_limit || count_si > si_limit))
   5229  1.1  mrg 	from = PREV_INSN (PREV_INSN (from));
   5230  1.1  mrg       else
   5231  1.1  mrg 	from = PREV_INSN (from);
   5232  1.1  mrg 
   5233  1.1  mrg       /* Don't emit a constant table in the middle of the global pointer setup,
   5234  1.1  mrg 	 since that would move the addressing base GOT into another table.
   5235  1.1  mrg 	 We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
   5236  1.1  mrg 	 in the pool anyway, so just move up the whole constant pool.
   5237  1.1  mrg 
   5238  1.1  mrg 	 However, avoid doing so when the last single GOT mov is the starting
   5239  1.1  mrg 	 insn itself.  Going back past the start insn would create a negative
   5240  1.1  mrg 	 offset, causing errors.  */
   5241  1.1  mrg       if (last_got && last_got != orig)
   5242  1.1  mrg         from = PREV_INSN (last_got);
   5243  1.1  mrg 
   5244  1.1  mrg       /* Don't insert the constant pool table at the position which
   5245  1.1  mrg 	 may be the landing pad.  */
   5246  1.1  mrg       if (flag_exceptions
   5247  1.1  mrg 	  && CALL_P (from)
   5248  1.1  mrg 	  && find_reg_note (from, REG_EH_REGION, NULL_RTX))
   5249  1.1  mrg 	from = PREV_INSN (from);
   5250  1.1  mrg 
   5251  1.1  mrg       /* Walk back to be just before any jump or label.
   5252  1.1  mrg 	 Putting it before a label reduces the number of times the branch
   5253  1.1  mrg 	 around the constant pool table will be hit.  Putting it before
   5254  1.1  mrg 	 a jump makes it more likely that the bra delay slot will be
   5255  1.1  mrg 	 filled.  */
   5256  1.1  mrg       while (NOTE_P (from) || JUMP_P (from) || LABEL_P (from))
   5257  1.1  mrg 	from = PREV_INSN (from);
   5258  1.1  mrg 
   5259  1.1  mrg       if (CALL_P (from))
   5260  1.1  mrg 	{
   5261  1.1  mrg 	  bool sibcall_p = SIBLING_CALL_P (from);
   5262  1.1  mrg 
   5263  1.1  mrg 	  /* If FROM was a sibling call, then we know that control
   5264  1.1  mrg 	     will not return.  In fact, we were guaranteed to hit
   5265  1.1  mrg 	     a barrier before another real insn.
   5266  1.1  mrg 
   5267  1.1  mrg 	     The jump around the constant pool is unnecessary.  It
   5268  1.1  mrg 	     costs space, but more importantly it confuses dwarf2cfi
   5269  1.1  mrg 	     generation.  */
   5270  1.1  mrg 	  if (sibcall_p)
   5271  1.1  mrg 	    return emit_barrier_after (from);
   5272  1.1  mrg 	}
   5273  1.1  mrg 
   5274  1.1  mrg       from = emit_jump_insn_after (gen_jump (label), from);
   5275  1.1  mrg       JUMP_LABEL (from) = label;
   5276  1.1  mrg       LABEL_NUSES (label) = 1;
   5277  1.1  mrg       found_barrier = emit_barrier_after (from);
   5278  1.1  mrg       emit_label_after (label, found_barrier);
   5279  1.1  mrg     }
   5280  1.1  mrg 
   5281  1.1  mrg   return found_barrier;
   5282  1.1  mrg }
   5283  1.1  mrg 
   5284  1.1  mrg /* If the instruction INSN is implemented by a special function, and we can
   5285  1.1  mrg    positively find the register that is used to call the sfunc, and this
   5286  1.1  mrg    register is not used anywhere else in this instruction except as the
   5287  1.1  mrg    destination of a set, return this register; else return NULL_RTX.  */
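/* (The sfunc call patterns express this register as a (use (reg:SI ...))
   element of their PARALLEL; that USE is what the first loop below looks
   for.)  */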
   5288  1.1  mrg rtx
   5289  1.1  mrg sfunc_uses_reg (rtx_insn *insn)
   5290  1.1  mrg {
   5291  1.1  mrg   int i;
   5292  1.1  mrg   rtx pattern, part, reg_part, reg;
   5293  1.1  mrg 
   5294  1.1  mrg   if (!NONJUMP_INSN_P (insn))
   5295  1.1  mrg     return NULL_RTX;
   5296  1.1  mrg   pattern = PATTERN (insn);
   5297  1.1  mrg   if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
   5298  1.1  mrg     return NULL_RTX;
   5299  1.1  mrg 
   5300  1.1  mrg   for (reg_part = NULL_RTX, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
   5301  1.1  mrg     {
   5302  1.1  mrg       part = XVECEXP (pattern, 0, i);
   5303  1.1  mrg       if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
   5304  1.1  mrg 	reg_part = part;
   5305  1.1  mrg     }
   5306  1.1  mrg   if (! reg_part)
   5307  1.1  mrg     return NULL_RTX;
   5308  1.1  mrg   reg = XEXP (reg_part, 0);
   5309  1.1  mrg   for (int i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
   5310  1.1  mrg     {
   5311  1.1  mrg       part = XVECEXP (pattern, 0, i);
   5312  1.1  mrg       if (part == reg_part || GET_CODE (part) == CLOBBER)
   5313  1.1  mrg 	continue;
   5314  1.1  mrg       if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
   5315  1.1  mrg 				  && REG_P (SET_DEST (part)))
   5316  1.1  mrg 				 ? SET_SRC (part) : part)))
   5317  1.1  mrg 	return NULL_RTX;
   5318  1.1  mrg     }
   5319  1.1  mrg   return reg;
   5320  1.1  mrg }
   5321  1.1  mrg 
   5322  1.1  mrg /* See if the only way in which INSN uses REG is by calling it, or by
   5323  1.1  mrg    setting it while calling it.  Set *SET to a SET rtx if the register
   5324  1.1  mrg    is set by INSN.  */
   5325  1.1  mrg static bool
   5326  1.1  mrg noncall_uses_reg (rtx reg, rtx_insn *insn, rtx *set)
   5327  1.1  mrg {
   5328  1.1  mrg   *set = NULL_RTX;
   5329  1.1  mrg 
   5330  1.1  mrg   rtx reg2 = sfunc_uses_reg (insn);
   5331  1.1  mrg   if (reg2 && REGNO (reg2) == REGNO (reg))
   5332  1.1  mrg     {
   5333  1.1  mrg       rtx pattern = single_set (insn);
   5334  1.1  mrg       if (pattern
   5335  1.1  mrg 	  && REG_P (SET_DEST (pattern))
   5336  1.1  mrg 	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
   5337  1.1  mrg 	*set = pattern;
   5338  1.1  mrg       return false;
   5339  1.1  mrg     }
   5340  1.1  mrg   if (!CALL_P (insn))
   5341  1.1  mrg     {
   5342  1.1  mrg       /* We don't use rtx_equal_p because we don't care if the mode is
   5343  1.1  mrg 	 different.  */
   5344  1.1  mrg       rtx pattern = single_set (insn);
   5345  1.1  mrg       if (pattern
   5346  1.1  mrg 	  && REG_P (SET_DEST (pattern))
   5347  1.1  mrg 	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
   5348  1.1  mrg 	{
   5349  1.1  mrg 	  rtx par, part;
   5350  1.1  mrg 	  int i;
   5351  1.1  mrg 
   5352  1.1  mrg 	  *set = pattern;
   5353  1.1  mrg 	  par = PATTERN (insn);
   5354  1.1  mrg 	  if (GET_CODE (par) == PARALLEL)
   5355  1.1  mrg 	    for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
   5356  1.1  mrg 	      {
   5357  1.1  mrg 		part = XVECEXP (par, 0, i);
   5358  1.1  mrg 		if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
   5359  1.1  mrg 		  return true;
   5360  1.1  mrg 	      }
   5361  1.1  mrg 	  return reg_mentioned_p (reg, SET_SRC (pattern));
   5362  1.1  mrg 	}
   5363  1.1  mrg 
   5364  1.1  mrg       return true;
   5365  1.1  mrg     }
   5366  1.1  mrg 
   5367  1.1  mrg   rtx pattern = PATTERN (insn);
   5368  1.1  mrg 
   5369  1.1  mrg   if (GET_CODE (pattern) == PARALLEL)
   5370  1.1  mrg     {
   5371  1.1  mrg       for (int i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
   5372  1.1  mrg 	if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
   5373  1.1  mrg 	  return true;
   5374  1.1  mrg       pattern = XVECEXP (pattern, 0, 0);
   5375  1.1  mrg     }
   5376  1.1  mrg 
   5377  1.1  mrg   if (GET_CODE (pattern) == SET)
   5378  1.1  mrg     {
   5379  1.1  mrg       if (reg_mentioned_p (reg, SET_DEST (pattern)))
   5380  1.1  mrg 	{
   5381  1.1  mrg 	  /* We don't use rtx_equal_p, because we don't care if the
   5382  1.1  mrg 	     mode is different.  */
   5383  1.1  mrg 	  if (!REG_P (SET_DEST (pattern))
   5384  1.1  mrg 	      || REGNO (reg) != REGNO (SET_DEST (pattern)))
   5385  1.1  mrg 	    return true;
   5386  1.1  mrg 
   5387  1.1  mrg 	  *set = pattern;
   5388  1.1  mrg 	}
   5389  1.1  mrg 
   5390  1.1  mrg       pattern = SET_SRC (pattern);
   5391  1.1  mrg     }
   5392  1.1  mrg 
   5393  1.1  mrg   if (GET_CODE (pattern) != CALL
   5394  1.1  mrg       || !MEM_P (XEXP (pattern, 0))
   5395  1.1  mrg       || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
   5396  1.1  mrg     return true;
   5397  1.1  mrg 
   5398  1.1  mrg   return false;
   5399  1.1  mrg }
   5400  1.1  mrg 
   5401  1.1  mrg /* Given X, a pattern of an insn or a part of it, return a mask of used
   5402  1.1  mrg    general registers.  Bits 0..15 mean that the respective registers
   5403  1.1  mrg    are used as inputs in the instruction.  Bits 16..31 mean that the
   5404  1.1  mrg    registers 0..15, respectively, are used as outputs, or are clobbered.
   5405  1.1  mrg    IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
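/* For example, for the pattern (set (reg:SI 1) (plus:SI (reg:SI 4) (reg:SI 5)))
   the result is 0x00020030: bits 4 and 5 for the r4 / r5 inputs and bit 17
   (16 + 1) for the r1 output.  */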
   5406  1.1  mrg int
   5407  1.1  mrg regs_used (rtx x, int is_dest)
   5408  1.1  mrg {
   5409  1.1  mrg   enum rtx_code code;
   5410  1.1  mrg   const char *fmt;
   5411  1.1  mrg   int used = 0;
   5412  1.1  mrg 
   5413  1.1  mrg   if (! x)
   5414  1.1  mrg     return used;
   5415  1.1  mrg   code = GET_CODE (x);
   5416  1.1  mrg   switch (code)
   5417  1.1  mrg     {
   5418  1.1  mrg     case REG:
   5419  1.1  mrg       if (REGNO (x) < 16)
   5420  1.1  mrg 	return (((1 << hard_regno_nregs (0, GET_MODE (x))) - 1)
   5421  1.1  mrg 		<< (REGNO (x) + is_dest));
   5422  1.1  mrg       return 0;
   5423  1.1  mrg     case SUBREG:
   5424  1.1  mrg       {
   5425  1.1  mrg 	rtx y = SUBREG_REG (x);
   5426  1.1  mrg 
   5427  1.1  mrg 	if (!REG_P (y))
   5428  1.1  mrg 	  break;
   5429  1.1  mrg 	if (REGNO (y) < 16)
   5430  1.1  mrg 	  return (((1 << hard_regno_nregs (0, GET_MODE (x))) - 1)
   5431  1.1  mrg 		  << (REGNO (y) +
   5432  1.1  mrg 		      subreg_regno_offset (REGNO (y),
   5433  1.1  mrg 					   GET_MODE (y),
   5434  1.1  mrg 					   SUBREG_BYTE (x),
   5435  1.1  mrg 					   GET_MODE (x)) + is_dest));
   5436  1.1  mrg 	return 0;
   5437  1.1  mrg       }
   5438  1.1  mrg     case SET:
   5439  1.1  mrg       return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
   5440  1.1  mrg     case RETURN:
   5441  1.1  mrg       /* If there was a return value, it must have been indicated with USE.  */
   5442  1.1  mrg       return 0x00ffff00;
   5443  1.1  mrg     case CLOBBER:
   5444  1.1  mrg       is_dest = 1;
   5445  1.1  mrg       break;
   5446  1.1  mrg     case MEM:
   5447  1.1  mrg       is_dest = 0;
   5448  1.1  mrg       break;
   5449  1.1  mrg     case CALL:
   5450  1.1  mrg       used |= 0x00ff00f0;
   5451  1.1  mrg       break;
   5452  1.1  mrg     default:
   5453  1.1  mrg       break;
   5454  1.1  mrg     }
   5455  1.1  mrg 
   5456  1.1  mrg   fmt = GET_RTX_FORMAT (code);
   5457  1.1  mrg 
   5458  1.1  mrg   for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
   5459  1.1  mrg     {
   5460  1.1  mrg       if (fmt[i] == 'E')
   5461  1.1  mrg 	{
   5462  1.1  mrg 	  for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
   5463  1.1  mrg 	    used |= regs_used (XVECEXP (x, i, j), is_dest);
   5464  1.1  mrg 	}
   5465  1.1  mrg       else if (fmt[i] == 'e')
   5466  1.1  mrg 	used |= regs_used (XEXP (x, i), is_dest);
   5467  1.1  mrg     }
   5468  1.1  mrg   return used;
   5469  1.1  mrg }
   5470  1.1  mrg 
   5471  1.1  mrg /* Create an instruction that prevents redirection of a conditional branch
   5472  1.1  mrg    to the destination of the JUMP with address ADDR.
   5473  1.1  mrg    If the branch needs to be implemented as an indirect jump, try to find
   5474  1.1  mrg    a scratch register for it.
   5475  1.1  mrg    If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
   5476  1.1  mrg    Pass 1 if any preceding insn that doesn't fit into a delay slot is good
   5477  1.1  mrg    enough; pass 2 if a definite blocking insn is needed.
   5478  1.1  mrg    -1 is used internally to avoid deep recursion.
   5479  1.1  mrg    If a blocking instruction is made or recognized, return it.  */
   5480  1.1  mrg static rtx_insn *
   5481  1.1  mrg gen_block_redirect (rtx_insn *jump, int addr, int need_block)
   5482  1.1  mrg {
   5483  1.1  mrg   int dead = 0;
   5484  1.1  mrg   rtx_insn *prev = prev_nonnote_insn (jump);
   5485  1.1  mrg 
   5486  1.1  mrg   /* First, check if we already have an instruction that satisfies our need.  */
   5487  1.1  mrg   if (prev && NONJUMP_INSN_P (prev) && ! prev->deleted ())
   5488  1.1  mrg     {
   5489  1.1  mrg       if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
   5490  1.1  mrg 	return prev;
   5491  1.1  mrg       if (GET_CODE (PATTERN (prev)) == USE
   5492  1.1  mrg 	  || GET_CODE (PATTERN (prev)) == CLOBBER
   5493  1.1  mrg 	  || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
   5494  1.1  mrg 	prev = jump;
   5495  1.1  mrg       else if ((need_block &= ~1) < 0)
   5496  1.1  mrg 	return prev;
   5497  1.1  mrg       else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
   5498  1.1  mrg 	need_block = 0;
   5499  1.1  mrg     }
   5500  1.1  mrg   if (GET_CODE (PATTERN (jump)) == RETURN)
   5501  1.1  mrg     {
   5502  1.1  mrg       if (! need_block)
   5503  1.1  mrg 	return prev;
   5504  1.1  mrg       /* Reorg even does nasty things with return insns that cause branches
   5505  1.1  mrg 	 to go out of range - see find_end_label and callers.  */
   5506  1.1  mrg       return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
   5507  1.1  mrg     }
   5508  1.1  mrg   /* We can't use JUMP_LABEL here because it might be undefined
   5509  1.1  mrg      when not optimizing.  */
   5510  1.1  mrg   rtx dest = XEXP (SET_SRC (PATTERN (jump)), 0);
   5511  1.1  mrg   /* If the branch is out of range, try to find a scratch register for it.  */
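  /* The unsigned comparison below is true exactly when the displacement
     INSN_ADDRESSES (INSN_UID (dest)) - addr is less than -4092 or greater
     than 4098.  */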
   5512  1.1  mrg   if (optimize
   5513  1.1  mrg       && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
   5514  1.1  mrg 	  > 4092 + 4098))
   5515  1.1  mrg     {
   5516  1.1  mrg       rtx_insn *scan;
   5517  1.1  mrg       /* Don't look for the stack pointer as a scratch register,
   5518  1.1  mrg 	 it would cause trouble if an interrupt occurred.  */
   5519  1.1  mrg       unsigned attempt = 0x7fff, used;
   5520  1.1  mrg       int jump_left = flag_expensive_optimizations + 1;
   5521  1.1  mrg 
   5522  1.1  mrg       /* It is likely that the most recent eligible instruction is wanted for
   5523  1.1  mrg 	 the delay slot.  Therefore, find out which registers it uses, and
   5524  1.1  mrg 	 try to avoid using them.  */
   5525  1.1  mrg 
   5526  1.1  mrg       for (scan = jump; (scan = PREV_INSN (scan)); )
   5527  1.1  mrg 	{
   5528  1.1  mrg 	  if (scan->deleted ())
   5529  1.1  mrg 	    continue;
   5530  1.1  mrg 	  rtx_code code = GET_CODE (scan);
   5531  1.1  mrg 	  if (code == CODE_LABEL || code == JUMP_INSN)
   5532  1.1  mrg 	    break;
   5533  1.1  mrg 	  if (code == INSN
   5534  1.1  mrg 	      && GET_CODE (PATTERN (scan)) != USE
   5535  1.1  mrg 	      && GET_CODE (PATTERN (scan)) != CLOBBER
   5536  1.1  mrg 	      && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
   5537  1.1  mrg 	    {
   5538  1.1  mrg 	      attempt &= ~regs_used (PATTERN (scan), 0);
   5539  1.1  mrg 	      break;
   5540  1.1  mrg 	    }
   5541  1.1  mrg 	}
   5542  1.1  mrg       for (used = dead = 0, scan = JUMP_LABEL_AS_INSN (jump);
   5543  1.1  mrg 	   (scan = NEXT_INSN (scan)); )
   5544  1.1  mrg 	{
   5545  1.1  mrg 	  if (scan->deleted ())
   5546  1.1  mrg 	    continue;
   5547  1.1  mrg 	  rtx_code code = GET_CODE (scan);
   5548  1.1  mrg 	  if (INSN_P (scan))
   5549  1.1  mrg 	    {
   5550  1.1  mrg 	      used |= regs_used (PATTERN (scan), 0);
   5551  1.1  mrg 	      if (code == CALL_INSN)
   5552  1.1  mrg 		used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
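	      /* A register becomes a scratch candidate once it has been
		 written (bits 16..31 of USED) on this path without having
		 been read (bits 0..15) first.  */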
   5553  1.1  mrg 	      dead |= (used >> 16) & ~used;
   5554  1.1  mrg 	      if (dead & attempt)
   5555  1.1  mrg 		{
   5556  1.1  mrg 		  dead &= attempt;
   5557  1.1  mrg 		  break;
   5558  1.1  mrg 		}
   5559  1.1  mrg 	      if (code == JUMP_INSN)
   5560  1.1  mrg 		{
   5561  1.1  mrg 		  if (jump_left-- && simplejump_p (scan))
   5562  1.1  mrg 		    scan = JUMP_LABEL_AS_INSN (scan);
   5563  1.1  mrg 		  else
   5564  1.1  mrg 		    break;
   5565  1.1  mrg 		}
   5566  1.1  mrg 	    }
   5567  1.1  mrg 	}
   5568  1.1  mrg       /* Mask out the stack pointer again, in case it was
   5569  1.1  mrg 	 the only 'free' register we have found.  */
   5570  1.1  mrg       dead &= 0x7fff;
   5571  1.1  mrg     }
   5572  1.1  mrg   /* If the immediate destination is still in range, check for possible
   5573  1.1  mrg      threading with a jump beyond the delay slot insn.
   5574  1.1  mrg      Don't check if we are called recursively; the jump has been or will be
   5575  1.1  mrg      checked in a different invocation in that case.  */
   5576  1.1  mrg 
   5577  1.1  mrg   else if (optimize && need_block >= 0)
   5578  1.1  mrg     {
   5579  1.1  mrg       rtx_insn *next = next_active_insn (as_a<rtx_insn *> (dest));
   5580  1.1  mrg       next = next_active_insn (next);
   5581  1.1  mrg       if (next && JUMP_P (next)
   5582  1.1  mrg 	  && GET_CODE (PATTERN (next)) == SET
   5583  1.1  mrg 	  && recog_memoized (next) == CODE_FOR_jump_compact)
   5584  1.1  mrg 	{
   5585  1.1  mrg 	  dest = JUMP_LABEL (next);
   5586  1.1  mrg 	  if (dest
   5587  1.1  mrg 	      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
   5588  1.1  mrg 		  > 4092 + 4098))
   5589  1.1  mrg 	    gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
   5590  1.1  mrg 	}
   5591  1.1  mrg     }
   5592  1.1  mrg 
   5593  1.1  mrg   if (dead)
   5594  1.1  mrg     {
   5595  1.1  mrg       rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
   5596  1.1  mrg 
   5597  1.1  mrg       /* It would be nice if we could convert the jump into an indirect
   5598  1.1  mrg 	 jump / far branch right now, and thus exposing all constituent
   5599  1.1  mrg 	 instructions to further optimization.  However, reorg uses
   5600  1.1  mrg 	 simplejump_p to determine if there is an unconditional jump where
   5601  1.1  mrg 	 it should try to schedule instructions from the target of the
   5602  1.1  mrg 	 branch; simplejump_p fails for indirect jumps even if they have
   5603  1.1  mrg 	 a JUMP_LABEL.  */
   5604  1.1  mrg       rtx_insn *insn = emit_insn_before (gen_indirect_jump_scratch
   5605  1.1  mrg 					 (reg, GEN_INT (unspec_bbr_uid++)),
   5606  1.1  mrg 					 jump);
   5607  1.1  mrg       /* ??? We would like this to have the scope of the jump, but that
   5608  1.1  mrg 	 scope will change when a delay slot insn of an inner scope is added.
   5609  1.1  mrg 	 Hence, after delay slot scheduling, we'll have to expect
   5610  1.1  mrg 	 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
   5611  1.1  mrg 	 the jump.  */
   5612  1.1  mrg 
   5613  1.1  mrg       INSN_LOCATION (insn) = INSN_LOCATION (jump);
   5614  1.1  mrg       INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
   5615  1.1  mrg       return insn;
   5616  1.1  mrg     }
   5617  1.1  mrg   else if (need_block)
   5618  1.1  mrg     /* We can't use JUMP_LABEL here because it might be undefined
   5619  1.1  mrg        when not optimizing.  */
   5620  1.1  mrg     return emit_insn_before (gen_block_branch_redirect
   5621  1.1  mrg 			     (GEN_INT (unspec_bbr_uid++)),
   5622  1.1  mrg 			     jump);
   5623  1.1  mrg   return prev;
   5624  1.1  mrg }
   5625  1.1  mrg 
   5626  1.1  mrg #define CONDJUMP_MIN -252
   5627  1.1  mrg #define CONDJUMP_MAX 262
   5628  1.1  mrg struct far_branch
   5629  1.1  mrg {
   5630  1.1  mrg   /* A label (to be placed) in front of the jump
   5631  1.1  mrg      that jumps to our ultimate destination.  */
   5632  1.1  mrg   rtx_insn *near_label;
   5633  1.1  mrg   /* Where we are going to insert it if we cannot move the jump any farther,
   5634  1.1  mrg      or the jump itself if we have picked up an existing jump.  */
   5635  1.1  mrg   rtx_insn *insert_place;
   5636  1.1  mrg   /* The ultimate destination.  */
   5637  1.1  mrg   rtx_insn *far_label;
   5638  1.1  mrg   struct far_branch *prev;
   5639  1.1  mrg   /* If the branch has already been created, its address;
   5640  1.1  mrg      else the address of its first prospective user.  */
   5641  1.1  mrg   int address;
   5642  1.1  mrg };
   5643  1.1  mrg 
   5644  1.1  mrg enum mdep_reorg_phase_e mdep_reorg_phase;
   5645  1.1  mrg 
   5646  1.1  mrg static void
   5647  1.1  mrg gen_far_branch (struct far_branch *bp)
   5648  1.1  mrg {
   5649  1.1  mrg   rtx_insn *insn = bp->insert_place;
   5650  1.1  mrg   rtx_jump_insn *jump;
   5651  1.1  mrg   rtx_code_label *label = gen_label_rtx ();
   5652  1.1  mrg 
   5653  1.1  mrg   emit_label_after (label, insn);
   5654  1.1  mrg   if (bp->far_label)
   5655  1.1  mrg     {
   5656  1.1  mrg       jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
   5657  1.1  mrg       LABEL_NUSES (bp->far_label)++;
   5658  1.1  mrg     }
   5659  1.1  mrg   else
   5660  1.1  mrg     jump = emit_jump_insn_after (gen_return (), insn);
   5661  1.1  mrg 
   5662  1.1  mrg   /* Emit a barrier so that reorg knows that any following instructions
   5663  1.1  mrg      are not reachable via a fall-through path.
   5664  1.1  mrg      But don't do this when not optimizing, since we wouldn't suppress the
   5665  1.1  mrg      alignment for the barrier then, and could end up with out-of-range
   5666  1.1  mrg      pc-relative loads.  */
   5667  1.1  mrg   if (optimize)
   5668  1.1  mrg     emit_barrier_after (jump);
   5669  1.1  mrg   emit_label_after (bp->near_label, insn);
   5670  1.1  mrg 
   5671  1.1  mrg   if (bp->far_label)
   5672  1.1  mrg     JUMP_LABEL (jump) = bp->far_label;
   5673  1.1  mrg   else
   5674  1.1  mrg     {
   5675  1.1  mrg       rtx pat = PATTERN (jump);
   5676  1.1  mrg       gcc_assert (ANY_RETURN_P (pat));
   5677  1.1  mrg       JUMP_LABEL (jump) = pat;
   5678  1.1  mrg     }
   5679  1.1  mrg 
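  /* Redirect the original conditional branch to LABEL and invert its
     condition, so that the far jump (or return) emitted above is taken
     exactly when the original condition holds.  */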
   5680  1.1  mrg   bool ok = invert_jump (as_a <rtx_jump_insn *> (insn), label, 1);
   5681  1.1  mrg   gcc_assert (ok);
   5682  1.1  mrg 
   5683  1.1  mrg   /* If we are branching around a jump (rather than a return), prevent
   5684  1.1  mrg      reorg from using an insn from the jump target as the delay slot insn -
   5685  1.1  mrg      when reorg did this, it pessimized code (we'd rather hide the delay slot)
   5686  1.1  mrg      and it could cause branches to go out of range.  */
   5687  1.1  mrg   if (bp->far_label)
   5688  1.1  mrg     (emit_insn_after
   5689  1.1  mrg      (gen_stuff_delay_slot
   5690  1.1  mrg       (GEN_INT (unspec_bbr_uid++),
   5691  1.1  mrg        GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
   5692  1.1  mrg       insn));
   5693  1.1  mrg   /* Prevent reorg from undoing our splits.  */
   5694  1.1  mrg   gen_block_redirect (jump, bp->address += 2, 2);
   5695  1.1  mrg }
   5696  1.1  mrg 
   5697  1.1  mrg /* Fix up ADDR_DIFF_VECs.  */
   5698  1.1  mrg void
   5699  1.1  mrg fixup_addr_diff_vecs (rtx_insn *first)
   5700  1.1  mrg {
   5701  1.1  mrg   rtx_insn *insn;
   5702  1.1  mrg 
   5703  1.1  mrg   for (insn = first; insn; insn = NEXT_INSN (insn))
   5704  1.1  mrg     {
   5705  1.1  mrg       rtx vec_lab, pat, prevpat, x, braf_label;
   5706  1.1  mrg       rtx_insn *prev;
   5707  1.1  mrg 
   5708  1.1  mrg       if (! JUMP_TABLE_DATA_P (insn)
   5709  1.1  mrg 	  || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
   5710  1.1  mrg 	continue;
   5711  1.1  mrg       pat = PATTERN (insn);
   5712  1.1  mrg       vec_lab = XEXP (XEXP (pat, 0), 0);
   5713  1.1  mrg 
   5714  1.1  mrg       /* Search the matching casesi_jump_2.  */
   5715  1.1  mrg       for (prev = as_a <rtx_insn *> (vec_lab); ; prev = PREV_INSN (prev))
   5716  1.1  mrg 	{
   5717  1.1  mrg 	  if (!JUMP_P (prev))
   5718  1.1  mrg 	    continue;
   5719  1.1  mrg 	  prevpat = PATTERN (prev);
   5720  1.1  mrg 	  if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
   5721  1.1  mrg 	    continue;
   5722  1.1  mrg 	  x = XVECEXP (prevpat, 0, 1);
   5723  1.1  mrg 	  if (GET_CODE (x) != USE)
   5724  1.1  mrg 	    continue;
   5725  1.1  mrg 	  x = XEXP (x, 0);
   5726  1.1  mrg 	  if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
   5727  1.1  mrg 	    break;
   5728  1.1  mrg 	}
   5729  1.1  mrg       /* FIXME: This is a bug in the optimizer, but it seems harmless
   5730  1.1  mrg 	 to just avoid panicking.  */
   5731  1.1  mrg       if (!prev)
   5732  1.1  mrg 	continue;
   5733  1.1  mrg 
   5734  1.1  mrg       /* Emit the reference label of the braf where it belongs, right after
   5735  1.1  mrg 	 the casesi_jump_2 (i.e. braf).  */
   5736  1.1  mrg       braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
   5737  1.1  mrg       emit_label_after (as_a <rtx_insn *> (braf_label), prev);
   5738  1.1  mrg 
   5739  1.1  mrg       /* Fix up the ADDR_DIFF_VEC to be relative
   5740  1.1  mrg 	 to the reference address of the braf.  */
   5741  1.1  mrg       XEXP (XEXP (pat, 0), 0) = braf_label;
   5742  1.1  mrg     }
   5743  1.1  mrg }
   5744  1.1  mrg 
   5745  1.1  mrg /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
   5746  1.1  mrg    a barrier.  Return the base 2 logarithm of the desired alignment.  */
   5747  1.1  mrg int
   5748  1.1  mrg barrier_align (rtx_insn *barrier_or_label)
   5749  1.1  mrg {
   5750  1.1  mrg   if (! barrier_or_label)
   5751  1.1  mrg     return 0;
   5752  1.1  mrg 
   5753  1.1  mrg   if (LABEL_P (barrier_or_label)
   5754  1.1  mrg       && NEXT_INSN (barrier_or_label)
   5755  1.1  mrg       && JUMP_TABLE_DATA_P (NEXT_INSN (barrier_or_label)))
   5756  1.1  mrg     return 2;
   5757  1.1  mrg 
   5758  1.1  mrg   if (BARRIER_P (barrier_or_label)
   5759  1.1  mrg       && PREV_INSN (barrier_or_label)
   5760  1.1  mrg       && JUMP_TABLE_DATA_P (PREV_INSN (barrier_or_label)))
   5761  1.1  mrg     {
   5762  1.1  mrg       rtx pat = PATTERN (PREV_INSN (barrier_or_label));
   5763  1.1  mrg       /* If this is a very small table, we want to keep the alignment after
   5764  1.1  mrg 	 the table to the minimum for proper code alignment.  */
   5765  1.1  mrg       return ((optimize_size
   5766  1.1  mrg 	       || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
   5767  1.1  mrg 		   <= (unsigned) 1 << (CACHE_LOG - 2)))
   5768  1.1  mrg 	      ? 1 : align_jumps.levels[0].log);
   5769  1.1  mrg     }
   5770  1.1  mrg 
   5771  1.1  mrg   rtx_insn *next = next_active_insn (barrier_or_label);
   5772  1.1  mrg 
   5773  1.1  mrg   if (! next)
   5774  1.1  mrg     return 0;
   5775  1.1  mrg 
   5776  1.1  mrg   rtx pat = PATTERN (next);
   5777  1.1  mrg 
   5778  1.1  mrg   if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
   5779  1.1  mrg     /* This is a barrier in front of a constant table.  */
   5780  1.1  mrg     return 0;
   5781  1.1  mrg 
   5782  1.1  mrg   if (optimize_size)
   5783  1.1  mrg     return 0;
   5784  1.1  mrg 
   5785  1.1  mrg   if (! TARGET_SH2 || ! optimize)
   5786  1.1  mrg     return align_jumps.levels[0].log;
   5787  1.1  mrg 
   5788  1.1  mrg   /* When fixing up pcloads, a constant table might be inserted just before
   5789  1.1  mrg      the basic block that ends with the barrier.  Thus, we can't trust the
   5790  1.1  mrg      instruction lengths before that.  */
   5791  1.1  mrg   if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
   5792  1.1  mrg     {
   5793  1.1  mrg       /* Check if there is an immediately preceding branch to the insn beyond
   5794  1.1  mrg 	 the barrier.  We must weigh the cost of discarding useful information
   5795  1.1  mrg 	 from the current cache line when executing this branch and there is
   5796  1.1  mrg 	 an alignment, against that of fetching unneeded insns in front of the
   5797  1.1  mrg 	 branch target when there is no alignment.  */
   5798  1.1  mrg 
   5799  1.1  mrg       /* There are two delay_slot cases to consider.  One is the simple case
   5800  1.1  mrg 	 where the preceding branch is to the insn beyond the barrier (simple
   5801  1.1  mrg 	 delay slot filling), and the other is where the preceding branch has
   5802  1.1  mrg 	 a delay slot that is a duplicate of the insn after the barrier
   5803  1.1  mrg 	 (fill_eager_delay_slots) and the branch is to the insn after the insn
   5804  1.1  mrg 	 after the barrier.  */
   5805  1.1  mrg 
   5806  1.1  mrg       int slot, credit;
   5807  1.1  mrg       bool jump_to_next = false;
   5808  1.1  mrg 
   5809  1.1  mrg       /* Skip to the insn before the JUMP_INSN before the barrier under
   5810  1.1  mrg 	 investigation.  */
   5811  1.1  mrg       rtx_insn *prev = prev_real_insn (prev_active_insn (barrier_or_label));
   5812  1.1  mrg 
   5813  1.1  mrg       for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
   5814  1.1  mrg 	   credit >= 0 && prev && NONJUMP_INSN_P (prev);
   5815  1.1  mrg 	   prev = prev_real_insn (prev))
   5816  1.1  mrg 	{
   5817  1.1  mrg 	  jump_to_next = false;
   5818  1.1  mrg 	  if (GET_CODE (PATTERN (prev)) == USE
   5819  1.1  mrg 	      || GET_CODE (PATTERN (prev)) == CLOBBER)
   5820  1.1  mrg 	    continue;
   5821  1.1  mrg 	  if (rtx_sequence *prev_seq = dyn_cast <rtx_sequence *> (PATTERN (prev)))
   5822  1.1  mrg 	    {
   5823  1.1  mrg 	      prev = prev_seq->insn (1);
   5824  1.1  mrg 	      if (INSN_UID (prev) == INSN_UID (next))
   5825  1.1  mrg 		{
   5826  1.1  mrg 		  /* Delay slot was filled with insn at jump target.  */
   5827  1.1  mrg 		  jump_to_next = true;
   5828  1.1  mrg 		  continue;
   5829  1.1  mrg 		}
   5830  1.1  mrg 	    }
   5831  1.1  mrg 
   5832  1.1  mrg 	  if (slot
   5833  1.1  mrg 	      && get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
   5834  1.1  mrg 	    slot = 0;
   5835  1.1  mrg 	  credit -= get_attr_length (prev);
   5836  1.1  mrg 	}
   5837  1.1  mrg       if (prev && jump_to_label_p (prev))
   5838  1.1  mrg 	{
   5839  1.1  mrg 	  rtx_insn *x;
   5840  1.1  mrg 	  if (jump_to_next
   5841  1.1  mrg 	      || next_real_insn (JUMP_LABEL_AS_INSN (prev)) == next
   5842  1.1  mrg 	      /* If relax_delay_slots() decides NEXT was redundant
   5843  1.1  mrg 		 with some previous instruction, it will have
   5844  1.1  mrg 		 redirected PREV's jump to the following insn.  */
   5845  1.1  mrg 	      || JUMP_LABEL (prev) == next_nonnote_insn (next)
   5846  1.1  mrg 	      /* There is no upper bound on redundant instructions
   5847  1.1  mrg 		 that might have been skipped, but we must not put an
   5848  1.1  mrg 		 alignment where none had been before.  */
   5849  1.1  mrg 	      || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
   5850  1.1  mrg 		  (INSN_P (x)
   5851  1.1  mrg 		   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
   5852  1.1  mrg 		       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
   5853  1.1  mrg 		       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
   5854  1.1  mrg 	    {
   5855  1.1  mrg 	      rtx pat = PATTERN (prev);
   5856  1.1  mrg 	      if (GET_CODE (pat) == PARALLEL)
   5857  1.1  mrg 		pat = XVECEXP (pat, 0, 0);
   5858  1.1  mrg 	      if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
   5859  1.1  mrg 		return 0;
   5860  1.1  mrg 	    }
   5861  1.1  mrg 	}
   5862  1.1  mrg     }
   5863  1.1  mrg 
   5864  1.1  mrg   return align_jumps.levels[0].log;
   5865  1.1  mrg }
   5866  1.1  mrg 
   5867  1.1  mrg /* If we are inside a phony loop, almost any kind of label can turn up as the
   5868  1.1  mrg    first one in the loop.  Aligning a braf label causes incorrect switch
   5869  1.1  mrg    destination addresses; we can detect braf labels because they are
   5870  1.1  mrg    followed by a BARRIER.
   5871  1.1  mrg    Applying loop alignment to small constant or switch tables is a waste
   5872  1.1  mrg    of space, so we suppress this too.  */
   5873  1.1  mrg int
   5874  1.1  mrg sh_loop_align (rtx_insn *label)
   5875  1.1  mrg {
   5876  1.1  mrg   rtx_insn *next = label;
   5877  1.1  mrg 
   5878  1.1  mrg   if (! optimize || optimize_size)
   5879  1.1  mrg     return 0;
   5880  1.1  mrg 
   5881  1.1  mrg   do
   5882  1.1  mrg     next = next_nonnote_insn (next);
   5883  1.1  mrg   while (next && LABEL_P (next));
   5884  1.1  mrg 
   5885  1.1  mrg   if (! next
   5886  1.1  mrg       || ! INSN_P (next)
   5887  1.1  mrg       || recog_memoized (next) == CODE_FOR_consttable_2)
   5888  1.1  mrg     return 0;
   5889  1.1  mrg 
   5890  1.1  mrg   return align_loops.levels[0].log;
   5891  1.1  mrg }
   5892  1.1  mrg 
   5893  1.1  mrg /* Do a final pass over the function, just before delayed branch
   5894  1.1  mrg    scheduling.  */
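/* Roughly, the steps below are: attach label notes for -mrelax, shorten
   branches, turn out-of-range moves into pc-relative loads and emit the
   constant tables behind barriers, split out-of-range branches, and emit
   dummy uses for sfunc address registers; see the mdep_reorg_phase updates
   below for the exact sequencing.  */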
   5895  1.1  mrg static void
   5896  1.1  mrg sh_reorg (void)
   5897  1.1  mrg {
   5898  1.1  mrg   rtx_insn *first, *insn, *mova = NULL;
   5899  1.1  mrg   int num_mova;
   5900  1.1  mrg   rtx r0_rtx = gen_rtx_REG (Pmode, 0);
   5901  1.1  mrg   rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
   5902  1.1  mrg 
   5903  1.1  mrg   first = get_insns ();
   5904  1.1  mrg   max_labelno_before_reorg = max_label_num ();
   5905  1.1  mrg 
   5906  1.1  mrg   /* We must split call insns before introducing `mova's.  If we're
   5907  1.1  mrg      optimizing, they'll have already been split.  Otherwise, make
   5908  1.1  mrg      sure we don't split them too late.  */
   5909  1.1  mrg   if (! optimize)
   5910  1.1  mrg     split_all_insns_noflow ();
   5911  1.1  mrg 
   5912  1.1  mrg   /* If relaxing, generate pseudo-ops to associate function calls with
   5913  1.1  mrg      the symbols they call.  It does no harm to not generate these
   5914  1.1  mrg      pseudo-ops.  However, when we can generate them, it enables the
   5915  1.1  mrg      linker to potentially relax the jsr to a bsr, and eliminate the
   5916  1.1  mrg      register load and, possibly, the constant pool entry.  */
   5917  1.1  mrg 
   5918  1.1  mrg   mdep_reorg_phase = SH_INSERT_USES_LABELS;
   5919  1.1  mrg   if (TARGET_RELAX)
   5920  1.1  mrg     {
   5921  1.1  mrg       /* Remove all REG_LABEL_OPERAND notes.  We want to use them for our
   5922  1.1  mrg 	 own purposes.  This works because none of the remaining passes
   5923  1.1  mrg 	 need to look at them.
   5924  1.1  mrg 
   5925  1.1  mrg 	 ??? But it may break in the future.  We should use a machine
   5926  1.1  mrg 	 dependent REG_NOTE, or some other approach entirely.  */
   5927  1.1  mrg       for (insn = first; insn; insn = NEXT_INSN (insn))
   5928  1.1  mrg 	{
   5929  1.1  mrg 	  if (INSN_P (insn))
   5930  1.1  mrg 	    {
   5931  1.1  mrg 	      rtx note;
   5932  1.1  mrg 
   5933  1.1  mrg 	      while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
   5934  1.1  mrg 					    NULL_RTX)) != 0)
   5935  1.1  mrg 		remove_note (insn, note);
   5936  1.1  mrg 	    }
   5937  1.1  mrg 	}
   5938  1.1  mrg 
   5939  1.1  mrg       for (insn = first; insn; insn = NEXT_INSN (insn))
   5940  1.1  mrg 	{
   5941  1.1  mrg 	  rtx pattern, reg, set, dies;
   5942  1.1  mrg 	  rtx_code_label *label;
   5943  1.1  mrg 	  rtx_insn *link, *scan;
   5944  1.1  mrg 	  int rescan = 0, foundinsn = 0;
   5945  1.1  mrg 
   5946  1.1  mrg 	  if (CALL_P (insn))
   5947  1.1  mrg 	    {
   5948  1.1  mrg 	      pattern = PATTERN (insn);
   5949  1.1  mrg 
   5950  1.1  mrg 	      if (GET_CODE (pattern) == PARALLEL)
   5951  1.1  mrg 		pattern = XVECEXP (pattern, 0, 0);
   5952  1.1  mrg 	      if (GET_CODE (pattern) == SET)
   5953  1.1  mrg 		pattern = SET_SRC (pattern);
   5954  1.1  mrg 
   5955  1.1  mrg 	      if (GET_CODE (pattern) != CALL
   5956  1.1  mrg 		  || !MEM_P (XEXP (pattern, 0)))
   5957  1.1  mrg 		continue;
   5958  1.1  mrg 
   5959  1.1  mrg 	      reg = XEXP (XEXP (pattern, 0), 0);
   5960  1.1  mrg 	    }
   5961  1.1  mrg 	  else
   5962  1.1  mrg 	    {
   5963  1.1  mrg 	      reg = sfunc_uses_reg (insn);
   5964  1.1  mrg 	      if (! reg)
   5965  1.1  mrg 		continue;
   5966  1.1  mrg 	    }
   5967  1.1  mrg 
   5968  1.1  mrg 	  if (!REG_P (reg))
   5969  1.1  mrg 	    continue;
   5970  1.1  mrg 
   5971  1.1  mrg 	  /* Try scanning backward to find where the register is set.  */
   5972  1.1  mrg 	  link = NULL;
   5973  1.1  mrg 	  for (scan = PREV_INSN (insn);
   5974  1.1  mrg 	       scan && !LABEL_P (scan);
   5975  1.1  mrg 	       scan = PREV_INSN (scan))
   5976  1.1  mrg 	    {
   5977  1.1  mrg 	      if (! INSN_P (scan))
   5978  1.1  mrg 		continue;
   5979  1.1  mrg 
   5980  1.1  mrg 	      if (! reg_mentioned_p (reg, scan))
   5981  1.1  mrg 		continue;
   5982  1.1  mrg 
   5983  1.1  mrg 	      if (noncall_uses_reg (reg, scan, &set))
   5984  1.1  mrg 		break;
   5985  1.1  mrg 
   5986  1.1  mrg 	      if (set)
   5987  1.1  mrg 		{
   5988  1.1  mrg 		  link = scan;
   5989  1.1  mrg 		  break;
   5990  1.1  mrg 		}
   5991  1.1  mrg 	    }
   5992  1.1  mrg 
   5993  1.1  mrg 	  if (! link)
   5994  1.1  mrg 	    continue;
   5995  1.1  mrg 
   5996  1.1  mrg 	  /* The register is set at LINK.  */
   5997  1.1  mrg 
   5998  1.1  mrg 	  /* We can only optimize the function call if the register is
   5999  1.1  mrg 	     being set to a symbol.  In theory, we could sometimes
   6000  1.1  mrg 	     optimize calls to a constant location, but the assembler
   6001  1.1  mrg 	     and linker do not support that at present.  */
   6002  1.1  mrg 	  if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
   6003  1.1  mrg 	      && GET_CODE (SET_SRC (set)) != LABEL_REF)
   6004  1.1  mrg 	    continue;
   6005  1.1  mrg 
   6006  1.1  mrg 	  /* Scan forward from LINK to the place where REG dies, and
   6007  1.1  mrg 	     make sure that the only insns which use REG are
   6008  1.1  mrg 	     themselves function calls.  */
   6009  1.1  mrg 
   6010  1.1  mrg 	  /* ??? This doesn't work for call targets that were allocated
   6011  1.1  mrg 	     by reload, since there may not be a REG_DEAD note for the
   6012  1.1  mrg 	     register.  */
   6013  1.1  mrg 
   6014  1.1  mrg 	  dies = NULL_RTX;
   6015  1.1  mrg 	  for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
   6016  1.1  mrg 	    {
   6017  1.1  mrg 	      rtx scanset;
   6018  1.1  mrg 
   6019  1.1  mrg 	      /* Don't try to trace forward past a CODE_LABEL if we haven't
   6020  1.1  mrg 		 seen INSN yet.  Ordinarily, we will only find the setting insn
   6021  1.1  mrg 		 if it is in the same basic block.  However,
   6022  1.1  mrg 		 cross-jumping can insert code labels in between the load and
   6023  1.1  mrg 		 the call, and can result in situations where a single call
   6024  1.1  mrg 		 insn may have two targets depending on where we came from.  */
   6025  1.1  mrg 
   6026  1.1  mrg 	      if (LABEL_P (scan) && ! foundinsn)
   6027  1.1  mrg 		break;
   6028  1.1  mrg 
   6029  1.1  mrg 	      if (! INSN_P (scan))
   6030  1.1  mrg 		continue;
   6031  1.1  mrg 
   6032  1.1  mrg 	      /* Don't try to trace forward past a JUMP.  To optimize
   6033  1.1  mrg 		 safely, we would have to check that all the
   6034  1.1  mrg 		 instructions at the jump destination did not use REG.  */
   6035  1.1  mrg 
   6036  1.1  mrg 	      if (JUMP_P (scan))
   6037  1.1  mrg 		break;
   6038  1.1  mrg 
   6039  1.1  mrg 	      if (! reg_mentioned_p (reg, scan))
   6040  1.1  mrg 		continue;
   6041  1.1  mrg 
   6042  1.1  mrg 	      if (noncall_uses_reg (reg, scan, &scanset))
   6043  1.1  mrg 		break;
   6044  1.1  mrg 
   6045  1.1  mrg 	      if (scan == insn)
   6046  1.1  mrg 		foundinsn = 1;
   6047  1.1  mrg 
   6048  1.1  mrg 	      if (scan != insn
   6049  1.1  mrg 		  && (CALL_P (scan) || sfunc_uses_reg (scan)))
   6050  1.1  mrg 		{
    6051  1.1  mrg 		  /* There is a function call through this register other
    6052  1.1  mrg 		     than the one we are checking.  If we optimize
    6053  1.1  mrg 		     this call, we need to rescan again below.  */
   6054  1.1  mrg 		  rescan = 1;
   6055  1.1  mrg 		}
   6056  1.1  mrg 
   6057  1.1  mrg 	      /* ??? We shouldn't have to worry about SCANSET here.
   6058  1.1  mrg 		 We should just be able to check for a REG_DEAD note
   6059  1.1  mrg 		 on a function call.  However, the REG_DEAD notes are
   6060  1.1  mrg 		 apparently not dependable around libcalls; c-torture
   6061  1.1  mrg 		 execute/920501-2 is a test case.  If SCANSET is set,
   6062  1.1  mrg 		 then this insn sets the register, so it must have
   6063  1.1  mrg 		 died earlier.  Unfortunately, this will only handle
   6064  1.1  mrg 		 the cases in which the register is, in fact, set in a
   6065  1.1  mrg 		 later insn.  */
   6066  1.1  mrg 
   6067  1.1  mrg 	      /* ??? We shouldn't have to use FOUNDINSN here.
   6068  1.1  mrg 		 This dates back to when we used LOG_LINKS to find
   6069  1.1  mrg 		 the most recent insn which sets the register.  */
   6070  1.1  mrg 
   6071  1.1  mrg 	      if (foundinsn
   6072  1.1  mrg 		  && (scanset
   6073  1.1  mrg 		      || find_reg_note (scan, REG_DEAD, reg)))
   6074  1.1  mrg 		{
   6075  1.1  mrg 		  dies = scan;
   6076  1.1  mrg 		  break;
   6077  1.1  mrg 		}
   6078  1.1  mrg 	    }
   6079  1.1  mrg 
   6080  1.1  mrg 	  if (! dies)
   6081  1.1  mrg 	    {
   6082  1.1  mrg 	      /* Either there was a branch, or some insn used REG
   6083  1.1  mrg 		 other than as a function call address.  */
   6084  1.1  mrg 	      continue;
   6085  1.1  mrg 	    }
   6086  1.1  mrg 
   6087  1.1  mrg 	  /* Create a code label, and put it in a REG_LABEL_OPERAND note
   6088  1.1  mrg 	     on the insn which sets the register, and on each call insn
   6089  1.1  mrg 	     which uses the register.  In final_prescan_insn we look for
   6090  1.1  mrg 	     the REG_LABEL_OPERAND notes, and output the appropriate label
   6091  1.1  mrg 	     or pseudo-op.  */
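	  /* Assembly sketch (label spelling and numbering are illustrative
	     assumptions, not taken from actual compiler output):
		.L9:
			mov.l	.L10,r1
			...
			.uses	.L9
			jsr	@r1
	     which is what lets the linker relax the jsr into a bsr.  */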
   6092  1.1  mrg 
   6093  1.1  mrg 	  label = gen_label_rtx ();
   6094  1.1  mrg 	  add_reg_note (link, REG_LABEL_OPERAND, label);
   6095  1.1  mrg 	  add_reg_note (insn, REG_LABEL_OPERAND, label);
   6096  1.1  mrg 	  if (rescan)
   6097  1.1  mrg 	    {
   6098  1.1  mrg 	      scan = link;
   6099  1.1  mrg 	      do
   6100  1.1  mrg 		{
   6101  1.1  mrg 		  rtx reg2;
   6102  1.1  mrg 
   6103  1.1  mrg 		  scan = NEXT_INSN (scan);
   6104  1.1  mrg 		  if (scan != insn
   6105  1.1  mrg 		      && ((CALL_P (scan)
   6106  1.1  mrg 			   && reg_mentioned_p (reg, scan))
   6107  1.1  mrg 			  || ((reg2 = sfunc_uses_reg (scan))
   6108  1.1  mrg 			      && REGNO (reg2) == REGNO (reg))))
   6109  1.1  mrg 		    add_reg_note (scan, REG_LABEL_OPERAND, label);
   6110  1.1  mrg 		}
   6111  1.1  mrg 	      while (scan != dies);
   6112  1.1  mrg 	    }
   6113  1.1  mrg 	}
   6114  1.1  mrg     }
   6115  1.1  mrg 
   6116  1.1  mrg   if (TARGET_SH2)
   6117  1.1  mrg     fixup_addr_diff_vecs (first);
   6118  1.1  mrg 
   6119  1.1  mrg   if (optimize)
   6120  1.1  mrg     {
   6121  1.1  mrg       mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
   6122  1.1  mrg       shorten_branches (first);
   6123  1.1  mrg     }
   6124  1.1  mrg 
   6125  1.1  mrg   /* Scan the function looking for move instructions which have to be
   6126  1.1  mrg      changed to pc-relative loads and insert the literal tables.  */
   6127  1.1  mrg   mdep_reorg_phase = SH_FIXUP_PCLOAD;
   6128  1.1  mrg   for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
   6129  1.1  mrg     {
   6130  1.1  mrg       if (mova_p (insn))
   6131  1.1  mrg 	{
   6132  1.1  mrg 	  /* ??? basic block reordering can move a switch table dispatch
   6133  1.1  mrg 	     below the switch table.  Check if that has happened.
   6134  1.1  mrg 	     We only have the addresses available when optimizing; but then,
   6135  1.1  mrg 	     this check shouldn't be needed when not optimizing.  */
   6136  1.1  mrg 	  if (!untangle_mova (&num_mova, &mova, insn))
   6137  1.1  mrg 	    {
   6138  1.1  mrg 	      insn = mova;
   6139  1.1  mrg 	      num_mova = 0;
   6140  1.1  mrg 	    }
   6141  1.1  mrg 	}
   6142  1.1  mrg       else if (JUMP_TABLE_DATA_P (insn)
   6143  1.1  mrg 	       && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
   6144  1.1  mrg 	       && num_mova
   6145  1.1  mrg 	       /* ??? loop invariant motion can also move a mova out of a
   6146  1.1  mrg 		  loop.  Since loop does this code motion anyway, maybe we
   6147  1.1  mrg 		  should wrap UNSPEC_MOVA into a CONST, so that reload can
   6148  1.1  mrg 		  move it back.  */
   6149  1.1  mrg 	       && ((num_mova > 1
   6150  1.1  mrg 		    && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
   6151  1.1  mrg 		   || (prev_nonnote_insn (insn)
   6152  1.1  mrg 		       == XEXP (MOVA_LABELREF (mova), 0))))
   6153  1.1  mrg 	{
   6154  1.1  mrg 	  rtx_insn *scan;
   6155  1.1  mrg 	  int total;
   6156  1.1  mrg 
   6157  1.1  mrg 	  num_mova--;
   6158  1.1  mrg 
   6159  1.1  mrg 	  /* Some code might have been inserted between the mova and
   6160  1.1  mrg 	     its ADDR_DIFF_VEC.  Check if the mova is still in range.  */
   6161  1.1  mrg 	  for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
   6162  1.1  mrg 	    total += get_attr_length (scan);
   6163  1.1  mrg 
    6164  1.1  mrg 	  /* The range of mova is 1020; add 4 because pc counts from the
    6165  1.1  mrg 	     address of the second instruction after this one, and subtract 2
    6166  1.1  mrg 	     in case pc is 2 byte aligned.  Any alignment needed for the
    6167  1.1  mrg 	     ADDR_DIFF_VEC cancels out with the alignment effects of the mova.  */
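	  /* That is, the check below uses 1020 + 4 - 2 = 1022 as the limit
	     on the byte distance between the mova and its ADDR_DIFF_VEC.  */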
   6168  1.1  mrg 	  if (total > 1022)
   6169  1.1  mrg 	    {
   6170  1.1  mrg 	      /* Change the mova into a load, and restart scanning
   6171  1.1  mrg 		 there.  broken_move will then return true for mova.  */
   6172  1.1  mrg 	      fixup_mova (mova);
   6173  1.1  mrg 	      insn = mova;
   6174  1.1  mrg 	    }
   6175  1.1  mrg 	}
   6176  1.1  mrg       if (broken_move (insn)
   6177  1.1  mrg 	  || (NONJUMP_INSN_P (insn)
   6178  1.1  mrg 	      && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
   6179  1.1  mrg 	{
   6180  1.1  mrg 	  rtx_insn *scan;
   6181  1.1  mrg 	  /* Scan ahead looking for a barrier to stick the constant table
   6182  1.1  mrg 	     behind.  */
   6183  1.1  mrg 	  rtx_insn *barrier = find_barrier (num_mova, mova, insn);
   6184  1.1  mrg 	  rtx_insn *last_float_move = NULL;
   6185  1.1  mrg 	  rtx last_float = 0, *last_float_addr = NULL;
   6186  1.1  mrg 	  int need_aligned_label = 0;
   6187  1.1  mrg 
   6188  1.1  mrg 	  if (num_mova && ! mova_p (mova))
   6189  1.1  mrg 	    {
   6190  1.1  mrg 	      /* find_barrier had to change the first mova into a
   6191  1.1  mrg 		 pcload; thus, we have to start with this new pcload.  */
   6192  1.1  mrg 	      insn = mova;
   6193  1.1  mrg 	      num_mova = 0;
   6194  1.1  mrg 	    }
   6195  1.1  mrg 	  /* Now find all the moves between the points and modify them.  */
   6196  1.1  mrg 	  for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
   6197  1.1  mrg 	    {
   6198  1.1  mrg 	      if (LABEL_P (scan))
   6199  1.1  mrg 		last_float = 0;
   6200  1.1  mrg 	      if (NONJUMP_INSN_P (scan)
   6201  1.1  mrg 		  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
   6202  1.1  mrg 		need_aligned_label = 1;
   6203  1.1  mrg 	      if (broken_move (scan))
   6204  1.1  mrg 		{
   6205  1.1  mrg 		  rtx *patp = &PATTERN (scan), pat = *patp;
   6206  1.1  mrg 		  rtx src, dst;
   6207  1.1  mrg 		  rtx lab;
   6208  1.1  mrg 		  rtx newsrc;
   6209  1.1  mrg 		  machine_mode mode;
   6210  1.1  mrg 
   6211  1.1  mrg 		  if (GET_CODE (pat) == PARALLEL)
   6212  1.1  mrg 		    patp = &XVECEXP (pat, 0, 0), pat = *patp;
   6213  1.1  mrg 		  src = SET_SRC (pat);
   6214  1.1  mrg 		  dst = SET_DEST (pat);
   6215  1.1  mrg 		  mode = GET_MODE (dst);
   6216  1.1  mrg 
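		  /* A constant that fits in 16 bits can be fetched with a
		     HImode pc-relative load, which sign-extends on SH, so
		     its constant table entry needs only two bytes; hence the
		     mode switch below.  */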
   6217  1.1  mrg 		  if (mode == SImode && satisfies_constraint_I16 (src)
   6218  1.1  mrg 		      && REGNO (dst) != FPUL_REG)
   6219  1.1  mrg 		    {
   6220  1.1  mrg 		      int offset = 0;
   6221  1.1  mrg 
   6222  1.1  mrg 		      mode = HImode;
   6223  1.1  mrg 		      while (GET_CODE (dst) == SUBREG)
   6224  1.1  mrg 			{
   6225  1.1  mrg 			  offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
   6226  1.1  mrg 							 GET_MODE (SUBREG_REG (dst)),
   6227  1.1  mrg 							 SUBREG_BYTE (dst),
   6228  1.1  mrg 							 GET_MODE (dst));
   6229  1.1  mrg 			  dst = SUBREG_REG (dst);
   6230  1.1  mrg 			}
   6231  1.1  mrg 		      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
   6232  1.1  mrg 		    }
   6233  1.1  mrg 		  if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
   6234  1.1  mrg 		    {
   6235  1.1  mrg 		      /* This must be an insn that clobbers r0.  */
   6236  1.1  mrg 		      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
   6237  1.1  mrg 						XVECLEN (PATTERN (scan), 0)
   6238  1.1  mrg 						- 1);
   6239  1.1  mrg 		      rtx clobber = *clobberp;
   6240  1.1  mrg 
   6241  1.1  mrg 		      gcc_assert (GET_CODE (clobber) == CLOBBER
   6242  1.1  mrg 				  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
   6243  1.1  mrg 
   6244  1.1  mrg 		      if (last_float
   6245  1.1  mrg 			  && reg_set_between_p (r0_rtx, last_float_move, scan))
   6246  1.1  mrg 			last_float = 0;
   6247  1.1  mrg 		      lab = add_constant (src, mode, last_float);
   6248  1.1  mrg 		      if (lab)
   6249  1.1  mrg 			emit_insn_before (gen_mova (lab), scan);
   6250  1.1  mrg 		      else
   6251  1.1  mrg 			{
   6252  1.1  mrg 			  /* There will be a REG_UNUSED note for r0 on
   6253  1.1  mrg 			     LAST_FLOAT_MOVE; we have to change it to REG_INC,
    6254  1.1  mrg 			     otherwise reorg:mark_target_live_regs would not
    6255  1.1  mrg 			     consider r0 to be used, and we could end up with a
    6256  1.1  mrg 			     delay slot insn in front of SCAN that clobbers r0.  */
   6257  1.1  mrg 			  rtx note
   6258  1.1  mrg 			    = find_regno_note (last_float_move, REG_UNUSED, 0);
   6259  1.1  mrg 
   6260  1.1  mrg 			  /* If we are not optimizing, then there may not be
   6261  1.1  mrg 			     a note.  */
   6262  1.1  mrg 			  if (note)
   6263  1.1  mrg 			    PUT_REG_NOTE_KIND (note, REG_INC);
   6264  1.1  mrg 
   6265  1.1  mrg 			  *last_float_addr = r0_inc_rtx;
   6266  1.1  mrg 			}
   6267  1.1  mrg 		      last_float_move = scan;
   6268  1.1  mrg 		      last_float = src;
   6269  1.1  mrg 		      newsrc = gen_const_mem (mode,
   6270  1.1  mrg 					(((TARGET_SH4 && ! TARGET_FMOVD)
   6271  1.1  mrg 					  || REGNO (dst) == FPUL_REG)
   6272  1.1  mrg 					 ? r0_inc_rtx
   6273  1.1  mrg 					 : r0_rtx));
   6274  1.1  mrg 		      last_float_addr = &XEXP (newsrc, 0);
   6275  1.1  mrg 
   6276  1.1  mrg 		      /* Remove the clobber of r0.  */
   6277  1.1  mrg 		      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
   6278  1.1  mrg 						   gen_rtx_SCRATCH (Pmode));
   6279  1.1  mrg 		    }
   6280  1.1  mrg 		  /* This is a mova needing a label.  Create it.  */
   6281  1.1  mrg 		  else if (GET_CODE (src) == UNSPEC
   6282  1.1  mrg 			   && XINT (src, 1) == UNSPEC_MOVA
   6283  1.1  mrg 			   && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
   6284  1.1  mrg 		    {
   6285  1.1  mrg 		      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
   6286  1.1  mrg 		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
   6287  1.1  mrg 		      newsrc = gen_rtx_UNSPEC (SImode,
   6288  1.1  mrg 					       gen_rtvec (1, newsrc),
   6289  1.1  mrg 					       UNSPEC_MOVA);
   6290  1.1  mrg 		    }
   6291  1.1  mrg 		  else if (GET_CODE (src) == UNSPEC_VOLATILE
   6292  1.1  mrg 			   && XINT (src, 1) == UNSPECV_SP_SWITCH_B)
   6293  1.1  mrg 		    {
   6294  1.1  mrg 		      newsrc = XVECEXP (src, 0, 0);
   6295  1.1  mrg 		      XVECEXP (src, 0, 0) = gen_const_mem (mode, newsrc);
   6296  1.1  mrg 		      INSN_CODE (scan) = -1;
   6297  1.1  mrg 		      continue;
   6298  1.1  mrg 		    }
   6299  1.1  mrg 		  else
   6300  1.1  mrg 		    {
   6301  1.1  mrg 		      lab = add_constant (src, mode, 0);
   6302  1.1  mrg 		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
   6303  1.1  mrg 		      newsrc = gen_const_mem (mode, newsrc);
   6304  1.1  mrg 		    }
   6305  1.1  mrg 		  *patp = gen_rtx_SET (dst, newsrc);
   6306  1.1  mrg 		  INSN_CODE (scan) = -1;
   6307  1.1  mrg 		}
   6308  1.1  mrg 	    }
   6309  1.1  mrg 	  dump_table (need_aligned_label ? insn : 0, barrier);
   6310  1.1  mrg 	  insn = barrier;
   6311  1.1  mrg 	}
   6312  1.1  mrg     }
   6313  1.1  mrg   label_ref_list_d_pool.release ();
   6314  1.1  mrg   for (insn = first; insn; insn = NEXT_INSN (insn))
   6315  1.1  mrg     PUT_MODE (insn, VOIDmode);
   6316  1.1  mrg 
   6317  1.1  mrg   mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
   6318  1.1  mrg   INSN_ADDRESSES_FREE ();
   6319  1.1  mrg   split_branches (first);
   6320  1.1  mrg 
   6321  1.1  mrg   /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
   6322  1.1  mrg      also has an effect on the register that holds the address of the sfunc.
   6323  1.1  mrg      Insert an extra dummy insn in front of each sfunc that pretends to
   6324  1.1  mrg      use this register.  */
   6325  1.1  mrg   if (flag_delayed_branch)
   6326  1.1  mrg     {
   6327  1.1  mrg       for (insn = first; insn; insn = NEXT_INSN (insn))
   6328  1.1  mrg 	{
   6329  1.1  mrg 	  rtx reg = sfunc_uses_reg (insn);
   6330  1.1  mrg 
   6331  1.1  mrg 	  if (! reg)
   6332  1.1  mrg 	    continue;
   6333  1.1  mrg 	  emit_insn_before (gen_use_sfunc_addr (reg), insn);
   6334  1.1  mrg 	}
   6335  1.1  mrg     }
   6336  1.1  mrg   mdep_reorg_phase = SH_AFTER_MDEP_REORG;
   6337  1.1  mrg }
   6338  1.1  mrg 
   6339  1.1  mrg /* Return the UID of the insn that follows the specified label.  */
   6340  1.1  mrg int
   6341  1.1  mrg get_dest_uid (rtx_insn *label, int max_uid)
   6342  1.1  mrg {
   6343  1.1  mrg   rtx_insn *dest = next_real_insn (label);
   6344  1.1  mrg 
   6345  1.1  mrg   if (! dest)
   6346  1.1  mrg     /* This can happen for an undefined label.  */
   6347  1.1  mrg     return 0;
   6348  1.1  mrg   int dest_uid = INSN_UID (dest);
   6349  1.1  mrg   /* If this is a newly created branch redirection blocking instruction,
   6350  1.1  mrg      we cannot index the branch_uid or insn_addresses arrays with its
   6351  1.1  mrg      uid.  But then, we won't need to, because the actual destination is
   6352  1.1  mrg      the following branch.  */
   6353  1.1  mrg   while (dest_uid >= max_uid)
   6354  1.1  mrg     {
   6355  1.1  mrg       dest = NEXT_INSN (dest);
   6356  1.1  mrg       dest_uid = INSN_UID (dest);
   6357  1.1  mrg     }
   6358  1.1  mrg   if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
   6359  1.1  mrg     return 0;
   6360  1.1  mrg   return dest_uid;
   6361  1.1  mrg }
   6362  1.1  mrg 
   6363  1.1  mrg /* Split condbranches that are out of range.  Also add clobbers for
   6364  1.1  mrg    scratch registers that are needed in far jumps.
   6365  1.1  mrg    We do this before delay slot scheduling, so that it can take our
   6366  1.1  mrg    newly created instructions into account.  It also allows us to
   6367  1.1  mrg    find branches with common targets more easily.  */
   6368  1.1  mrg static void
   6369  1.1  mrg split_branches (rtx_insn *first)
   6370  1.1  mrg {
   6371  1.1  mrg   rtx_insn *insn;
   6372  1.1  mrg   struct far_branch **uid_branch, *far_branch_list = 0;
   6373  1.1  mrg   int max_uid = get_max_uid ();
   6374  1.1  mrg   int ok;
   6375  1.1  mrg 
   6376  1.1  mrg   /* Find out which branches are out of range.  */
   6377  1.1  mrg   shorten_branches (first);
   6378  1.1  mrg 
   6379  1.1  mrg   uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
   6380  1.1  mrg   memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
   6381  1.1  mrg 
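  /* uid_branch maps the uid of a branch destination to its far_branch
     record, so branches that share a target can also share the near label
     and far branch stub created for it.  */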
   6382  1.1  mrg   for (insn = first; insn; insn = NEXT_INSN (insn))
   6383  1.1  mrg     if (! INSN_P (insn))
   6384  1.1  mrg       continue;
   6385  1.1  mrg     else if (insn->deleted ())
   6386  1.1  mrg       {
   6387  1.1  mrg 	/* Shorten_branches would split this instruction again,
   6388  1.1  mrg 	   so transform it into a note.  */
   6389  1.1  mrg 	SET_INSN_DELETED (insn);
   6390  1.1  mrg       }
   6391  1.1  mrg     else if (JUMP_P (insn))
   6392  1.1  mrg       {
   6393  1.1  mrg 	enum attr_type type = get_attr_type (insn);
   6394  1.1  mrg 	if (type == TYPE_CBRANCH)
   6395  1.1  mrg 	  {
   6396  1.1  mrg 	    rtx_insn *next, *beyond;
   6397  1.1  mrg 
   6398  1.1  mrg 	    if (get_attr_length (insn) > 4)
   6399  1.1  mrg 	      {
   6400  1.1  mrg 		rtx src = SET_SRC (PATTERN (insn));
   6401  1.1  mrg 		rtx_insn *olabel = safe_as_a <rtx_insn *> (XEXP (XEXP (src, 1), 0));
   6402  1.1  mrg 		int addr = INSN_ADDRESSES (INSN_UID (insn));
   6403  1.1  mrg 		rtx_insn *label = 0;
   6404  1.1  mrg 		int dest_uid = get_dest_uid (olabel, max_uid);
   6405  1.1  mrg 		struct far_branch *bp = uid_branch[dest_uid];
   6406  1.1  mrg 
   6407  1.1  mrg 		/* redirect_jump needs a valid JUMP_LABEL, and it might delete
   6408  1.1  mrg 		   the label if the LABEL_NUSES count drops to zero.  There is
   6409  1.1  mrg 		   always a jump_optimize pass that sets these values, but it
   6410  1.1  mrg 		   proceeds to delete unreferenced code, and then if not
   6411  1.1  mrg 		   optimizing, to un-delete the deleted instructions, thus
    6412  1.1  mrg 		   leaving labels with use counts that are too low.  */
   6413  1.1  mrg 		if (! optimize)
   6414  1.1  mrg 		  {
   6415  1.1  mrg 		    JUMP_LABEL (insn) = olabel;
   6416  1.1  mrg 		    LABEL_NUSES (olabel)++;
   6417  1.1  mrg 		  }
   6418  1.1  mrg 		if (! bp)
   6419  1.1  mrg 		  {
   6420  1.1  mrg 		    bp = (struct far_branch *) alloca (sizeof *bp);
   6421  1.1  mrg 		    uid_branch[dest_uid] = bp;
   6422  1.1  mrg 		    bp->prev = far_branch_list;
   6423  1.1  mrg 		    far_branch_list = bp;
   6424  1.1  mrg 		    bp->far_label = as_a <rtx_insn *> (
   6425  1.1  mrg 				      XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
   6426  1.1  mrg 					    0));
   6427  1.1  mrg 		    LABEL_NUSES (bp->far_label)++;
   6428  1.1  mrg 		  }
   6429  1.1  mrg 		else
   6430  1.1  mrg 		  {
   6431  1.1  mrg 		    label = bp->near_label;
   6432  1.1  mrg 		    if (! label && bp->address - addr >= CONDJUMP_MIN)
   6433  1.1  mrg 		      {
   6434  1.1  mrg 			rtx_insn *block = bp->insert_place;
   6435  1.1  mrg 
   6436  1.1  mrg 			if (GET_CODE (PATTERN (block)) == RETURN)
   6437  1.1  mrg 			  block = PREV_INSN (block);
   6438  1.1  mrg 			else
   6439  1.1  mrg 			  block = gen_block_redirect (block,
   6440  1.1  mrg 						      bp->address, 2);
   6441  1.1  mrg 			label = emit_label_after (gen_label_rtx (),
   6442  1.1  mrg 						  PREV_INSN (block));
   6443  1.1  mrg 			bp->near_label = label;
   6444  1.1  mrg 		      }
   6445  1.1  mrg 		    else if (label && ! NEXT_INSN (label))
   6446  1.1  mrg 		      {
   6447  1.1  mrg 			if (addr + 2 - bp->address <= CONDJUMP_MAX)
   6448  1.1  mrg 			  bp->insert_place = insn;
   6449  1.1  mrg 			else
   6450  1.1  mrg 			  gen_far_branch (bp);
   6451  1.1  mrg 		      }
   6452  1.1  mrg 		  }
   6453  1.1  mrg 		if (! label
   6454  1.1  mrg 		    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
   6455  1.1  mrg 		  {
   6456  1.1  mrg 		    bp->near_label = label = gen_label_rtx ();
   6457  1.1  mrg 		    bp->insert_place = insn;
   6458  1.1  mrg 		    bp->address = addr;
   6459  1.1  mrg 		  }
   6460  1.1  mrg 		ok = redirect_jump (as_a <rtx_jump_insn *> (insn), label, 0);
   6461  1.1  mrg 		gcc_assert (ok);
   6462  1.1  mrg 	      }
   6463  1.1  mrg 	    else
   6464  1.1  mrg 	      {
   6465  1.1  mrg 		/* get_attr_length (insn) == 2 */
   6466  1.1  mrg 		/* Check if we have a pattern where reorg wants to redirect
   6467  1.1  mrg 		   the branch to a label from an unconditional branch that
   6468  1.1  mrg 		   is too far away.  */
   6469  1.1  mrg 		/* We can't use JUMP_LABEL here because it might be undefined
   6470  1.1  mrg 		   when not optimizing.  */
   6471  1.1  mrg 		/* A syntax error might cause beyond to be NULL_RTX.  */
   6472  1.1  mrg 		rtx temp = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
   6473  1.1  mrg 		beyond = next_active_insn (as_a<rtx_insn *> (temp));
   6474  1.1  mrg 
   6475  1.1  mrg 		if (beyond
   6476  1.1  mrg 		    && (JUMP_P (beyond)
   6477  1.1  mrg 			|| ((beyond = next_active_insn (beyond))
   6478  1.1  mrg 			    && JUMP_P (beyond)))
   6479  1.1  mrg 		    && GET_CODE (PATTERN (beyond)) == SET
   6480  1.1  mrg 		    && recog_memoized (beyond) == CODE_FOR_jump_compact
   6481  1.1  mrg 		    && ((INSN_ADDRESSES
   6482  1.1  mrg 			 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
   6483  1.1  mrg 			 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
   6484  1.1  mrg 			> 252 + 258 + 2))
   6485  1.1  mrg 		  gen_block_redirect (beyond,
   6486  1.1  mrg 				      INSN_ADDRESSES (INSN_UID (beyond)), 1);
   6487  1.1  mrg 	      }
   6488  1.1  mrg 
   6489  1.1  mrg 	    next = next_active_insn (insn);
   6490  1.1  mrg 
   6491  1.1  mrg 	    if (next
   6492  1.1  mrg 		&& (JUMP_P (next)
   6493  1.1  mrg 		    || ((next = next_active_insn (next))
   6494  1.1  mrg 			&& JUMP_P (next)))
   6495  1.1  mrg 		&& GET_CODE (PATTERN (next)) == SET
   6496  1.1  mrg 		&& recog_memoized (next) == CODE_FOR_jump_compact
   6497  1.1  mrg 		&& ((INSN_ADDRESSES
   6498  1.1  mrg 		     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
   6499  1.1  mrg 		     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
   6500  1.1  mrg 		    > 252 + 258 + 2))
   6501  1.1  mrg 	      gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
   6502  1.1  mrg 	  }
   6503  1.1  mrg 	else if (type == TYPE_JUMP || type == TYPE_RETURN)
   6504  1.1  mrg 	  {
   6505  1.1  mrg 	    int addr = INSN_ADDRESSES (INSN_UID (insn));
   6506  1.1  mrg 	    rtx_insn *far_label = 0;
   6507  1.1  mrg 	    int dest_uid = 0;
   6508  1.1  mrg 	    struct far_branch *bp;
   6509  1.1  mrg 
   6510  1.1  mrg 	    if (type == TYPE_JUMP)
   6511  1.1  mrg 	      {
   6512  1.1  mrg 		if (CROSSING_JUMP_P (insn))
   6513  1.1  mrg 		  {
   6514  1.1  mrg 		    emit_insn_before (gen_block_branch_redirect (const0_rtx),
   6515  1.1  mrg 				      insn);
   6516  1.1  mrg 		    continue;
   6517  1.1  mrg 		  }
   6518  1.1  mrg 
   6519  1.1  mrg 		far_label = as_a <rtx_insn *> (
   6520  1.1  mrg 			      XEXP (SET_SRC (PATTERN (insn)), 0));
   6521  1.1  mrg 		dest_uid = get_dest_uid (far_label, max_uid);
   6522  1.1  mrg 		if (! dest_uid)
   6523  1.1  mrg 		  {
   6524  1.1  mrg 		    /* Parse errors can lead to labels outside
   6525  1.1  mrg 		      the insn stream.  */
   6526  1.1  mrg 		    if (! NEXT_INSN (far_label))
   6527  1.1  mrg 		      continue;
   6528  1.1  mrg 
   6529  1.1  mrg 		    if (! optimize)
   6530  1.1  mrg 		      {
   6531  1.1  mrg 			JUMP_LABEL (insn) = far_label;
   6532  1.1  mrg 			LABEL_NUSES (far_label)++;
   6533  1.1  mrg 		      }
   6534  1.1  mrg 		    redirect_jump (as_a <rtx_jump_insn *> (insn), ret_rtx, 1);
   6535  1.1  mrg 		    far_label = 0;
   6536  1.1  mrg 		  }
   6537  1.1  mrg 	      }
   6538  1.1  mrg 	    bp = uid_branch[dest_uid];
   6539  1.1  mrg 	    if (! bp)
   6540  1.1  mrg 	      {
   6541  1.1  mrg 		bp = (struct far_branch *) alloca (sizeof *bp);
   6542  1.1  mrg 		uid_branch[dest_uid] = bp;
   6543  1.1  mrg 		bp->prev = far_branch_list;
   6544  1.1  mrg 		far_branch_list = bp;
   6545  1.1  mrg 		bp->near_label = 0;
   6546  1.1  mrg 		bp->far_label = far_label;
   6547  1.1  mrg 		if (far_label)
   6548  1.1  mrg 		  LABEL_NUSES (far_label)++;
   6549  1.1  mrg 	      }
   6550  1.1  mrg 	    else if (bp->near_label && ! NEXT_INSN (bp->near_label))
   6551  1.1  mrg 	      if (addr - bp->address <= CONDJUMP_MAX)
   6552  1.1  mrg 		emit_label_after (bp->near_label, PREV_INSN (insn));
   6553  1.1  mrg 	      else
   6554  1.1  mrg 		{
   6555  1.1  mrg 		  gen_far_branch (bp);
   6556  1.1  mrg 		  bp->near_label = 0;
   6557  1.1  mrg 		}
   6558  1.1  mrg 	    else
   6559  1.1  mrg 	      bp->near_label = 0;
   6560  1.1  mrg 	    bp->address = addr;
   6561  1.1  mrg 	    bp->insert_place = insn;
   6562  1.1  mrg 	    if (! far_label)
   6563  1.1  mrg 	      emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
   6564  1.1  mrg 	    else
   6565  1.1  mrg 	      gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
   6566  1.1  mrg 	  }
   6567  1.1  mrg       }
   6568  1.1  mrg   /* Generate all pending far branches,
   6569  1.1  mrg      and free our references to the far labels.  */
   6570  1.1  mrg   while (far_branch_list)
   6571  1.1  mrg     {
   6572  1.1  mrg       if (far_branch_list->near_label
   6573  1.1  mrg 	  && ! NEXT_INSN (far_branch_list->near_label))
   6574  1.1  mrg 	gen_far_branch (far_branch_list);
   6575  1.1  mrg       if (optimize
   6576  1.1  mrg 	  && far_branch_list->far_label
   6577  1.1  mrg 	  && ! --LABEL_NUSES (far_branch_list->far_label))
   6578  1.1  mrg 	delete_insn (far_branch_list->far_label);
   6579  1.1  mrg       far_branch_list = far_branch_list->prev;
   6580  1.1  mrg     }
   6581  1.1  mrg 
   6582  1.1  mrg   /* Instruction length information is no longer valid due to the new
   6583  1.1  mrg      instructions that have been generated.  */
   6584  1.1  mrg   init_insn_lengths ();
   6585  1.1  mrg }
   6586  1.1  mrg 
   6587  1.1  mrg /* Dump out instruction addresses, which is useful for debugging the
   6588  1.1  mrg    constant pool table stuff.
   6589  1.1  mrg 
   6590  1.1  mrg    If relaxing, output the label and pseudo-ops used to link together
    6591  1.1  mrg    calls and the instructions which set the registers.
   6592  1.1  mrg 
   6593  1.1  mrg    ??? The addresses printed by this routine for insns are nonsense for
   6594  1.1  mrg    insns which are inside of a sequence where none of the inner insns have
   6595  1.1  mrg    variable length.  This is because the second pass of shorten_branches
   6596  1.1  mrg    does not bother to update them.  */
   6597  1.1  mrg void
   6598  1.1  mrg final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
   6599  1.1  mrg 		    int noperands ATTRIBUTE_UNUSED)
   6600  1.1  mrg {
   6601  1.1  mrg   if (TARGET_DUMPISIZE)
   6602  1.1  mrg     fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
   6603  1.1  mrg 
   6604  1.1  mrg   if (TARGET_RELAX)
   6605  1.1  mrg     {
   6606  1.1  mrg       if (rtx note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX))
   6607  1.1  mrg 	{
   6608  1.1  mrg 	  rtx pattern = PATTERN (insn);
   6609  1.1  mrg 	  if (GET_CODE (pattern) == PARALLEL)
   6610  1.1  mrg 	    pattern = XVECEXP (pattern, 0, 0);
   6611  1.1  mrg 	  switch (GET_CODE (pattern))
   6612  1.1  mrg 	    {
   6613  1.1  mrg 	    case SET:
   6614  1.1  mrg 	      if (GET_CODE (SET_SRC (pattern)) != CALL
   6615  1.1  mrg 		  && get_attr_type (insn) != TYPE_SFUNC)
   6616  1.1  mrg 		{
   6617  1.1  mrg 		  targetm.asm_out.internal_label
   6618  1.1  mrg 		    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
   6619  1.1  mrg 		  break;
   6620  1.1  mrg 		}
   6621  1.1  mrg 	      /* FALLTHROUGH */
   6622  1.1  mrg 	    case CALL:
   6623  1.1  mrg 	      asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
   6624  1.1  mrg 			   CODE_LABEL_NUMBER (XEXP (note, 0)));
   6625  1.1  mrg 	      break;
   6626  1.1  mrg 
   6627  1.1  mrg 	    default:
   6628  1.1  mrg 	      gcc_unreachable ();
   6629  1.1  mrg 	    }
   6630  1.1  mrg 	}
   6631  1.1  mrg     }
   6632  1.1  mrg }
   6633  1.1  mrg 
   6634  1.1  mrg /* Dump out any constants accumulated in the final pass.  These will
   6635  1.1  mrg    only be labels.  */
   6636  1.1  mrg const char *
   6637  1.1  mrg output_jump_label_table (void)
   6638  1.1  mrg {
   6639  1.1  mrg   if (pool_size)
   6640  1.1  mrg     {
   6641  1.1  mrg       fprintf (asm_out_file, "\t.align 2\n");
   6642  1.1  mrg       for (int i = 0; i < pool_size; i++)
   6643  1.1  mrg 	{
   6644  1.1  mrg 	  pool_node *p = &pool_vector[i];
   6645  1.1  mrg 
   6646  1.1  mrg 	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
   6647  1.1  mrg 				     CODE_LABEL_NUMBER (p->label));
   6648  1.1  mrg 	  output_asm_insn (".long	%O0", &p->value);
   6649  1.1  mrg 	}
   6650  1.1  mrg       pool_size = 0;
   6651  1.1  mrg     }
   6652  1.1  mrg 
   6653  1.1  mrg   return "";
   6654  1.1  mrg }
   6655  1.1  mrg 
   6656  1.1  mrg /* A full frame looks like:
   6658  1.1  mrg 
   6659  1.1  mrg    arg-5
   6660  1.1  mrg    arg-4
   6661  1.1  mrg    [ if current_function_anonymous_args
   6662  1.1  mrg    arg-3
   6663  1.1  mrg    arg-2
   6664  1.1  mrg    arg-1
   6665  1.1  mrg    arg-0 ]
   6666  1.1  mrg    saved-fp
   6667  1.1  mrg    saved-r10
   6668  1.1  mrg    saved-r11
   6669  1.1  mrg    saved-r12
   6670  1.1  mrg    saved-pr
   6671  1.1  mrg    local-n
   6672  1.1  mrg    ..
   6673  1.1  mrg    local-1
   6674  1.1  mrg    local-0        <- fp points here.
   6675  1.1  mrg 
    6676  1.1  mrg    The number of bytes pushed for anonymous args is used to pass
    6677  1.1  mrg    information between expand_prologue and expand_epilogue.
   6678  1.1  mrg 
   6679  1.1  mrg    Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
   6680  1.1  mrg    adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
   6681  1.1  mrg    for an epilogue and a negative value means that it's for a sibcall
   6682  1.1  mrg    epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
   6683  1.1  mrg    all the registers that are about to be restored, and hence dead.  */
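/* Note that SIZE is simply added to REG, so a negative SIZE moves the
   stack pointer down (allocating stack, as in the prologue) and a positive
   SIZE releases it again (as in the epilogue).  */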
   6684  1.1  mrg static void
   6685  1.1  mrg output_stack_adjust (int size, rtx reg, int epilogue_p,
   6686  1.1  mrg 		     HARD_REG_SET *live_regs_mask, bool frame_p)
   6687  1.1  mrg {
   6688  1.1  mrg   rtx_insn *(*emit_fn) (rtx) = frame_p ? &emit_frame_insn : &emit_insn;
   6689  1.1  mrg   if (size)
   6690  1.1  mrg     {
   6691  1.1  mrg       HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
   6692  1.1  mrg 
   6693  1.1  mrg /* This test is bogus, as output_stack_adjust is used to re-align the
   6694  1.1  mrg    stack.  */
   6695  1.1  mrg #if 0
   6696  1.1  mrg       gcc_assert (!(size % align));
   6697  1.1  mrg #endif
   6698  1.1  mrg 
   6699  1.1  mrg       if (CONST_OK_FOR_ADD (size))
   6700  1.1  mrg 	emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
   6701  1.1  mrg       /* Try to do it with two partial adjustments; however, we must make
   6702  1.1  mrg 	 sure that the stack is properly aligned at all times, in case
   6703  1.1  mrg 	 an interrupt occurs between the two partial adjustments.  */
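      /* Illustrative example with assumed values: for size == 192 and an
	 8 byte alignment, the adjustment is emitted as 96 + 96; both
	 constants satisfy CONST_OK_FOR_ADD and the stack stays aligned
	 after the first partial add.  */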
   6704  1.1  mrg       else if (CONST_OK_FOR_ADD (size / 2 & -align)
   6705  1.1  mrg 	       && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
   6706  1.1  mrg 	{
   6707  1.1  mrg 	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
   6708  1.1  mrg 	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
   6709  1.1  mrg 	}
   6710  1.1  mrg       else
   6711  1.1  mrg 	{
   6712  1.1  mrg 	  rtx const_reg;
   6713  1.1  mrg 	  rtx insn;
   6714  1.1  mrg 	  int temp = epilogue_p ? 7 : 1;
   6715  1.1  mrg 	  int i;
   6716  1.1  mrg 
   6717  1.1  mrg 	  /* If TEMP is invalid, we could temporarily save a general
   6718  1.1  mrg 	     register to MACL.  However, there is currently no need
   6719  1.1  mrg 	     to handle this case, so just die when we see it.  */
   6720  1.1  mrg 	  if (epilogue_p < 0
   6721  1.1  mrg 	      || current_function_interrupt
   6722  1.1  mrg 	      || ! call_used_regs[temp] || fixed_regs[temp])
   6723  1.1  mrg 	    temp = -1;
   6724  1.1  mrg 	  if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
   6725  1.1  mrg 	    {
   6726  1.1  mrg 	      HARD_REG_SET temps = (regs_invalidated_by_call
   6727  1.1  mrg 				    & ~fixed_reg_set
   6728  1.1  mrg 				    & savable_regs);
   6729  1.1  mrg 	      if (epilogue_p > 0)
   6730  1.1  mrg 		{
   6731  1.1  mrg 		  int nreg = 0;
   6732  1.1  mrg 		  if (crtl->return_rtx)
   6733  1.1  mrg 		    {
   6734  1.1  mrg 		      machine_mode mode;
   6735  1.1  mrg 		      mode = GET_MODE (crtl->return_rtx);
   6736  1.1  mrg 		      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
   6737  1.1  mrg 			nreg = hard_regno_nregs (FIRST_RET_REG, mode);
   6738  1.1  mrg 		    }
   6739  1.1  mrg 		  for (i = 0; i < nreg; i++)
   6740  1.1  mrg 		    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
   6741  1.1  mrg 		  if (crtl->calls_eh_return)
   6742  1.1  mrg 		    {
   6743  1.1  mrg 		      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
   6744  1.1  mrg 		      for (i = 0; i <= 3; i++)
   6745  1.1  mrg 			CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
   6746  1.1  mrg 		    }
   6747  1.1  mrg 		}
   6748  1.1  mrg 	      if (epilogue_p <= 0)
   6749  1.1  mrg 		{
   6750  1.1  mrg 		  for (i = FIRST_PARM_REG;
   6751  1.1  mrg 		       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
   6752  1.1  mrg 		    CLEAR_HARD_REG_BIT (temps, i);
   6753  1.1  mrg 		  if (cfun->static_chain_decl != NULL)
   6754  1.1  mrg 		    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
   6755  1.1  mrg 		}
   6756  1.1  mrg 	      temp = scavenge_reg (&temps);
   6757  1.1  mrg 	    }
   6758  1.1  mrg 	  if (temp < 0 && live_regs_mask)
   6759  1.1  mrg 	    {
   6760  1.1  mrg 	      HARD_REG_SET temps;
   6761  1.1  mrg 
   6762  1.1  mrg 	      temps = *live_regs_mask;
   6763  1.1  mrg 	      CLEAR_HARD_REG_BIT (temps, REGNO (reg));
   6764  1.1  mrg 	      temp = scavenge_reg (&temps);
   6765  1.1  mrg 	    }
   6766  1.1  mrg 	  if (temp < 0)
   6767  1.1  mrg 	    {
   6768  1.1  mrg 	      rtx adj_reg, tmp_reg, mem;
   6769  1.1  mrg 
   6770  1.1  mrg 	      /* If we reached here, the most likely case is the (sibcall)
    6771  1.1  mrg 		 epilogue.  Emit a special push/pop sequence for such a case
    6772  1.1  mrg 		 as a last resort.  It looks lengthy, but it should not be a
    6773  1.1  mrg 		 problem because the case seems to be very rare.  */
   6774  1.1  mrg 	      gcc_assert (epilogue_p);
   6775  1.1  mrg 
   6776  1.1  mrg 	      /* ??? There is still the slight possibility that r4 or
   6777  1.1  mrg 		  r5 have been reserved as fixed registers or assigned
   6778  1.1  mrg 		  as global registers, and they change during an
   6779  1.1  mrg 		  interrupt.  There are possible ways to handle this:
   6780  1.1  mrg 
   6781  1.1  mrg 		  - If we are adjusting the frame pointer (r14), we can do
   6782  1.1  mrg 		    with a single temp register and an ordinary push / pop
   6783  1.1  mrg 		    on the stack.
   6784  1.1  mrg 		  - Grab any call-used or call-saved registers (i.e. not
   6785  1.1  mrg 		    fixed or globals) for the temps we need.  We might
   6786  1.1  mrg 		    also grab r14 if we are adjusting the stack pointer.
   6787  1.1  mrg 		    If we can't find enough available registers, issue
   6788  1.1  mrg 		    a diagnostic and die - the user must have reserved
   6789  1.1  mrg 		    way too many registers.
   6790  1.1  mrg 		 But since all this is rather unlikely to happen and
   6791  1.1  mrg 		 would require extra testing, we just die if r4 / r5
   6792  1.1  mrg 		 are not available.  */
   6793  1.1  mrg 	      gcc_assert (!fixed_regs[4] && !fixed_regs[5]
   6794  1.1  mrg 			  && !global_regs[4] && !global_regs[5]);
   6795  1.1  mrg 
   6796  1.1  mrg 	      adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
   6797  1.1  mrg 	      tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
   6798  1.1  mrg 	      emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
   6799  1.1  mrg 	      emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
   6800  1.1  mrg 	      emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
   6801  1.1  mrg 	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
   6802  1.1  mrg 	      emit_move_insn (mem, tmp_reg);
   6803  1.1  mrg 	      emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
   6804  1.1  mrg 	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
   6805  1.1  mrg 	      emit_move_insn (mem, tmp_reg);
   6806  1.1  mrg 	      emit_move_insn (reg, adj_reg);
   6807  1.1  mrg 	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
   6808  1.1  mrg 	      emit_move_insn (adj_reg, mem);
   6809  1.1  mrg 	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
   6810  1.1  mrg 	      emit_move_insn (tmp_reg, mem);
   6811  1.1  mrg 	      /* Tell flow the insns that pop r4/r5 aren't dead.  */
   6812  1.1  mrg 	      emit_use (tmp_reg);
   6813  1.1  mrg 	      emit_use (adj_reg);
   6814  1.1  mrg 	      return;
   6815  1.1  mrg 	    }
   6816  1.1  mrg 	  const_reg = gen_rtx_REG (GET_MODE (reg), temp);
   6817  1.1  mrg 
   6818  1.1  mrg 	  /* If SIZE is negative, subtract the positive value.
   6819  1.1  mrg 	     This sometimes allows a constant pool entry to be shared
   6820  1.1  mrg 	     between prologue and epilogue code.  */
   6821  1.1  mrg 	  if (size < 0)
   6822  1.1  mrg 	    {
   6823  1.1  mrg 	      emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
   6824  1.1  mrg 	      insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
   6825  1.1  mrg 	    }
   6826  1.1  mrg 	  else
   6827  1.1  mrg 	    {
   6828  1.1  mrg 	      emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
   6829  1.1  mrg 	      insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
   6830  1.1  mrg 	    }
   6831  1.1  mrg 	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
   6832  1.1  mrg 			gen_rtx_SET (reg, gen_rtx_PLUS (SImode, reg,
   6833  1.1  mrg 							GEN_INT (size))));
   6834  1.1  mrg 	}
   6835  1.1  mrg     }
   6836  1.1  mrg }
   6837  1.1  mrg 
   6838  1.1  mrg /* Emit the specified insn and mark it as frame related.  */
   6839  1.1  mrg static rtx_insn *
   6840  1.1  mrg emit_frame_insn (rtx x)
   6841  1.1  mrg {
   6842  1.1  mrg   rtx_insn *insn = emit_insn (x);
   6843  1.1  mrg   RTX_FRAME_RELATED_P (insn) = 1;
   6844  1.1  mrg   return insn;
   6845  1.1  mrg }
   6846  1.1  mrg 
   6847  1.1  mrg /* Output RTL to push register RN onto the stack.  */
   6848  1.1  mrg static rtx
   6849  1.1  mrg push (int rn)
   6850  1.1  mrg {
   6851  1.1  mrg   rtx x;
   6852  1.1  mrg   if (rn == FPUL_REG)
   6853  1.1  mrg     x = gen_push_fpul ();
   6854  1.1  mrg   else if (rn == FPSCR_REG)
   6855  1.1  mrg     x = gen_push_fpscr ();
   6856  1.1  mrg   else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
   6857  1.1  mrg 	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
   6858  1.1  mrg     {
   6859  1.1  mrg       if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
   6860  1.1  mrg 	return NULL_RTX;
   6861  1.1  mrg       x = gen_push_4 (gen_rtx_REG (DFmode, rn));
   6862  1.1  mrg     }
   6863  1.1  mrg   else if (TARGET_SH2E && FP_REGISTER_P (rn))
   6864  1.1  mrg     x = gen_push_e (gen_rtx_REG (SFmode, rn));
   6865  1.1  mrg   else
   6866  1.1  mrg     x = gen_push (gen_rtx_REG (SImode, rn));
   6867  1.1  mrg 
   6868  1.1  mrg   x = emit_frame_insn (x);
   6869  1.1  mrg   add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
   6870  1.1  mrg   return x;
   6871  1.1  mrg }
   6872  1.1  mrg 
   6873  1.1  mrg /* Output RTL to pop register RN from the stack.  */
   6874  1.1  mrg static void
   6875  1.1  mrg pop (int rn)
   6876  1.1  mrg {
   6877  1.1  mrg   rtx x, sp_reg, reg;
   6878  1.1  mrg   if (rn == FPUL_REG)
   6879  1.1  mrg     x = gen_pop_fpul ();
   6880  1.1  mrg   else if (rn == FPSCR_REG)
   6881  1.1  mrg     x = gen_pop_fpscr ();
   6882  1.1  mrg   else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
   6883  1.1  mrg 	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
   6884  1.1  mrg     {
   6885  1.1  mrg       if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
   6886  1.1  mrg 	return;
   6887  1.1  mrg       x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
   6888  1.1  mrg     }
   6889  1.1  mrg   else if (TARGET_SH2E && FP_REGISTER_P (rn))
   6890  1.1  mrg     x = gen_pop_e (gen_rtx_REG (SFmode, rn));
   6891  1.1  mrg   else
   6892  1.1  mrg     x = gen_pop (gen_rtx_REG (SImode, rn));
   6893  1.1  mrg 
   6894  1.1  mrg   x = emit_insn (x);
   6895  1.1  mrg 
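  /* The notes below describe the pop to the unwinder: REG_CFA_RESTORE
     marks the register as restored, REG_CFA_ADJUST_CFA records the stack
     pointer moving past the popped slot, and REG_INC flags the
     post-increment address update.  */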
   6896  1.1  mrg   sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
   6897  1.1  mrg   reg = copy_rtx (GET_CODE (PATTERN (x)) == PARALLEL
   6898  1.1  mrg 		  ? SET_DEST (XVECEXP (PATTERN (x), 0, 0))
   6899  1.1  mrg 		  : SET_DEST (PATTERN (x)));
   6900  1.1  mrg   add_reg_note (x, REG_CFA_RESTORE, reg);
   6901  1.1  mrg   add_reg_note (x, REG_CFA_ADJUST_CFA,
   6902  1.1  mrg 		gen_rtx_SET (sp_reg,
   6903  1.1  mrg 			     plus_constant (SImode, sp_reg,
   6904  1.1  mrg 					    GET_MODE_SIZE (GET_MODE (reg)))));
   6905  1.1  mrg   add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
   6906  1.1  mrg   RTX_FRAME_RELATED_P (x) = 1;
   6907  1.1  mrg }
   6908  1.1  mrg 
   6909  1.1  mrg /* Generate code to push the regs specified in the mask.  */
   6910  1.1  mrg static void
   6911  1.1  mrg push_regs (HARD_REG_SET *mask, bool interrupt_handler)
   6912  1.1  mrg {
   6913  1.1  mrg   bool skip_fpscr = false;
   6914  1.1  mrg 
    6915  1.1  mrg   /* Push PR last; this gives better latencies after the prologue, and
    6916  1.1  mrg      it provides candidates for the return delay slot when no general
    6917  1.1  mrg      registers are pushed.  */
   6918  1.1  mrg   for (int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
   6919  1.1  mrg        i < FIRST_PSEUDO_REGISTER; i++)
   6920  1.1  mrg     {
   6921  1.1  mrg       /* If this is an interrupt handler, and the SZ bit varies,
   6922  1.1  mrg 	 and we have to push any floating point register, we need
   6923  1.1  mrg 	 to switch to the correct precision first.  */
   6924  1.1  mrg       if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
   6925  1.1  mrg 	  && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
   6926  1.1  mrg 	{
   6927  1.1  mrg 	  push (FPSCR_REG);
   6928  1.1  mrg 	  fpscr_set_from_mem (NORMAL_MODE (FP_MODE), ~*mask);
   6929  1.1  mrg 	  skip_fpscr = true;
   6930  1.1  mrg 	}
   6931  1.1  mrg       if (i != PR_REG
   6932  1.1  mrg 	  && (i != FPSCR_REG || ! skip_fpscr)
   6933  1.1  mrg 	  && TEST_HARD_REG_BIT (*mask, i))
   6934  1.1  mrg 	{
    6935  1.1  mrg 	/* If the ISR has the RESBANK attribute assigned, don't push any of
   6936  1.1  mrg 	   the following registers - R0-R14, MACH, MACL and GBR.  */
   6937  1.1  mrg       if (! (sh_cfun_resbank_handler_p ()
   6938  1.1  mrg 	     && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
   6939  1.1  mrg 		 || i == MACH_REG
   6940  1.1  mrg 		 || i == MACL_REG
   6941  1.1  mrg 		 || i == GBR_REG)))
   6942  1.1  mrg 	  push (i);
   6943  1.1  mrg 	}
   6944  1.1  mrg     }
   6945  1.1  mrg 
   6946  1.1  mrg   /* Push banked registers last to improve delay slot opportunities.  */
   6947  1.1  mrg   if (interrupt_handler)
   6948  1.1  mrg     {
   6949  1.1  mrg       bool use_movml = false;
   6950  1.1  mrg 
   6951  1.1  mrg       if (TARGET_SH2A)
   6952  1.1  mrg 	{
   6953  1.1  mrg 	  unsigned int count = 0;
   6954  1.1  mrg 
   6955  1.1  mrg 	  for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
   6956  1.1  mrg 	    if (TEST_HARD_REG_BIT (*mask, i))
   6957  1.1  mrg 	      count++;
   6958  1.1  mrg 	    else
   6959  1.1  mrg 	      break;
   6960  1.1  mrg 
   6961  1.1  mrg 	  /* Use movml when all banked registers are pushed.  */
   6962  1.1  mrg 	  if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
   6963  1.1  mrg 	    use_movml = true;
   6964  1.1  mrg 	}
   6965  1.1  mrg 
   6966  1.1  mrg       if (sh_cfun_resbank_handler_p ())
   6967  1.1  mrg 	; /* Do nothing.  */
   6968  1.1  mrg       else if (use_movml)
   6969  1.1  mrg 	{
   6970  1.1  mrg 	  rtx x, mem, reg, set;
   6971  1.1  mrg 	  rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
   6972  1.1  mrg 
    6973  1.1  mrg 	  /* We must avoid scheduling the multiple-store insn together
    6974  1.1  mrg 	     with other insns.  */
   6975  1.1  mrg 	  emit_insn (gen_blockage ());
   6976  1.1  mrg 	  x = gen_movml_push_banked (sp_reg);
   6977  1.1  mrg 	  x = emit_frame_insn (x);
   6978  1.1  mrg 	  for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
   6979  1.1  mrg 	    {
   6980  1.1  mrg 	      mem = gen_rtx_MEM (SImode, plus_constant (Pmode, sp_reg, i * 4));
   6981  1.1  mrg 	      reg = gen_rtx_REG (SImode, i);
   6982  1.1  mrg 	      add_reg_note (x, REG_CFA_OFFSET, gen_rtx_SET (mem, reg));
   6983  1.1  mrg 	    }
   6984  1.1  mrg 
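	  /* The single movml insn above stored all eight banked registers
	     (r0..r7 of the alternate bank), hence the 8 * 4 = 32 byte CFA
	     adjustment recorded below.  */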
   6985  1.1  mrg 	  set = gen_rtx_SET (sp_reg, plus_constant (Pmode, sp_reg, - 32));
   6986  1.1  mrg 	  add_reg_note (x, REG_CFA_ADJUST_CFA, set);
   6987  1.1  mrg 	  emit_insn (gen_blockage ());
   6988  1.1  mrg 	}
   6989  1.1  mrg       else
   6990  1.1  mrg 	for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
   6991  1.1  mrg 	  if (TEST_HARD_REG_BIT (*mask, i))
   6992  1.1  mrg 	    push (i);
   6993  1.1  mrg     }
   6994  1.1  mrg 
   6995  1.1  mrg   /* Don't push PR register for an ISR with RESBANK attribute assigned.  */
   6996  1.1  mrg   if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
   6997  1.1  mrg     push (PR_REG);
   6998  1.1  mrg }
   6999  1.1  mrg 
   7000  1.1  mrg /* Work out the registers which need to be saved, both as a mask and a
   7001  1.1  mrg    count of saved words.  Return the count.
   7002  1.1  mrg 
   7003  1.1  mrg    If doing a pragma interrupt function, then push all regs used by the
   7004  1.1  mrg    function, and if we call another function (we can tell by looking at PR),
   7005  1.1  mrg    make sure that all the regs it clobbers are safe too.  */
   7006  1.1  mrg static int
   7007  1.1  mrg calc_live_regs (HARD_REG_SET *live_regs_mask)
   7008  1.1  mrg {
   7009  1.1  mrg   unsigned int reg;
   7010  1.1  mrg   tree attrs;
   7011  1.1  mrg   bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
   7012  1.1  mrg   bool nosave_low_regs;
   7013  1.1  mrg 
   7014  1.1  mrg   attrs = DECL_ATTRIBUTES (current_function_decl);
   7015  1.1  mrg   interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
   7016  1.1  mrg   trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
   7017  1.1  mrg   interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
   7018  1.1  mrg   nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
   7019  1.1  mrg 
   7020  1.1  mrg   CLEAR_HARD_REG_SET (*live_regs_mask);
   7021  1.1  mrg   if (TARGET_FPU_DOUBLE && TARGET_FMOVD && interrupt_handler
   7022  1.1  mrg       && df_regs_ever_live_p (FPSCR_REG))
   7023  1.1  mrg     target_flags &= ~MASK_FPU_SINGLE;
  /* If switching to double mode would save a lot of register saves, do that.  */
   7025  1.1  mrg   else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
   7026  1.1  mrg     for (int count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
   7027  1.1  mrg       if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
   7028  1.1  mrg 	  && (! call_used_regs[reg]
   7029  1.1  mrg 	      || interrupt_handler)
   7030  1.1  mrg 	  && ++count > 2)
   7031  1.1  mrg 	{
   7032  1.1  mrg 	  target_flags &= ~MASK_FPU_SINGLE;
   7033  1.1  mrg 	  break;
   7034  1.1  mrg 	}
   7035  1.1  mrg 
   7036  1.1  mrg 
   7037  1.1  mrg   rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
   7038  1.1  mrg   bool pr_live = (pr_initial
   7039  1.1  mrg 		 ? (!REG_P (pr_initial)
   7040  1.1  mrg 		    || REGNO (pr_initial) != (PR_REG))
   7041  1.1  mrg 		 : df_regs_ever_live_p (PR_REG));
  /* For SHcompact, if not optimizing, we end up with a memory reference
   7043  1.1  mrg      using the return address pointer for __builtin_return_address even
   7044  1.1  mrg      though there is no actual need to put the PR register on the stack.  */
   7045  1.1  mrg   pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
   7046  1.1  mrg 
   7047  1.1  mrg   /* Force PR to be live if the prologue has to call the SHmedia
   7048  1.1  mrg      argument decoder or register saver.  */
   7049  1.1  mrg   bool has_call = pr_live;
   7050  1.1  mrg 
   7051  1.1  mrg   int count;
   7052  1.1  mrg   for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
   7053  1.1  mrg     {
   7054  1.1  mrg       if (reg == PR_REG
   7055  1.1  mrg 	  ? pr_live
   7056  1.1  mrg 	  : interrupt_handler
   7057  1.1  mrg 	  ? (/* Need to save all the regs ever live.  */
   7058  1.1  mrg 	     (df_regs_ever_live_p (reg)
   7059  1.1  mrg 	      || (call_used_regs[reg]
   7060  1.1  mrg 		  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
   7061  1.1  mrg 		      || reg == PIC_OFFSET_TABLE_REGNUM)
   7062  1.1  mrg 		  && has_call))
   7063  1.1  mrg 	     && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
   7064  1.1  mrg 	     && reg != RETURN_ADDRESS_POINTER_REGNUM
   7065  1.1  mrg 	     && reg != T_REG && reg != GBR_REG
   7066  1.1  mrg 	     && reg != FPSCR_MODES_REG && reg != FPSCR_STAT_REG
	     /* Push fpscr only on targets which have an FPU.  */
   7068  1.1  mrg 	     && (reg != FPSCR_REG || TARGET_FPU_ANY))
   7069  1.1  mrg 	  : (/* Only push those regs which are used and need to be saved.  */
   7070  1.1  mrg 	     (false)
   7071  1.1  mrg 	     || (df_regs_ever_live_p (reg)
   7072  1.1  mrg 		 && ((!call_used_regs[reg]
   7073  1.1  mrg 		      && !(reg != PIC_OFFSET_TABLE_REGNUM
   7074  1.1  mrg 			   && fixed_regs[reg]
   7075  1.1  mrg 			   && call_used_or_fixed_reg_p (reg)))
   7076  1.1  mrg 		     || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
   7077  1.1  mrg 	     || (crtl->calls_eh_return
   7078  1.1  mrg 		 && (reg == EH_RETURN_DATA_REGNO (0)
   7079  1.1  mrg 		     || reg == EH_RETURN_DATA_REGNO (1)
   7080  1.1  mrg 		     || reg == EH_RETURN_DATA_REGNO (2)
   7081  1.1  mrg 		     || reg == EH_RETURN_DATA_REGNO (3)))
   7082  1.1  mrg 	     || ((reg == MACL_REG || reg == MACH_REG)
   7083  1.1  mrg 		 && df_regs_ever_live_p (reg)
   7084  1.1  mrg 		 && sh_cfun_attr_renesas_p ())
   7085  1.1  mrg 	     ))
   7086  1.1  mrg 	{
   7087  1.1  mrg 	  SET_HARD_REG_BIT (*live_regs_mask, reg);
   7088  1.1  mrg 	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
   7089  1.1  mrg 
   7090  1.1  mrg 	  if (TARGET_FPU_DOUBLE && TARGET_FMOVD
   7091  1.1  mrg 	      && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
   7092  1.1  mrg 	    {
   7093  1.1  mrg 	      if (FP_REGISTER_P (reg))
   7094  1.1  mrg 		{
   7095  1.1  mrg 		  if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
   7096  1.1  mrg 		    {
   7097  1.1  mrg 		      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
   7098  1.1  mrg 		      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
   7099  1.1  mrg 		    }
   7100  1.1  mrg 		}
   7101  1.1  mrg 	      else if (XD_REGISTER_P (reg))
   7102  1.1  mrg 		{
   7103  1.1  mrg 		  /* Must switch to double mode to access these registers.  */
   7104  1.1  mrg 		  target_flags &= ~MASK_FPU_SINGLE;
   7105  1.1  mrg 		}
   7106  1.1  mrg 	    }
   7107  1.1  mrg 	}
   7108  1.1  mrg       if (nosave_low_regs && reg == R8_REG)
   7109  1.1  mrg 	break;
   7110  1.1  mrg     }
   7111  1.1  mrg 
   7112  1.1  mrg   return count;
   7113  1.1  mrg }
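
/* Note: sh_expand_prologue and sh_expand_epilogue below both call
   calc_live_regs and must see the same mask and byte count, since the
   count determines the size of the register save area that
   rounded_frame_size builds on.  */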
   7114  1.1  mrg 
/* Code to generate prologue and epilogue sequences.  */
   7116  1.1  mrg 
   7117  1.1  mrg /* PUSHED is the number of bytes that are being pushed on the
   7118  1.1  mrg    stack for register saves.  Return the frame size, padded
   7119  1.1  mrg    appropriately so that the stack stays properly aligned.  */
   7120  1.1  mrg static HOST_WIDE_INT
   7121  1.1  mrg rounded_frame_size (int pushed)
   7122  1.1  mrg {
   7123  1.1  mrg   HOST_WIDE_INT size = get_frame_size ();
   7124  1.1  mrg   HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
   7125  1.1  mrg 
   7126  1.1  mrg   if (ACCUMULATE_OUTGOING_ARGS)
   7127  1.1  mrg     size += crtl->outgoing_args_size;
   7128  1.1  mrg 
   7129  1.1  mrg   return ((size + pushed + align - 1) & -align) - pushed;
   7130  1.1  mrg }
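
/* Worked example (illustrative): with a 32-bit STACK_BOUNDARY the alignment
   is 4 bytes.  If 8 bytes of registers were pushed and get_frame_size ()
   is 10, the result is ((10 + 8 + 3) & -4) - 8 = 20 - 8 = 12, so the total
   adjustment of 8 + 12 = 20 bytes keeps the stack pointer aligned.  */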
   7131  1.1  mrg 
   7132  1.1  mrg /* Expand code for the function prologue.  */
   7133  1.1  mrg void
   7134  1.1  mrg sh_expand_prologue (void)
   7135  1.1  mrg {
   7136  1.1  mrg   int save_flags = target_flags;
   7137  1.1  mrg   tree sp_switch_attr
   7138  1.1  mrg     = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
   7139  1.1  mrg 
   7140  1.1  mrg   current_function_interrupt = sh_cfun_interrupt_handler_p ();
   7141  1.1  mrg 
   7142  1.1  mrg   /* We have pretend args if we had an object sent partially in registers
   7143  1.1  mrg      and partially on the stack, e.g. a large structure.  */
   7144  1.1  mrg   int pretend_args = crtl->args.pretend_args_size;
   7145  1.1  mrg   if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
   7146  1.1  mrg       && (NPARM_REGS(SImode)
   7147  1.1  mrg 	  > crtl->args.info.arg_count[(int) SH_ARG_INT]))
   7148  1.1  mrg     pretend_args = 0;
   7149  1.1  mrg 
   7150  1.1  mrg   output_stack_adjust (-pretend_args, stack_pointer_rtx, 0, NULL, true);
   7151  1.1  mrg   int stack_usage = pretend_args;
   7152  1.1  mrg 
   7153  1.1  mrg   /* Emit the code for SETUP_VARARGS.  */
   7154  1.1  mrg   if (cfun->stdarg)
   7155  1.1  mrg     {
   7156  1.1  mrg       if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
   7157  1.1  mrg 	{
	  /* Push arg regs as if they'd been provided by the caller on the
	     stack.  */
   7159  1.1  mrg 	  for (int i = 0; i < NPARM_REGS(SImode); i++)
   7160  1.1  mrg 	    {
   7161  1.1  mrg 	      int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
   7162  1.1  mrg 
   7163  1.1  mrg 	      if (i >= (NPARM_REGS(SImode)
   7164  1.1  mrg 			- crtl->args.info.arg_count[(int) SH_ARG_INT]
   7165  1.1  mrg 			))
   7166  1.1  mrg 		break;
   7167  1.1  mrg 	      push (rn);
   7168  1.1  mrg 	      stack_usage += GET_MODE_SIZE (SImode);
   7169  1.1  mrg 	    }
   7170  1.1  mrg 	}
   7171  1.1  mrg     }
   7172  1.1  mrg 
   7173  1.1  mrg   /* If we're supposed to switch stacks at function entry, do so now.  */
   7174  1.1  mrg   if (sp_switch_attr)
   7175  1.1  mrg     {
   7176  1.1  mrg       rtx lab, newsrc;
   7177  1.1  mrg       /* The argument specifies a variable holding the address of the
   7178  1.1  mrg 	 stack the interrupt function should switch to/from at entry/exit.  */
   7179  1.1  mrg       tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
   7180  1.1  mrg       const char* s = ggc_strdup (TREE_STRING_POINTER (arg));
   7181  1.1  mrg       rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
   7182  1.1  mrg 
   7183  1.1  mrg       lab = add_constant (sp_switch, SImode, 0);
   7184  1.1  mrg       newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
   7185  1.1  mrg 
   7186  1.1  mrg       emit_insn (gen_sp_switch_1 (newsrc));
   7187  1.1  mrg     }
   7188  1.1  mrg 
   7189  1.1  mrg   HARD_REG_SET live_regs_mask;
   7190  1.1  mrg   int d = calc_live_regs (&live_regs_mask);
   7191  1.1  mrg   /* ??? Maybe we could save some switching if we can move a mode switch
   7192  1.1  mrg      that already happens to be at the function start into the prologue.  */
   7193  1.1  mrg   if (target_flags != save_flags && ! current_function_interrupt)
   7194  1.1  mrg     emit_insn (gen_toggle_sz ());
   7195  1.1  mrg 
   7196  1.1  mrg   push_regs (&live_regs_mask, current_function_interrupt);
   7197  1.1  mrg   stack_usage += d;
   7198  1.1  mrg 
   7199  1.1  mrg   if (flag_pic && !TARGET_FDPIC
   7200  1.1  mrg       && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
   7201  1.1  mrg     emit_insn (gen_GOTaddr2picreg (const0_rtx));
   7202  1.1  mrg 
   7203  1.1  mrg   if (target_flags != save_flags && ! current_function_interrupt)
   7204  1.1  mrg     emit_insn (gen_toggle_sz ());
   7205  1.1  mrg 
   7206  1.1  mrg   target_flags = save_flags;
   7207  1.1  mrg 
   7208  1.1  mrg   output_stack_adjust (-rounded_frame_size (d),
   7209  1.1  mrg 		       stack_pointer_rtx, 0, NULL, true);
   7210  1.1  mrg   stack_usage += rounded_frame_size (d);
   7211  1.1  mrg 
   7212  1.1  mrg   if (frame_pointer_needed)
   7213  1.1  mrg     emit_frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
   7214  1.1  mrg 
   7215  1.1  mrg   /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly, if call instructions get scheduled
     before frame-related insns, it'll confuse the unwinder because
   7218  1.1  mrg      currently SH has no unwind info for function epilogues.  */
   7219  1.1  mrg   if (crtl->profile || flag_exceptions || flag_unwind_tables)
   7220  1.1  mrg     emit_insn (gen_blockage ());
   7221  1.1  mrg 
   7222  1.1  mrg   if (flag_stack_usage_info)
   7223  1.1  mrg     current_function_static_stack_size = stack_usage;
   7224  1.1  mrg }
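
/* In summary, the prologue above adjusts the stack in this order: pretend
   args, varargs register saves, the register pushes from calc_live_regs,
   and finally the rounded local frame; sh_expand_epilogue undoes these
   steps in reverse.  */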
   7225  1.1  mrg 
   7226  1.1  mrg /* Expand code for the function epilogue.  */
   7227  1.1  mrg void
   7228  1.1  mrg sh_expand_epilogue (bool sibcall_p)
   7229  1.1  mrg {
   7230  1.1  mrg   int save_flags = target_flags;
   7231  1.1  mrg   bool fpscr_deferred = false;
   7232  1.1  mrg   int e = sibcall_p ? -1 : 1;
   7233  1.1  mrg 
   7234  1.1  mrg   HARD_REG_SET live_regs_mask;
   7235  1.1  mrg   int d = calc_live_regs (&live_regs_mask);
   7236  1.1  mrg 
   7237  1.1  mrg   int save_size = d;
   7238  1.1  mrg   int frame_size = rounded_frame_size (d);
   7239  1.1  mrg 
   7240  1.1  mrg   if (frame_pointer_needed)
   7241  1.1  mrg     {
   7242  1.1  mrg       /* We must avoid scheduling the epilogue with previous basic blocks.
   7243  1.1  mrg 	 See PR/18032 and PR/40313.  */
   7244  1.1  mrg       emit_insn (gen_blockage ());
   7245  1.1  mrg       output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
   7246  1.1  mrg 			   &live_regs_mask, true);
   7247  1.1  mrg 
   7248  1.1  mrg       /* We must avoid moving the stack pointer adjustment past code
   7249  1.1  mrg 	 which reads from the local frame, else an interrupt could
   7250  1.1  mrg 	 occur after the SP adjustment and clobber data in the local
   7251  1.1  mrg 	 frame.  */
   7252  1.1  mrg       emit_insn (gen_blockage ());
   7253  1.1  mrg       emit_frame_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
   7254  1.1  mrg     }
   7255  1.1  mrg   else if (frame_size)
   7256  1.1  mrg     {
   7257  1.1  mrg       /* We must avoid moving the stack pointer adjustment past code
   7258  1.1  mrg 	 which reads from the local frame, else an interrupt could
   7259  1.1  mrg 	 occur after the SP adjustment and clobber data in the local
   7260  1.1  mrg 	 frame.  */
   7261  1.1  mrg       emit_insn (gen_blockage ());
   7262  1.1  mrg       output_stack_adjust (frame_size, stack_pointer_rtx, e,
   7263  1.1  mrg 			   &live_regs_mask, true);
   7264  1.1  mrg     }
   7265  1.1  mrg 
   7266  1.1  mrg   /* Pop all the registers.  */
   7267  1.1  mrg 
   7268  1.1  mrg   if (target_flags != save_flags && ! current_function_interrupt)
   7269  1.1  mrg     emit_insn (gen_toggle_sz ());
   7270  1.1  mrg 
   7271  1.1  mrg     {
   7272  1.1  mrg       int last_reg;
   7273  1.1  mrg 
   7274  1.1  mrg       save_size = 0;
      /* For an ISR with RESBANK attribute assigned, don't pop the PR
	 register.  */
   7277  1.1  mrg       if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
   7278  1.1  mrg 	  && !sh_cfun_resbank_handler_p ())
   7279  1.1  mrg 	{
   7280  1.1  mrg 	  if (!frame_pointer_needed)
   7281  1.1  mrg 	    emit_insn (gen_blockage ());
   7282  1.1  mrg 	  pop (PR_REG);
   7283  1.1  mrg 	}
   7284  1.1  mrg 
      /* Banked registers are popped first to avoid being scheduled in the
	 delay slot.  RTE switches banks before its delay slot instruction.  */
   7287  1.1  mrg       if (current_function_interrupt)
   7288  1.1  mrg 	{
   7289  1.1  mrg 	  bool use_movml = false;
   7290  1.1  mrg 
   7291  1.1  mrg 	  if (TARGET_SH2A)
   7292  1.1  mrg 	    {
   7293  1.1  mrg 	      unsigned int count = 0;
   7294  1.1  mrg 
   7295  1.1  mrg 	      for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
   7296  1.1  mrg 		if (TEST_HARD_REG_BIT (live_regs_mask, i))
   7297  1.1  mrg 		  count++;
   7298  1.1  mrg 		else
   7299  1.1  mrg 		  break;
   7300  1.1  mrg 
	      /* Use movml when all banked registers are popped.  */
   7302  1.1  mrg 	      if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
   7303  1.1  mrg 		use_movml = true;
   7304  1.1  mrg 	    }
   7305  1.1  mrg 
   7306  1.1  mrg 	  if (sh_cfun_resbank_handler_p ())
   7307  1.1  mrg 	    ; /* Do nothing.  */
   7308  1.1  mrg 	  else if (use_movml)
   7309  1.1  mrg 	    {
   7310  1.1  mrg 	      rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
   7311  1.1  mrg 
	      /* We must avoid scheduling the multiple load insn together
		 with other insns.  */
   7314  1.1  mrg 	      emit_insn (gen_blockage ());
   7315  1.1  mrg 	      emit_insn (gen_movml_pop_banked (sp_reg));
   7316  1.1  mrg 	      emit_insn (gen_blockage ());
   7317  1.1  mrg 	    }
   7318  1.1  mrg 	  else
   7319  1.1  mrg 	    for (int i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
   7320  1.1  mrg 	      if (TEST_HARD_REG_BIT (live_regs_mask, i))
   7321  1.1  mrg 		pop (i);
   7322  1.1  mrg 
   7323  1.1  mrg 	  last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
   7324  1.1  mrg 	}
   7325  1.1  mrg       else
   7326  1.1  mrg 	last_reg = FIRST_PSEUDO_REGISTER;
   7327  1.1  mrg 
   7328  1.1  mrg       for (int i = 0; i < last_reg; i++)
   7329  1.1  mrg 	{
   7330  1.1  mrg 	  int j = (FIRST_PSEUDO_REGISTER - 1) - i;
   7331  1.1  mrg 
   7332  1.1  mrg 	  if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
   7333  1.1  mrg 	      && hard_reg_set_intersect_p (live_regs_mask,
   7334  1.1  mrg 					  reg_class_contents[DF_REGS]))
   7335  1.1  mrg 	    fpscr_deferred = true;
   7336  1.1  mrg 	  /* For an ISR with RESBANK attribute assigned, don't pop
	     the following registers: R0-R14, MACH, MACL and GBR.  */
   7338  1.1  mrg 	  else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
   7339  1.1  mrg 		   && ! (sh_cfun_resbank_handler_p ()
   7340  1.1  mrg 			 && ((j >= FIRST_GENERAL_REG
   7341  1.1  mrg 			      && j < LAST_GENERAL_REG)
   7342  1.1  mrg 			      || j == MACH_REG
   7343  1.1  mrg 			      || j == MACL_REG
   7344  1.1  mrg 			      || j == GBR_REG)))
   7345  1.1  mrg 	    pop (j);
   7346  1.1  mrg 
   7347  1.1  mrg 	  if (j == FIRST_FP_REG && fpscr_deferred)
   7348  1.1  mrg 	    pop (FPSCR_REG);
   7349  1.1  mrg 	}
   7350  1.1  mrg     }
   7351  1.1  mrg   if (target_flags != save_flags && ! current_function_interrupt)
   7352  1.1  mrg     emit_insn (gen_toggle_sz ());
   7353  1.1  mrg   target_flags = save_flags;
   7354  1.1  mrg 
   7355  1.1  mrg   output_stack_adjust (crtl->args.pretend_args_size + save_size,
   7356  1.1  mrg 		       stack_pointer_rtx, e, NULL, true);
   7357  1.1  mrg 
   7358  1.1  mrg   if (crtl->calls_eh_return)
   7359  1.1  mrg     emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
   7360  1.1  mrg 			 EH_RETURN_STACKADJ_RTX));
   7361  1.1  mrg 
   7362  1.1  mrg   /* Switch back to the normal stack if necessary.  */
   7363  1.1  mrg   if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
   7364  1.1  mrg     emit_insn (gen_sp_switch_2 ());
   7365  1.1  mrg 
   7366  1.1  mrg   /* Tell flow the insn that pops PR isn't dead.  */
   7367  1.1  mrg   if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
   7368  1.1  mrg     emit_use (gen_rtx_REG (SImode, PR_REG));
   7369  1.1  mrg }
   7370  1.1  mrg 
   7371  1.1  mrg /* Emit code to change the current function's return address to RA.
   7372  1.1  mrg    TEMP is available as a scratch register, if needed.  */
   7373  1.1  mrg void
   7374  1.1  mrg sh_set_return_address (rtx ra, rtx tmp)
   7375  1.1  mrg {
   7376  1.1  mrg   HARD_REG_SET live_regs_mask;
   7377  1.1  mrg   int d = calc_live_regs (&live_regs_mask);
   7378  1.1  mrg 
  /* If PR_REG isn't live, we can set it directly.  */
   7380  1.1  mrg   if (! TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
   7381  1.1  mrg     {
   7382  1.1  mrg       rtx rr = gen_rtx_REG (SImode, PR_REG);
   7383  1.1  mrg       emit_insn (GEN_MOV (rr, ra));
   7384  1.1  mrg       /* Tell flow the register for return isn't dead.  */
   7385  1.1  mrg       emit_use (rr);
   7386  1.1  mrg       return;
   7387  1.1  mrg     }
   7388  1.1  mrg 
   7389  1.1  mrg   int pr_offset = rounded_frame_size (d);
   7390  1.1  mrg 
   7391  1.1  mrg   emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
   7392  1.1  mrg 
   7393  1.1  mrg   if (frame_pointer_needed)
   7394  1.1  mrg     emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
   7395  1.1  mrg   else
   7396  1.1  mrg     emit_insn (GEN_ADD3 (tmp, tmp, stack_pointer_rtx));
   7397  1.1  mrg 
   7398  1.1  mrg   tmp = gen_frame_mem (Pmode, tmp);
   7399  1.1  mrg   emit_insn (GEN_MOV (tmp, ra));
  /* Tell flow this store isn't dead.  */
   7401  1.1  mrg   emit_use (tmp);
   7402  1.1  mrg }
   7403  1.1  mrg 
   7404  1.1  mrg /* Clear variables at function end.  */
   7405  1.1  mrg static void
   7406  1.1  mrg sh_output_function_epilogue (FILE *)
   7407  1.1  mrg {
   7408  1.1  mrg }
   7409  1.1  mrg 
   7410  1.1  mrg static rtx
   7411  1.1  mrg sh_builtin_saveregs (void)
   7412  1.1  mrg {
   7413  1.1  mrg   /* First unnamed integer register.  */
   7414  1.1  mrg   int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
   7415  1.1  mrg   /* Number of integer registers we need to save.  */
   7416  1.1  mrg   int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
  /* First unnamed SFmode float reg.  */
   7418  1.1  mrg   int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
   7419  1.1  mrg   /* Number of SFmode float regs to save.  */
   7420  1.1  mrg   int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
   7421  1.1  mrg   rtx regbuf, fpregs;
   7422  1.1  mrg   int bufsize, regno;
   7423  1.1  mrg   alias_set_type alias_set;
   7424  1.1  mrg 
   7425  1.1  mrg   if (!TARGET_FPU_ANY)
   7426  1.1  mrg     {
   7427  1.1  mrg       error ("%<__builtin_saveregs%> not supported by this subtarget");
   7428  1.1  mrg       return const0_rtx;
   7429  1.1  mrg     }
   7430  1.1  mrg 
   7431  1.1  mrg   /* Allocate block of memory for the regs.  */
   7432  1.1  mrg   /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
   7433  1.1  mrg      Or can assign_stack_local accept a 0 SIZE argument?  */
   7434  1.1  mrg   bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
   7435  1.1  mrg 
   7436  1.1  mrg   if (n_floatregs & 1)
   7437  1.1  mrg     {
   7438  1.1  mrg       rtx addr;
   7439  1.1  mrg 
   7440  1.1  mrg       regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
   7441  1.1  mrg       addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
   7442  1.1  mrg       emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
   7443  1.1  mrg       regbuf = change_address (regbuf, BLKmode, addr);
   7444  1.1  mrg     }
   7445  1.1  mrg   else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
   7446  1.1  mrg     {
   7447  1.1  mrg       rtx addr, mask;
   7448  1.1  mrg 
   7449  1.1  mrg       regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
   7450  1.1  mrg       addr = copy_to_mode_reg (Pmode, plus_constant (Pmode,
   7451  1.1  mrg 						     XEXP (regbuf, 0), 4));
   7452  1.1  mrg       mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
   7453  1.1  mrg       emit_insn (gen_andsi3 (addr, addr, mask));
   7454  1.1  mrg       regbuf = change_address (regbuf, BLKmode, addr);
   7455  1.1  mrg     }
   7456  1.1  mrg   else
   7457  1.1  mrg     regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
   7458  1.1  mrg   alias_set = get_varargs_alias_set ();
   7459  1.1  mrg   set_mem_alias_set (regbuf, alias_set);
   7460  1.1  mrg 
   7461  1.1  mrg   /* Save int args.
   7462  1.1  mrg      This is optimized to only save the regs that are necessary.  Explicitly
   7463  1.1  mrg      named args need not be saved.  */
   7464  1.1  mrg   if (n_intregs > 0)
   7465  1.1  mrg     move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
   7466  1.1  mrg 			 adjust_address (regbuf, BLKmode,
   7467  1.1  mrg 					 n_floatregs * UNITS_PER_WORD),
   7468  1.1  mrg 			 n_intregs);
   7469  1.1  mrg 
   7470  1.1  mrg   /* Save float args.
   7471  1.1  mrg      This is optimized to only save the regs that are necessary.  Explicitly
   7472  1.1  mrg      named args need not be saved.
   7473  1.1  mrg      We explicitly build a pointer to the buffer because it halves the insn
   7474  1.1  mrg      count when not optimizing (otherwise the pointer is built for each reg
   7475  1.1  mrg      saved).
   7476  1.1  mrg      We emit the moves in reverse order so that we can use predecrement.  */
   7477  1.1  mrg 
   7478  1.1  mrg   fpregs = copy_to_mode_reg (Pmode,
   7479  1.1  mrg 			     plus_constant (Pmode, XEXP (regbuf, 0),
   7480  1.1  mrg 					    n_floatregs * UNITS_PER_WORD));
   7481  1.1  mrg   if (TARGET_FPU_DOUBLE)
   7482  1.1  mrg     {
   7483  1.1  mrg       rtx mem;
   7484  1.1  mrg       for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
   7485  1.1  mrg 	{
   7486  1.1  mrg 	  emit_insn (gen_addsi3 (fpregs, fpregs,
   7487  1.1  mrg 				 GEN_INT (-2 * UNITS_PER_WORD)));
   7488  1.1  mrg 	  mem = change_address (regbuf, DFmode, fpregs);
   7489  1.1  mrg 	  emit_move_insn (mem,
   7490  1.1  mrg 			  gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
   7491  1.1  mrg 	}
   7492  1.1  mrg       regno = first_floatreg;
   7493  1.1  mrg       if (regno & 1)
   7494  1.1  mrg 	{
   7495  1.1  mrg 	  emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
   7496  1.1  mrg 	  mem = change_address (regbuf, SFmode, fpregs);
   7497  1.1  mrg 	  emit_move_insn (mem,
   7498  1.1  mrg 			  gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode)
   7499  1.1  mrg 					       + regno - SH_REG_MSW_OFFSET));
   7500  1.1  mrg 	}
   7501  1.1  mrg     }
   7502  1.1  mrg   else
   7503  1.1  mrg     for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
   7504  1.1  mrg       {
   7505  1.1  mrg         rtx mem;
   7506  1.1  mrg 
   7507  1.1  mrg 	emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
   7508  1.1  mrg 	mem = change_address (regbuf, SFmode, fpregs);
   7509  1.1  mrg 	emit_move_insn (mem,
   7510  1.1  mrg 			gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
   7511  1.1  mrg       }
   7512  1.1  mrg 
   7513  1.1  mrg   /* Return the address of the regbuf.  */
   7514  1.1  mrg   return XEXP (regbuf, 0);
   7515  1.1  mrg }
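
/* Illustrative layout of the buffer returned above (when both unnamed
   integer and FP args exist):

     regbuf + 0                              unnamed FP argument registers
     regbuf + n_floatregs * UNITS_PER_WORD   unnamed integer argument registers

   sh_va_start below relies on this: __va_next_fp starts at the buffer and
   __va_next_o at the start of the integer area.  */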
   7516  1.1  mrg 
   7517  1.1  mrg /* Define the `__builtin_va_list' type for the ABI.  */
   7518  1.1  mrg static tree
   7519  1.1  mrg sh_build_builtin_va_list (void)
   7520  1.1  mrg {
   7521  1.1  mrg   tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
   7522  1.1  mrg   tree record, type_decl;
   7523  1.1  mrg 
   7524  1.1  mrg   if ((! TARGET_SH2E && ! TARGET_SH4)
   7525  1.1  mrg       || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
   7526  1.1  mrg     return ptr_type_node;
   7527  1.1  mrg 
   7528  1.1  mrg   record = (*lang_hooks.types.make_type) (RECORD_TYPE);
   7529  1.1  mrg   type_decl = build_decl (BUILTINS_LOCATION,
   7530  1.1  mrg 			  TYPE_DECL, get_identifier ("__va_list_tag"), record);
   7531  1.1  mrg 
   7532  1.1  mrg   f_next_o = build_decl (BUILTINS_LOCATION,
   7533  1.1  mrg 			 FIELD_DECL, get_identifier ("__va_next_o"),
   7534  1.1  mrg 			 ptr_type_node);
   7535  1.1  mrg   f_next_o_limit = build_decl (BUILTINS_LOCATION,
   7536  1.1  mrg 			       FIELD_DECL,
   7537  1.1  mrg 			       get_identifier ("__va_next_o_limit"),
   7538  1.1  mrg 			       ptr_type_node);
   7539  1.1  mrg   f_next_fp = build_decl (BUILTINS_LOCATION,
   7540  1.1  mrg 			  FIELD_DECL, get_identifier ("__va_next_fp"),
   7541  1.1  mrg 			  ptr_type_node);
   7542  1.1  mrg   f_next_fp_limit = build_decl (BUILTINS_LOCATION,
   7543  1.1  mrg 				FIELD_DECL,
   7544  1.1  mrg 				get_identifier ("__va_next_fp_limit"),
   7545  1.1  mrg 				ptr_type_node);
   7546  1.1  mrg   f_next_stack = build_decl (BUILTINS_LOCATION,
   7547  1.1  mrg 			     FIELD_DECL, get_identifier ("__va_next_stack"),
   7548  1.1  mrg 			     ptr_type_node);
   7549  1.1  mrg 
   7550  1.1  mrg   DECL_FIELD_CONTEXT (f_next_o) = record;
   7551  1.1  mrg   DECL_FIELD_CONTEXT (f_next_o_limit) = record;
   7552  1.1  mrg   DECL_FIELD_CONTEXT (f_next_fp) = record;
   7553  1.1  mrg   DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
   7554  1.1  mrg   DECL_FIELD_CONTEXT (f_next_stack) = record;
   7555  1.1  mrg 
   7556  1.1  mrg   TYPE_STUB_DECL (record) = type_decl;
   7557  1.1  mrg   TYPE_NAME (record) = type_decl;
   7558  1.1  mrg   TYPE_FIELDS (record) = f_next_o;
   7559  1.1  mrg   DECL_CHAIN (f_next_o) = f_next_o_limit;
   7560  1.1  mrg   DECL_CHAIN (f_next_o_limit) = f_next_fp;
   7561  1.1  mrg   DECL_CHAIN (f_next_fp) = f_next_fp_limit;
   7562  1.1  mrg   DECL_CHAIN (f_next_fp_limit) = f_next_stack;
   7563  1.1  mrg 
   7564  1.1  mrg   layout_type (record);
   7565  1.1  mrg 
   7566  1.1  mrg   return record;
   7567  1.1  mrg }
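
/* The record built above roughly corresponds to this C declaration
   (illustrative only, never emitted as source):

     struct __va_list_tag
     {
       void *__va_next_o;         next unnamed integer arg in the save area
       void *__va_next_o_limit;   end of the integer save area
       void *__va_next_fp;        next unnamed FP arg in the save area
       void *__va_next_fp_limit;  end of the FP save area
       void *__va_next_stack;     next argument passed on the stack
     };  */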
   7568  1.1  mrg 
   7569  1.1  mrg /* Implement `va_start' for varargs and stdarg.  */
   7570  1.1  mrg static void
   7571  1.1  mrg sh_va_start (tree valist, rtx nextarg)
   7572  1.1  mrg {
   7573  1.1  mrg   tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
   7574  1.1  mrg   tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
   7575  1.1  mrg   tree t, u;
   7576  1.1  mrg   int nfp, nint;
   7577  1.1  mrg 
   7578  1.1  mrg   if ((! TARGET_SH2E && ! TARGET_SH4)
   7579  1.1  mrg       || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
   7580  1.1  mrg     {
   7581  1.1  mrg       std_expand_builtin_va_start (valist, nextarg);
   7582  1.1  mrg       return;
   7583  1.1  mrg     }
   7584  1.1  mrg 
   7585  1.1  mrg   f_next_o = TYPE_FIELDS (va_list_type_node);
   7586  1.1  mrg   f_next_o_limit = DECL_CHAIN (f_next_o);
   7587  1.1  mrg   f_next_fp = DECL_CHAIN (f_next_o_limit);
   7588  1.1  mrg   f_next_fp_limit = DECL_CHAIN (f_next_fp);
   7589  1.1  mrg   f_next_stack = DECL_CHAIN (f_next_fp_limit);
   7590  1.1  mrg 
   7591  1.1  mrg   next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
   7592  1.1  mrg 		   NULL_TREE);
   7593  1.1  mrg   next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
   7594  1.1  mrg 			 valist, f_next_o_limit, NULL_TREE);
   7595  1.1  mrg   next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
   7596  1.1  mrg 		    NULL_TREE);
   7597  1.1  mrg   next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
   7598  1.1  mrg 			  valist, f_next_fp_limit, NULL_TREE);
   7599  1.1  mrg   next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
   7600  1.1  mrg 		       valist, f_next_stack, NULL_TREE);
   7601  1.1  mrg 
   7602  1.1  mrg   /* Call __builtin_saveregs.  */
   7603  1.1  mrg   u = make_tree (sizetype, expand_builtin_saveregs ());
   7604  1.1  mrg   u = fold_convert (ptr_type_node, u);
   7605  1.1  mrg   t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
   7606  1.1  mrg   TREE_SIDE_EFFECTS (t) = 1;
   7607  1.1  mrg   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
   7608  1.1  mrg 
   7609  1.1  mrg   nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
   7610  1.1  mrg   if (nfp < 8)
   7611  1.1  mrg     nfp = 8 - nfp;
   7612  1.1  mrg   else
   7613  1.1  mrg     nfp = 0;
   7614  1.1  mrg   u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nfp);
   7615  1.1  mrg   t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
   7616  1.1  mrg   TREE_SIDE_EFFECTS (t) = 1;
   7617  1.1  mrg   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
   7618  1.1  mrg 
   7619  1.1  mrg   t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
   7620  1.1  mrg   TREE_SIDE_EFFECTS (t) = 1;
   7621  1.1  mrg   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
   7622  1.1  mrg 
   7623  1.1  mrg   nint = crtl->args.info.arg_count[SH_ARG_INT];
   7624  1.1  mrg   if (nint < 4)
   7625  1.1  mrg     nint = 4 - nint;
   7626  1.1  mrg   else
   7627  1.1  mrg     nint = 0;
   7628  1.1  mrg   u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nint);
   7629  1.1  mrg   t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
   7630  1.1  mrg   TREE_SIDE_EFFECTS (t) = 1;
   7631  1.1  mrg   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
   7632  1.1  mrg 
   7633  1.1  mrg   u = make_tree (ptr_type_node, nextarg);
   7634  1.1  mrg   t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
   7635  1.1  mrg   TREE_SIDE_EFFECTS (t) = 1;
   7636  1.1  mrg   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
   7637  1.1  mrg }
   7638  1.1  mrg 
   7639  1.1  mrg /* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
   7640  1.1  mrg    member, return it.  */
   7641  1.1  mrg static tree
   7642  1.1  mrg find_sole_member (tree type)
   7643  1.1  mrg {
   7644  1.1  mrg   tree field, member = NULL_TREE;
   7645  1.1  mrg 
   7646  1.1  mrg   for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
   7647  1.1  mrg     {
   7648  1.1  mrg       if (TREE_CODE (field) != FIELD_DECL)
   7649  1.1  mrg 	continue;
   7650  1.1  mrg       if (!DECL_SIZE (field))
   7651  1.1  mrg 	return NULL_TREE;
   7652  1.1  mrg       if (integer_zerop (DECL_SIZE (field)))
   7653  1.1  mrg 	continue;
   7654  1.1  mrg       if (member)
   7655  1.1  mrg 	return NULL_TREE;
   7656  1.1  mrg       member = field;
   7657  1.1  mrg     }
   7658  1.1  mrg   return member;
   7659  1.1  mrg }
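
/* For example (illustrative): for  struct S { float f; }  this returns the
   FIELD_DECL of f, letting the va_arg code below treat S like a plain
   float, while a struct with two nonzero-sized members yields NULL_TREE.  */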
   7660  1.1  mrg 
   7661  1.1  mrg /* Implement `va_arg'.  */
   7662  1.1  mrg static tree
   7663  1.1  mrg sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
   7664  1.1  mrg 			 gimple_seq *post_p ATTRIBUTE_UNUSED)
   7665  1.1  mrg {
   7666  1.1  mrg   tree tmp;
   7667  1.1  mrg   tree addr, lab_over = NULL, result = NULL;
   7668  1.1  mrg   tree eff_type;
   7669  1.1  mrg 
   7670  1.1  mrg   const bool pass_by_ref
   7671  1.1  mrg     = !VOID_TYPE_P (type) && must_pass_va_arg_in_stack (type);
   7672  1.1  mrg 
   7673  1.1  mrg   if (pass_by_ref)
   7674  1.1  mrg     type = build_pointer_type (type);
   7675  1.1  mrg 
   7676  1.1  mrg   HOST_WIDE_INT size = int_size_in_bytes (type);
   7677  1.1  mrg   HOST_WIDE_INT rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
   7678  1.1  mrg   tree pptr_type_node = build_pointer_type (ptr_type_node);
   7679  1.1  mrg 
   7680  1.1  mrg   if ((TARGET_SH2E || TARGET_SH4)
   7681  1.1  mrg       && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
   7682  1.1  mrg     {
   7683  1.1  mrg       tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
   7684  1.1  mrg       tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
   7685  1.1  mrg       tree lab_false;
   7686  1.1  mrg       tree member;
   7687  1.1  mrg 
   7688  1.1  mrg       f_next_o = TYPE_FIELDS (va_list_type_node);
   7689  1.1  mrg       f_next_o_limit = DECL_CHAIN (f_next_o);
   7690  1.1  mrg       f_next_fp = DECL_CHAIN (f_next_o_limit);
   7691  1.1  mrg       f_next_fp_limit = DECL_CHAIN (f_next_fp);
   7692  1.1  mrg       f_next_stack = DECL_CHAIN (f_next_fp_limit);
   7693  1.1  mrg 
   7694  1.1  mrg       next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
   7695  1.1  mrg 		       NULL_TREE);
   7696  1.1  mrg       next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
   7697  1.1  mrg 			     valist, f_next_o_limit, NULL_TREE);
   7698  1.1  mrg       next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
   7699  1.1  mrg 			valist, f_next_fp, NULL_TREE);
   7700  1.1  mrg       next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
   7701  1.1  mrg 			      valist, f_next_fp_limit, NULL_TREE);
   7702  1.1  mrg       next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
   7703  1.1  mrg 			   valist, f_next_stack, NULL_TREE);
   7704  1.1  mrg 
   7705  1.1  mrg       /* Structures with a single member with a distinct mode are passed
   7706  1.1  mrg 	 like their member.  This is relevant if the latter has a REAL_TYPE
   7707  1.1  mrg 	 or COMPLEX_TYPE type.  */
   7708  1.1  mrg       eff_type = type;
   7709  1.1  mrg       while (TREE_CODE (eff_type) == RECORD_TYPE
   7710  1.1  mrg 	     && (member = find_sole_member (eff_type))
   7711  1.1  mrg 	     && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
   7712  1.1  mrg 		 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
   7713  1.1  mrg 		 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
   7714  1.1  mrg 	{
   7715  1.1  mrg 	  tree field_type = TREE_TYPE (member);
   7716  1.1  mrg 
   7717  1.1  mrg 	  if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
   7718  1.1  mrg 	    eff_type = field_type;
   7719  1.1  mrg 	  else
   7720  1.1  mrg 	    {
   7721  1.1  mrg 	      gcc_assert ((TYPE_ALIGN (eff_type)
   7722  1.1  mrg 			   < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
   7723  1.1  mrg 			  || (TYPE_ALIGN (eff_type)
   7724  1.1  mrg 			      > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
   7725  1.1  mrg 	      break;
   7726  1.1  mrg 	    }
   7727  1.1  mrg 	}
   7728  1.1  mrg 
   7729  1.1  mrg       bool pass_as_float;
   7730  1.1  mrg       if (TARGET_FPU_DOUBLE)
   7731  1.1  mrg 	{
   7732  1.1  mrg 	  pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
   7733  1.1  mrg 			   || (TREE_CODE (eff_type) == COMPLEX_TYPE
   7734  1.1  mrg 			       && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
   7735  1.1  mrg 			       && size <= 16));
   7736  1.1  mrg 	}
   7737  1.1  mrg       else
   7738  1.1  mrg 	{
   7739  1.1  mrg 	  pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
   7740  1.1  mrg 	}
   7741  1.1  mrg 
   7742  1.1  mrg       addr = create_tmp_var (pptr_type_node);
   7743  1.1  mrg       lab_false = create_artificial_label (UNKNOWN_LOCATION);
   7744  1.1  mrg       lab_over = create_artificial_label (UNKNOWN_LOCATION);
   7745  1.1  mrg 
   7746  1.1  mrg       valist = build_simple_mem_ref (addr);
   7747  1.1  mrg 
   7748  1.1  mrg       if (pass_as_float)
   7749  1.1  mrg 	{
   7750  1.1  mrg 	  tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp));
   7751  1.1  mrg 	  tree cmp;
   7752  1.1  mrg 	  bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
   7753  1.1  mrg 
   7754  1.1  mrg 	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_fp));
   7755  1.1  mrg 	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
   7756  1.1  mrg 
   7757  1.1  mrg 	  gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
   7758  1.1  mrg 	  tmp = next_fp_limit;
   7759  1.1  mrg 	  if (size > 4 && !is_double)
   7760  1.1  mrg 	    tmp = fold_build_pointer_plus_hwi (unshare_expr (tmp), 4 - size);
   7761  1.1  mrg 	  tmp = build2 (GE_EXPR, boolean_type_node,
   7762  1.1  mrg 			unshare_expr (next_fp_tmp), unshare_expr (tmp));
   7763  1.1  mrg 	  cmp = build3 (COND_EXPR, void_type_node, tmp,
   7764  1.1  mrg 		        build1 (GOTO_EXPR, void_type_node,
   7765  1.1  mrg 				unshare_expr (lab_false)), NULL_TREE);
   7766  1.1  mrg 	  if (!is_double)
   7767  1.1  mrg 	    gimplify_and_add (cmp, pre_p);
   7768  1.1  mrg 
   7769  1.1  mrg 	  if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
   7770  1.1  mrg 	      || (is_double || size == 16))
   7771  1.1  mrg 	    {
   7772  1.1  mrg 	      tmp = fold_convert (sizetype, next_fp_tmp);
   7773  1.1  mrg 	      tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
   7774  1.1  mrg 			    size_int (UNITS_PER_WORD));
   7775  1.1  mrg 	      tmp = fold_build_pointer_plus (unshare_expr (next_fp_tmp), tmp);
   7776  1.1  mrg 	      gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
   7777  1.1  mrg 	    }
   7778  1.1  mrg 	  if (is_double)
   7779  1.1  mrg 	    gimplify_and_add (cmp, pre_p);
   7780  1.1  mrg 
   7781  1.1  mrg #ifdef FUNCTION_ARG_SCmode_WART
   7782  1.1  mrg 	  if (TYPE_MODE (eff_type) == SCmode
   7783  1.1  mrg 	      && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
   7784  1.1  mrg 	    {
   7785  1.1  mrg 	      tree subtype = TREE_TYPE (eff_type);
   7786  1.1  mrg 	      tree real, imag;
   7787  1.1  mrg 
   7788  1.1  mrg 	      imag
   7789  1.1  mrg 		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
   7790  1.1  mrg 	      imag = get_initialized_tmp_var (imag, pre_p, NULL);
   7791  1.1  mrg 
   7792  1.1  mrg 	      real
   7793  1.1  mrg 		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
   7794  1.1  mrg 	      real = get_initialized_tmp_var (real, pre_p, NULL);
   7795  1.1  mrg 
   7796  1.1  mrg 	      result = build2 (COMPLEX_EXPR, eff_type, real, imag);
   7797  1.1  mrg 	      if (type != eff_type)
   7798  1.1  mrg 		result = build1 (VIEW_CONVERT_EXPR, type, result);
   7799  1.1  mrg 	      result = get_initialized_tmp_var (result, pre_p, NULL);
   7800  1.1  mrg 	    }
   7801  1.1  mrg #endif /* FUNCTION_ARG_SCmode_WART */
   7802  1.1  mrg 
   7803  1.1  mrg 	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
   7804  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7805  1.1  mrg 
   7806  1.1  mrg 	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
   7807  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7808  1.1  mrg 
   7809  1.1  mrg 	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
   7810  1.1  mrg 	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
   7811  1.1  mrg 	  gimplify_assign (unshare_expr (next_fp_tmp),
   7812  1.1  mrg 			   unshare_expr (valist), pre_p);
   7813  1.1  mrg 
   7814  1.1  mrg 	  gimplify_assign (unshare_expr (valist),
   7815  1.1  mrg 			   unshare_expr (next_fp_tmp), post_p);
   7816  1.1  mrg 	  valist = next_fp_tmp;
   7817  1.1  mrg 	}
   7818  1.1  mrg       else
   7819  1.1  mrg 	{
   7820  1.1  mrg 	  tmp = fold_build_pointer_plus_hwi (unshare_expr (next_o), rsize);
   7821  1.1  mrg 	  tmp = build2 (GT_EXPR, boolean_type_node, tmp,
   7822  1.1  mrg 			unshare_expr (next_o_limit));
   7823  1.1  mrg 	  tmp = build3 (COND_EXPR, void_type_node, tmp,
   7824  1.1  mrg 		        build1 (GOTO_EXPR, void_type_node,
   7825  1.1  mrg 				unshare_expr (lab_false)),
   7826  1.1  mrg 			NULL_TREE);
   7827  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7828  1.1  mrg 
   7829  1.1  mrg 	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_o));
   7830  1.1  mrg 	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
   7831  1.1  mrg 
   7832  1.1  mrg 	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
   7833  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7834  1.1  mrg 
   7835  1.1  mrg 	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
   7836  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7837  1.1  mrg 
   7838  1.1  mrg 	  if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
   7839  1.1  mrg 	    gimplify_assign (unshare_expr (next_o),
   7840  1.1  mrg 			     unshare_expr (next_o_limit), pre_p);
   7841  1.1  mrg 
   7842  1.1  mrg 	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
   7843  1.1  mrg 	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
   7844  1.1  mrg 	}
   7845  1.1  mrg 
   7846  1.1  mrg       if (!result)
   7847  1.1  mrg 	{
   7848  1.1  mrg 	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
   7849  1.1  mrg 	  gimplify_and_add (tmp, pre_p);
   7850  1.1  mrg 	}
   7851  1.1  mrg     }
   7852  1.1  mrg 
   7853  1.1  mrg   /* ??? In va-sh.h, there had been code to make values larger than
   7854  1.1  mrg      size 8 indirect.  This does not match the FUNCTION_ARG macros.  */
   7855  1.1  mrg 
   7856  1.1  mrg   tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
   7857  1.1  mrg   if (result)
   7858  1.1  mrg     {
   7859  1.1  mrg       gimplify_assign (result, tmp, pre_p);
   7860  1.1  mrg       result = build1 (NOP_EXPR, TREE_TYPE (result), result);
   7861  1.1  mrg       tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
   7862  1.1  mrg       gimplify_and_add (tmp, pre_p);
   7863  1.1  mrg     }
   7864  1.1  mrg   else
   7865  1.1  mrg     result = tmp;
   7866  1.1  mrg 
   7867  1.1  mrg   if (pass_by_ref)
   7868  1.1  mrg     result = build_va_arg_indirect_ref (result);
   7869  1.1  mrg 
   7870  1.1  mrg   return result;
   7871  1.1  mrg }
   7872  1.1  mrg 
/* 64-bit floating point memory transfers are implemented as paired single
   precision loads or stores.  So the DWARF information needs fixing in
   little endian mode (unless PR=SZ=1 in FPSCR).  */
   7876  1.1  mrg rtx
   7877  1.1  mrg sh_dwarf_register_span (rtx reg)
   7878  1.1  mrg {
   7879  1.1  mrg   unsigned regno = REGNO (reg);
   7880  1.1  mrg 
   7881  1.1  mrg   if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
   7882  1.1  mrg     return NULL_RTX;
   7883  1.1  mrg 
   7884  1.1  mrg   return
   7885  1.1  mrg     gen_rtx_PARALLEL (VOIDmode,
   7886  1.1  mrg 		      gen_rtvec (2,
   7887  1.1  mrg 				 gen_rtx_REG (SFmode, regno + 1),
   7888  1.1  mrg 				 gen_rtx_REG (SFmode, regno)));
   7889  1.1  mrg }
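
/* For example (illustrative): a DFmode value in the FP register pair
   (regno, regno + 1) is described to the unwinder as the parallel
   [(reg:SF regno + 1), (reg:SF regno)], i.e. as its two single precision
   halves rather than as one 64-bit register.  */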
   7890  1.1  mrg 
   7891  1.1  mrg static machine_mode
   7892  1.1  mrg sh_promote_function_mode (const_tree type, machine_mode mode,
   7893  1.1  mrg 			  int *punsignedp, const_tree funtype,
   7894  1.1  mrg 			  int for_return)
   7895  1.1  mrg {
   7896  1.1  mrg   if (sh_promote_prototypes (funtype))
   7897  1.1  mrg     return promote_mode (type, mode, punsignedp);
   7898  1.1  mrg   else
   7899  1.1  mrg     return default_promote_function_mode (type, mode, punsignedp, funtype,
   7900  1.1  mrg 					  for_return);
   7901  1.1  mrg }
   7902  1.1  mrg 
   7903  1.1  mrg static bool
   7904  1.1  mrg sh_promote_prototypes (const_tree type)
   7905  1.1  mrg {
   7906  1.1  mrg   if (TARGET_HITACHI)
   7907  1.1  mrg     return false;
   7908  1.1  mrg   if (! type)
   7909  1.1  mrg     return true;
   7910  1.1  mrg   return ! sh_attr_renesas_p (type);
   7911  1.1  mrg }
   7912  1.1  mrg 
   7913  1.1  mrg static bool
   7914  1.1  mrg sh_pass_by_reference (cumulative_args_t cum_v, const function_arg_info &arg)
   7915  1.1  mrg {
   7916  1.1  mrg   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
   7917  1.1  mrg 
   7918  1.1  mrg   if (targetm.calls.must_pass_in_stack (arg))
   7919  1.1  mrg     return true;
   7920  1.1  mrg 
   7921  1.1  mrg   /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
   7922  1.1  mrg      wants to know about pass-by-reference semantics for incoming
   7923  1.1  mrg      arguments.  */
   7924  1.1  mrg   if (! cum)
   7925  1.1  mrg     return false;
   7926  1.1  mrg 
   7927  1.1  mrg   return false;
   7928  1.1  mrg }
   7929  1.1  mrg 
   7930  1.1  mrg static bool
   7931  1.1  mrg sh_callee_copies (cumulative_args_t cum, const function_arg_info &arg)
   7932  1.1  mrg {
   7933  1.1  mrg   /* ??? How can it possibly be correct to return true only on the
   7934  1.1  mrg      caller side of the equation?  Is there someplace else in the
   7935  1.1  mrg      sh backend that's magically producing the copies?  */
   7936  1.1  mrg   return (get_cumulative_args (cum)->outgoing
   7937  1.1  mrg 	  && ((arg.mode == BLKmode
   7938  1.1  mrg 	       ? TYPE_ALIGN (arg.type)
   7939  1.1  mrg 	       : GET_MODE_ALIGNMENT (arg.mode))
   7940  1.1  mrg 	      % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
   7941  1.1  mrg }
   7942  1.1  mrg 
   7943  1.1  mrg static sh_arg_class
   7944  1.1  mrg get_sh_arg_class (machine_mode mode)
   7945  1.1  mrg {
   7946  1.1  mrg   if (TARGET_FPU_ANY && mode == SFmode)
   7947  1.1  mrg     return SH_ARG_FLOAT;
   7948  1.1  mrg 
   7949  1.1  mrg   if (TARGET_FPU_DOUBLE
   7950  1.1  mrg       && (GET_MODE_CLASS (mode) == MODE_FLOAT
   7951  1.1  mrg 	  || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT))
   7952  1.1  mrg     return SH_ARG_FLOAT;
   7953  1.1  mrg 
   7954  1.1  mrg   return SH_ARG_INT;
   7955  1.1  mrg }
   7956  1.1  mrg 
   7957  1.1  mrg /* Round a register number up to a proper boundary for an arg of mode
   7958  1.1  mrg    MODE.
   7959  1.1  mrg    The SH doesn't care about double alignment, so we only
   round doubles to even regs when explicitly asked to.  */
   7961  1.1  mrg static int
   7962  1.1  mrg sh_round_reg (const CUMULATIVE_ARGS& cum, machine_mode mode)
   7963  1.1  mrg {
   7964  1.1  mrg   /* FIXME: This used to be a macro and has been copy pasted into this
   7965  1.1  mrg      function as is.  Make this more readable.  */
   7966  1.1  mrg   return
   7967  1.1  mrg   (((TARGET_ALIGN_DOUBLE
   7968  1.1  mrg       || (TARGET_FPU_DOUBLE
   7969  1.1  mrg 	  && (mode == DFmode || mode == DCmode)
   7970  1.1  mrg 	  && cum.arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (mode)))
   7971  1.1  mrg      && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD)
   7972  1.1  mrg     ? (cum.arg_count[(int) get_sh_arg_class (mode)]
   7973  1.1  mrg        + (cum.arg_count[(int) get_sh_arg_class (mode)] & 1))
   7974  1.1  mrg     : cum.arg_count[(int) get_sh_arg_class (mode)]);
   7975  1.1  mrg }
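
/* Worked example (illustrative, assuming 4-byte words): with
   TARGET_ALIGN_DOUBLE, a DFmode arg arriving when its register count is 3
   is bumped to 3 + (3 & 1) = 4, i.e. the next even register slot; with an
   even count, or for word-sized modes, the count is used unchanged.  */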
   7976  1.1  mrg 
   7977  1.1  mrg /* Return true if arg of the specified mode should be passed in a register
   7978  1.1  mrg    or false otherwise.  */
   7979  1.1  mrg static bool
   7980  1.1  mrg sh_pass_in_reg_p (const CUMULATIVE_ARGS& cum, machine_mode mode,
   7981  1.1  mrg 		  const_tree type)
   7982  1.1  mrg {
   7983  1.1  mrg   /* FIXME: This used to be a macro and has been copy pasted into this
   7984  1.1  mrg      function as is.  Make this more readable.  */
   7985  1.1  mrg   return
   7986  1.1  mrg   ((type == 0
   7987  1.1  mrg     || (! TREE_ADDRESSABLE (type)
   7988  1.1  mrg 	&& (! (TARGET_HITACHI || cum.renesas_abi)
   7989  1.1  mrg 	    || ! (AGGREGATE_TYPE_P (type)
   7990  1.1  mrg 		  || (!TARGET_FPU_ANY
   7991  1.1  mrg 		      && (GET_MODE_CLASS (mode) == MODE_FLOAT
   7992  1.1  mrg 			  && GET_MODE_SIZE (mode) > GET_MODE_SIZE (SFmode)))))))
   7993  1.1  mrg    && ! cum.force_mem
   7994  1.1  mrg    && (TARGET_SH2E
   7995  1.1  mrg        ? ((mode) == BLKmode
   7996  1.1  mrg 	  ? ((cum.arg_count[(int) SH_ARG_INT] * UNITS_PER_WORD
   7997  1.1  mrg 	      + int_size_in_bytes (type))
   7998  1.1  mrg 	     <= NPARM_REGS (SImode) * UNITS_PER_WORD)
   7999  1.1  mrg 	  : ((sh_round_reg (cum, mode)
   8000  1.1  mrg 	      + sh_hard_regno_nregs (BASE_ARG_REG (mode), mode))
   8001  1.1  mrg 	     <= NPARM_REGS (mode)))
   8002  1.1  mrg        : sh_round_reg (cum, mode) < NPARM_REGS (mode)));
   8003  1.1  mrg }
   8004  1.1  mrg 
   8005  1.1  mrg static int
   8006  1.1  mrg sh_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
   8007  1.1  mrg {
   8008  1.1  mrg   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
   8009  1.1  mrg   int words = 0;
   8010  1.1  mrg 
   8011  1.1  mrg   if (sh_pass_in_reg_p (*cum, arg.mode, arg.type)
   8012  1.1  mrg       && !TARGET_FPU_DOUBLE
   8013  1.1  mrg       && (sh_round_reg (*cum, arg.mode)
   8014  1.1  mrg 	  + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)
   8015  1.1  mrg 	  > NPARM_REGS (arg.mode)))
   8016  1.1  mrg     words = NPARM_REGS (arg.mode) - sh_round_reg (*cum, arg.mode);
   8017  1.1  mrg 
   8018  1.1  mrg   return words * UNITS_PER_WORD;
   8019  1.1  mrg }
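
/* Worked example (illustrative, assuming four SImode parameter registers
   as the varargs code above implies, on a target without a double
   precision FPU): when sh_pass_in_reg_p accepts a DImode arg starting in
   slot 3, it needs 2 words, 3 + 2 > 4, so (4 - 3) * UNITS_PER_WORD = 4
   bytes travel in the last register and the rest goes on the stack.  */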
   8020  1.1  mrg 
   8021  1.1  mrg 
   8022  1.1  mrg /* Define where to put the arguments to a function.
   8023  1.1  mrg    Value is zero to push the argument on the stack,
   8024  1.1  mrg    or a hard register in which to store the argument.
   8025  1.1  mrg 
   8026  1.1  mrg    CUM is a variable of type CUMULATIVE_ARGS which gives info about
   8027  1.1  mrg     the preceding args and about the function being called.
   8028  1.1  mrg    ARG is a description of the argument.
   8029  1.1  mrg 
   8030  1.1  mrg    On SH the first args are normally in registers
   8031  1.1  mrg    and the rest are pushed.  Any arg that starts within the first
   8032  1.1  mrg    NPARM_REGS words is at least partially passed in a register unless
   its data type forbids it.  */
   8034  1.1  mrg static rtx
   8035  1.1  mrg sh_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
   8036  1.1  mrg {
   8037  1.1  mrg   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
   8038  1.1  mrg   machine_mode mode = arg.mode;
   8039  1.1  mrg 
   8040  1.1  mrg   if (arg.end_marker_p ())
   8041  1.1  mrg     return ca->renesas_abi ? const1_rtx : const0_rtx;
   8042  1.1  mrg 
   8043  1.1  mrg   if (sh_pass_in_reg_p (*ca, mode, arg.type)
   8044  1.1  mrg       && (arg.named || ! (TARGET_HITACHI || ca->renesas_abi)))
   8045  1.1  mrg     {
   8046  1.1  mrg       int regno;
   8047  1.1  mrg 
   8048  1.1  mrg       if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
   8049  1.1  mrg 	  && (! FUNCTION_ARG_SCmode_WART || (sh_round_reg (*ca, mode) & 1)))
   8050  1.1  mrg 	{
   8051  1.1  mrg 	  rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
   8052  1.1  mrg 				      gen_rtx_REG (SFmode,
   8053  1.1  mrg 						   BASE_ARG_REG (mode)
   8054  1.1  mrg 						   + (sh_round_reg (*ca, mode) ^ 1)),
   8055  1.1  mrg 				      const0_rtx);
   8056  1.1  mrg 	  rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
   8057  1.1  mrg 				      gen_rtx_REG (SFmode,
   8058  1.1  mrg 						   BASE_ARG_REG (mode)
   8059  1.1  mrg 						   + ((sh_round_reg (*ca, mode) + 1) ^ 1)),
   8060  1.1  mrg 				      GEN_INT (4));
   8061  1.1  mrg 	  return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
   8062  1.1  mrg 	}
   8063  1.1  mrg 
   8064  1.1  mrg      /* If the alignment of a DF value causes an SF register to be
   8065  1.1  mrg 	skipped, we will use that skipped register for the next SF
   8066  1.1  mrg 	value.  */
   8067  1.1  mrg       if ((TARGET_HITACHI || ca->renesas_abi)
   8068  1.1  mrg 	  && ca->free_single_fp_reg
   8069  1.1  mrg 	  && mode == SFmode)
   8070  1.1  mrg 	return gen_rtx_REG (mode, ca->free_single_fp_reg);
   8071  1.1  mrg 
   8072  1.1  mrg       regno = (BASE_ARG_REG (mode) + sh_round_reg (*ca, mode))
   8073  1.1  mrg 	       ^ (mode == SFmode && TARGET_SH4
   8074  1.1  mrg 		  && TARGET_LITTLE_ENDIAN
   8075  1.1  mrg 		  && ! TARGET_HITACHI && ! ca->renesas_abi);
   8076  1.1  mrg       return gen_rtx_REG (mode, regno);
   8077  1.1  mrg 
   8078  1.1  mrg     }
   8079  1.1  mrg 
   8080  1.1  mrg   return NULL_RTX;
   8081  1.1  mrg }
   8082  1.1  mrg 
   8083  1.1  mrg /* Update the data in CUM to advance over argument ARG.  */
   8084  1.1  mrg static void
   8085  1.1  mrg sh_function_arg_advance (cumulative_args_t ca_v,
   8086  1.1  mrg 			 const function_arg_info &arg)
   8087  1.1  mrg {
   8088  1.1  mrg   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
   8089  1.1  mrg 
   8090  1.1  mrg   if (ca->force_mem)
   8091  1.1  mrg     ca->force_mem = false;
   8092  1.1  mrg 
   8093  1.1  mrg   if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
   8094  1.1  mrg     {
   8095  1.1  mrg       /* Note that we've used the skipped register.  */
   8096  1.1  mrg       if (arg.mode == SFmode && ca->free_single_fp_reg)
   8097  1.1  mrg 	{
   8098  1.1  mrg 	  ca->free_single_fp_reg = 0;
   8099  1.1  mrg 	  return;
   8100  1.1  mrg 	}
      /* When we have a DF after an SF, there's an SF register that gets
   8102  1.1  mrg 	 skipped in order to align the DF value.  We note this skipped
   8103  1.1  mrg 	 register, because the next SF value will use it, and not the
   8104  1.1  mrg 	 SF that follows the DF.  */
   8105  1.1  mrg       if (arg.mode == DFmode
   8106  1.1  mrg 	  && sh_round_reg (*ca, DFmode) != sh_round_reg (*ca, SFmode))
   8107  1.1  mrg 	{
   8108  1.1  mrg 	  ca->free_single_fp_reg = (sh_round_reg (*ca, SFmode)
   8109  1.1  mrg 				    + BASE_ARG_REG (arg.mode));
   8110  1.1  mrg 	}
   8111  1.1  mrg     }
   8112  1.1  mrg 
   8113  1.1  mrg   if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
   8114  1.1  mrg       || sh_pass_in_reg_p (*ca, arg.mode, arg.type))
   8115  1.1  mrg     (ca->arg_count[(int) get_sh_arg_class (arg.mode)]
   8116  1.1  mrg      = (sh_round_reg (*ca, arg.mode)
   8117  1.1  mrg 	+ CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)));
   8118  1.1  mrg }
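
/* For example (illustrative): under the Renesas ABI with a double FPU,
   args (float, double, float) leave one single-precision slot unused when
   the double is aligned; that slot is remembered in free_single_fp_reg by
   the code above and handed to the third argument by sh_function_arg.  */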
   8119  1.1  mrg 
   8120  1.1  mrg /* The Renesas calling convention doesn't quite fit into this scheme since
   8121  1.1  mrg    the address is passed like an invisible argument, but one that is always
   8122  1.1  mrg    passed in memory.  */
   8123  1.1  mrg static rtx
   8124  1.1  mrg sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
   8125  1.1  mrg {
   8126  1.1  mrg   if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
   8127  1.1  mrg     return NULL_RTX;
   8128  1.1  mrg   return gen_rtx_REG (Pmode, 2);
   8129  1.1  mrg }
   8130  1.1  mrg 
   8131  1.1  mrg /* Worker function for TARGET_FUNCTION_VALUE.
   8132  1.1  mrg 
   8133  1.1  mrg    For the SH, this is like LIBCALL_VALUE, except that we must change the
   8134  1.1  mrg    mode like PROMOTE_MODE does.
   8135  1.1  mrg    ??? PROMOTE_MODE is ignored for non-scalar types.  The set of types
   8136  1.1  mrg    tested here has to be kept in sync with the one in
   8137  1.1  mrg    explow.cc:promote_mode.  */
   8138  1.1  mrg static rtx
   8139  1.1  mrg sh_function_value (const_tree valtype,
   8140  1.1  mrg 		   const_tree fn_decl_or_type,
   8141  1.1  mrg 		   bool outgoing ATTRIBUTE_UNUSED)
   8142  1.1  mrg {
   8143  1.1  mrg   if (fn_decl_or_type
   8144  1.1  mrg       && !DECL_P (fn_decl_or_type))
   8145  1.1  mrg     fn_decl_or_type = NULL;
   8146  1.1  mrg 
   8147  1.1  mrg   return gen_rtx_REG (
   8148  1.1  mrg 	   ((GET_MODE_CLASS (TYPE_MODE (valtype)) == MODE_INT
   8149  1.1  mrg 	     && GET_MODE_SIZE (TYPE_MODE (valtype)) < 4
   8150  1.1  mrg 	     && (TREE_CODE (valtype) == INTEGER_TYPE
   8151  1.1  mrg 		 || TREE_CODE (valtype) == ENUMERAL_TYPE
   8152  1.1  mrg 		 || TREE_CODE (valtype) == BOOLEAN_TYPE
   8153  1.1  mrg 		 || TREE_CODE (valtype) == REAL_TYPE
   8154  1.1  mrg 		 || TREE_CODE (valtype) == OFFSET_TYPE))
   8155  1.1  mrg 	    && sh_promote_prototypes (fn_decl_or_type)
   8156  1.1  mrg 	    ? SImode : TYPE_MODE (valtype)),
   8157  1.1  mrg 	   BASE_RETURN_VALUE_REG (TYPE_MODE (valtype)));
   8158  1.1  mrg }
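
/* Illustration (not part of the original sources): with prototype promotion
   in effect, a function declared as

     short f (void);

   hands its result back in R0 as an SImode value instead of HImode, mirroring
   what PROMOTE_MODE does on the callee side.  */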
   8159  1.1  mrg 
   8160  1.1  mrg /* Worker function for TARGET_LIBCALL_VALUE.  */
   8161  1.1  mrg static rtx
   8162  1.1  mrg sh_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
   8163  1.1  mrg {
   8164  1.1  mrg   return gen_rtx_REG (mode, BASE_RETURN_VALUE_REG (mode));
   8165  1.1  mrg }
   8166  1.1  mrg 
   8167  1.1  mrg /* Return true if N is a possible register number of function value.  */
   8168  1.1  mrg static bool
   8169  1.1  mrg sh_function_value_regno_p (const unsigned int regno)
   8170  1.1  mrg {
   8171  1.1  mrg   return regno == FIRST_RET_REG || (TARGET_SH2E && regno == FIRST_FP_RET_REG);
   8172  1.1  mrg }
   8173  1.1  mrg 
   8174  1.1  mrg /* Worker function for TARGET_RETURN_IN_MEMORY.  */
   8175  1.1  mrg static bool
   8176  1.1  mrg sh_return_in_memory (const_tree type, const_tree fndecl)
   8177  1.1  mrg {
   8178  1.1  mrg   return TYPE_MODE (type) == BLKmode
   8179  1.1  mrg 	 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
   8180  1.1  mrg 	     && TREE_CODE (type) == RECORD_TYPE);
   8181  1.1  mrg }
   8182  1.1  mrg 
   8183  1.1  mrg /* We actually emit the code in sh_expand_prologue.  We used to use
   8184  1.1  mrg    a static variable to flag that we need to emit this code, but that
    8185  1.1  mrg    doesn't work with inlining, since functions are deferred and then emitted
   8186  1.1  mrg    later.  Fortunately, we already have two flags that are part of struct
   8187  1.1  mrg    function that tell if a function uses varargs or stdarg.  */
   8188  1.1  mrg static void
   8189  1.1  mrg sh_setup_incoming_varargs (cumulative_args_t ca,
   8190  1.1  mrg 			   const function_arg_info &arg,
   8191  1.1  mrg 			   int *pretend_arg_size,
   8192  1.1  mrg 			   int second_time ATTRIBUTE_UNUSED)
   8193  1.1  mrg {
   8194  1.1  mrg   gcc_assert (cfun->stdarg);
   8195  1.1  mrg   if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
   8196  1.1  mrg     {
   8197  1.1  mrg       int named_parm_regs, anon_parm_regs;
   8198  1.1  mrg 
   8199  1.1  mrg       named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), arg.mode)
   8200  1.1  mrg 			 + CEIL (arg.promoted_size_in_bytes (),
   8201  1.1  mrg 				 UNITS_PER_WORD));
   8202  1.1  mrg       anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
   8203  1.1  mrg       if (anon_parm_regs > 0)
   8204  1.1  mrg 	*pretend_arg_size = anon_parm_regs * 4;
   8205  1.1  mrg     }
   8206  1.1  mrg }
   8207  1.1  mrg 
   8208  1.1  mrg static bool
   8209  1.1  mrg sh_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
   8210  1.1  mrg {
   8211  1.1  mrg   return false;
   8212  1.1  mrg }
   8213  1.1  mrg 
   8214  1.1  mrg static bool
   8215  1.1  mrg sh_pretend_outgoing_varargs_named (cumulative_args_t ca_v)
   8216  1.1  mrg {
   8217  1.1  mrg   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
   8218  1.1  mrg 
   8219  1.1  mrg   return ! (TARGET_HITACHI || ca->renesas_abi);
   8220  1.1  mrg }
   8221  1.1  mrg 
   8222  1.1  mrg 
   8223  1.1  mrg /* Define the offset between two registers, one to be eliminated, and
   8224  1.1  mrg    the other its replacement, at the start of a routine.  */
   8225  1.1  mrg int
   8226  1.1  mrg initial_elimination_offset (int from, int to)
   8227  1.1  mrg {
   8228  1.1  mrg   const int regs_saved_rounding = 0;
   8229  1.1  mrg   int save_flags = target_flags;
   8230  1.1  mrg   HARD_REG_SET live_regs_mask;
   8231  1.1  mrg 
   8232  1.1  mrg   int regs_saved = calc_live_regs (&live_regs_mask);
   8233  1.1  mrg 
   8234  1.1  mrg   int total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
   8235  1.1  mrg   target_flags = save_flags;
   8236  1.1  mrg 
   8237  1.1  mrg   int total_saved_regs_space = regs_saved + regs_saved_rounding;
   8238  1.1  mrg 
   8239  1.1  mrg   if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
   8240  1.1  mrg     return total_saved_regs_space + total_auto_space;
   8241  1.1  mrg 
   8242  1.1  mrg   if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
   8243  1.1  mrg     return total_saved_regs_space + total_auto_space;
   8244  1.1  mrg 
   8245  1.1  mrg   /* Initial gap between fp and sp is 0.  */
   8246  1.1  mrg   if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
   8247  1.1  mrg     return 0;
   8248  1.1  mrg 
   8249  1.1  mrg   if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
   8250  1.1  mrg     return rounded_frame_size (0);
   8251  1.1  mrg 
   8252  1.1  mrg   if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
   8253  1.1  mrg     return rounded_frame_size (0);
   8254  1.1  mrg 
   8255  1.1  mrg   gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
   8256  1.1  mrg 	      && (to == HARD_FRAME_POINTER_REGNUM
   8257  1.1  mrg 		  || to == STACK_POINTER_REGNUM));
   8258  1.1  mrg   return total_auto_space;
   8259  1.1  mrg }
   8260  1.1  mrg 
   8261  1.1  mrg /* Parse the -mfixed-range= option string.  */
   8262  1.1  mrg void
   8263  1.1  mrg sh_fix_range (const char *const_str)
   8264  1.1  mrg {
   8265  1.1  mrg   /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
   8266  1.1  mrg      REG2 are either register names or register numbers.  The effect
   8267  1.1  mrg      of this option is to mark the registers in the range from REG1 to
   8268  1.1  mrg      REG2 as ``fixed'' so they won't be used by the compiler.  */
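  /* For example (an illustrative sketch, assuming r4..r6 and r10 are the
     usual SH general register names): -mfixed-range=r4-r6,r10-r10 marks
     r4, r5, r6 and r10 as fixed.  */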
   8269  1.1  mrg 
   8270  1.1  mrg   char* str = strcpy ((char*)alloca (strlen (const_str) + 1), const_str);
   8271  1.1  mrg 
   8272  1.1  mrg   while (1)
   8273  1.1  mrg     {
   8274  1.1  mrg       char* dash = strchr (str, '-');
   8275  1.1  mrg       if (!dash)
   8276  1.1  mrg 	{
   8277  1.1  mrg 	  warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
   8278  1.1  mrg 	  return;
   8279  1.1  mrg 	}
   8280  1.1  mrg       *dash = '\0';
   8281  1.1  mrg       char* comma = strchr (dash + 1, ',');
   8282  1.1  mrg       if (comma)
   8283  1.1  mrg 	*comma = '\0';
   8284  1.1  mrg 
   8285  1.1  mrg       int first = decode_reg_name (str);
   8286  1.1  mrg       if (first < 0)
   8287  1.1  mrg 	{
   8288  1.1  mrg 	  warning (0, "unknown register name: %s", str);
   8289  1.1  mrg 	  return;
   8290  1.1  mrg 	}
   8291  1.1  mrg 
   8292  1.1  mrg       int last = decode_reg_name (dash + 1);
   8293  1.1  mrg       if (last < 0)
   8294  1.1  mrg 	{
   8295  1.1  mrg 	  warning (0, "unknown register name: %s", dash + 1);
   8296  1.1  mrg 	  return;
   8297  1.1  mrg 	}
   8298  1.1  mrg 
   8299  1.1  mrg       *dash = '-';
   8300  1.1  mrg 
   8301  1.1  mrg       if (first > last)
   8302  1.1  mrg 	{
   8303  1.1  mrg 	  warning (0, "%s-%s is an empty range", str, dash + 1);
   8304  1.1  mrg 	  return;
   8305  1.1  mrg 	}
   8306  1.1  mrg 
   8307  1.1  mrg       for (int i = first; i <= last; ++i)
   8308  1.1  mrg 	fixed_regs[i] = 1;
   8309  1.1  mrg 
   8310  1.1  mrg       if (!comma)
   8311  1.1  mrg 	break;
   8312  1.1  mrg 
   8313  1.1  mrg       *comma = ',';
   8314  1.1  mrg       str = comma + 1;
   8315  1.1  mrg     }
   8316  1.1  mrg }
   8317  1.1  mrg 
   8318  1.1  mrg /* Insert any deferred function attributes from earlier pragmas.  */
   8320  1.1  mrg static void
   8321  1.1  mrg sh_insert_attributes (tree node, tree *attributes)
   8322  1.1  mrg {
   8323  1.1  mrg   if (TREE_CODE (node) != FUNCTION_DECL)
   8324  1.1  mrg     return;
   8325  1.1  mrg 
    8326  1.1  mrg   /* We are only interested in function declarations.  */
   8327  1.1  mrg   if (!DECL_P (node))
   8328  1.1  mrg     return;
   8329  1.1  mrg 
   8330  1.1  mrg   /* Append the attributes to the deferred attributes.  */
   8331  1.1  mrg   *sh_deferred_function_attributes_tail = *attributes;
   8332  1.1  mrg   tree attrs = sh_deferred_function_attributes;
   8333  1.1  mrg   if (!attrs)
   8334  1.1  mrg     return;
   8335  1.1  mrg 
   8336  1.1  mrg   /* Some attributes imply or require the interrupt attribute.  */
   8337  1.1  mrg   if (!lookup_attribute ("interrupt_handler", attrs)
   8338  1.1  mrg       && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
   8339  1.1  mrg     {
   8340  1.1  mrg       /* If we have a trapa_handler, but no interrupt_handler attribute,
   8341  1.1  mrg 	 insert an interrupt_handler attribute.  */
   8342  1.1  mrg       if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
   8343  1.1  mrg 	/* We can't use sh_pr_interrupt here because that's not in the
   8344  1.1  mrg 	   java frontend.  */
   8345  1.1  mrg 	attrs
   8346  1.1  mrg 	  = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
   8347  1.1  mrg       /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
   8348  1.1  mrg 	 if the interrupt attribute is missing, we ignore the attribute
   8349  1.1  mrg 	 and warn.  */
   8350  1.1  mrg       else if (lookup_attribute ("sp_switch", attrs)
   8351  1.1  mrg 	       || lookup_attribute ("trap_exit", attrs)
   8352  1.1  mrg 	       || lookup_attribute ("nosave_low_regs", attrs)
   8353  1.1  mrg 	       || lookup_attribute ("resbank", attrs))
   8354  1.1  mrg 	{
   8355  1.1  mrg 	  tree *tail;
   8356  1.1  mrg 
   8357  1.1  mrg 	  for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
   8358  1.1  mrg 	    {
   8359  1.1  mrg 	      if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
   8360  1.1  mrg 		  || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
   8361  1.1  mrg 		  || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
   8362  1.1  mrg 		  || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
   8363  1.1  mrg 		warning (OPT_Wattributes,
   8364  1.1  mrg 			 "%qE attribute only applies to interrupt functions",
   8365  1.1  mrg 			 TREE_PURPOSE (attrs));
   8366  1.1  mrg 	      else
   8367  1.1  mrg 		{
   8368  1.1  mrg 		  *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
   8369  1.1  mrg 				     NULL_TREE);
   8370  1.1  mrg 		  tail = &TREE_CHAIN (*tail);
   8371  1.1  mrg 		}
   8372  1.1  mrg 	    }
   8373  1.1  mrg 	  attrs = *attributes;
   8374  1.1  mrg 	}
   8375  1.1  mrg     }
   8376  1.1  mrg 
   8377  1.1  mrg   /* Install the processed list.  */
   8378  1.1  mrg   *attributes = attrs;
   8379  1.1  mrg 
   8380  1.1  mrg   /* Clear deferred attributes.  */
   8381  1.1  mrg   sh_deferred_function_attributes = NULL_TREE;
   8382  1.1  mrg   sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
   8383  1.1  mrg 
   8384  1.1  mrg   return;
   8385  1.1  mrg }
   8386  1.1  mrg 
   8387  1.1  mrg /*------------------------------------------------------------------------------
   8388  1.1  mrg   Target specific attributes
   8389  1.1  mrg   Supported attributes are:
   8390  1.1  mrg 
   8391  1.1  mrg    * interrupt_handler
   8392  1.1  mrg 	Specifies this function is an interrupt handler.
   8393  1.1  mrg 
   8394  1.1  mrg    * trapa_handler
   8395  1.1  mrg 	Like interrupt_handler, but don't save all registers.
   8396  1.1  mrg 
   8397  1.1  mrg    * sp_switch
   8398  1.1  mrg 	Specifies an alternate stack for an interrupt handler to run on.
   8399  1.1  mrg 
   8400  1.1  mrg    * trap_exit
   8401  1.1  mrg 	Use a trapa to exit an interrupt function instead of rte.
   8402  1.1  mrg 
   8403  1.1  mrg    * nosave_low_regs
   8404  1.1  mrg 	Don't save r0..r7 in an interrupt handler function.
   8405  1.1  mrg 	This is useful on SH3* and SH4*, which have a separate set of low
   8406  1.1  mrg 	regs for user and privileged modes.
   8407  1.1  mrg 	This is mainly to be used for non-reentrant interrupt handlers (i.e.
   8408  1.1  mrg 	those that run with interrupts disabled and thus can't be
    8409  1.1  mrg 	interrupted themselves).
   8410  1.1  mrg 
   8411  1.1  mrg    * renesas
   8412  1.1  mrg 	Use Renesas calling/layout conventions (functions and structures).
   8413  1.1  mrg 
   8414  1.1  mrg    * resbank
   8415  1.1  mrg 	In case of an interrupt handler function, use a register bank to
   8416  1.1  mrg 	save registers R0-R14, MACH, MACL, GBR and PR.
   8417  1.1  mrg 	This is available only on SH2A targets.
   8418  1.1  mrg 
   8419  1.1  mrg    * function_vector
   8420  1.1  mrg 	Declares a function to be called using the TBR relative addressing
   8421  1.1  mrg 	mode.  Takes an argument that specifies the slot number in the table
   8422  1.1  mrg 	where this function can be looked up by the JSR/N @@(disp8,TBR) insn.
   8423  1.1  mrg */
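
/* Usage sketch (illustrative only; the handlers below define the exact
   argument checks):

     void __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
			  trap_exit (11)))
     isr (void);

     void __attribute__ ((function_vector (18)))
     tbr_func (void);

   sp_switch expects a string constant, trap_exit and function_vector expect
   an integer constant, and function_vector is limited to 0..255 on SH2A.  */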
   8424  1.1  mrg 
   8425  1.1  mrg /* Handle a 'resbank' attribute.  */
   8426  1.1  mrg static tree
   8427  1.1  mrg sh_handle_resbank_handler_attribute (tree * node, tree name,
   8428  1.1  mrg 				     tree args ATTRIBUTE_UNUSED,
   8429  1.1  mrg 				     int flags ATTRIBUTE_UNUSED,
   8430  1.1  mrg 				     bool * no_add_attrs)
   8431  1.1  mrg {
   8432  1.1  mrg   if (!TARGET_SH2A)
   8433  1.1  mrg     {
   8434  1.1  mrg       warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
   8435  1.1  mrg 	       name);
   8436  1.1  mrg       *no_add_attrs = true;
   8437  1.1  mrg     }
   8438  1.1  mrg   if (TREE_CODE (*node) != FUNCTION_DECL)
   8439  1.1  mrg     {
   8440  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to functions",
   8441  1.1  mrg 	       name);
   8442  1.1  mrg       *no_add_attrs = true;
   8443  1.1  mrg     }
   8444  1.1  mrg 
   8445  1.1  mrg   return NULL_TREE;
   8446  1.1  mrg }
   8447  1.1  mrg 
   8448  1.1  mrg /* Handle an "interrupt_handler" attribute; arguments as in
   8449  1.1  mrg    struct attribute_spec.handler.  */
   8450  1.1  mrg static tree
   8451  1.1  mrg sh_handle_interrupt_handler_attribute (tree *node, tree name,
   8452  1.1  mrg 				       tree args ATTRIBUTE_UNUSED,
   8453  1.1  mrg 				       int flags ATTRIBUTE_UNUSED,
   8454  1.1  mrg 				       bool *no_add_attrs)
   8455  1.1  mrg {
   8456  1.1  mrg   if (TREE_CODE (*node) != FUNCTION_DECL)
   8457  1.1  mrg     {
   8458  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to functions",
   8459  1.1  mrg 	       name);
   8460  1.1  mrg       *no_add_attrs = true;
   8461  1.1  mrg     }
   8462  1.1  mrg 
   8463  1.1  mrg   return NULL_TREE;
   8464  1.1  mrg }
   8465  1.1  mrg 
    8466  1.1  mrg /* Handle a 'function_vector' attribute; arguments as in
   8467  1.1  mrg    struct attribute_spec.handler.  */
   8468  1.1  mrg static tree
   8469  1.1  mrg sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
   8470  1.1  mrg 					       tree args ATTRIBUTE_UNUSED,
   8471  1.1  mrg 					       int flags ATTRIBUTE_UNUSED,
   8472  1.1  mrg 					       bool * no_add_attrs)
   8473  1.1  mrg {
   8474  1.1  mrg   if (!TARGET_SH2A)
   8475  1.1  mrg     {
   8476  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
   8477  1.1  mrg 	       name);
   8478  1.1  mrg       *no_add_attrs = true;
   8479  1.1  mrg     }
   8480  1.1  mrg   else if (TREE_CODE (*node) != FUNCTION_DECL)
   8481  1.1  mrg     {
   8482  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to functions",
   8483  1.1  mrg 	       name);
   8484  1.1  mrg       *no_add_attrs = true;
   8485  1.1  mrg     }
   8486  1.1  mrg   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
   8487  1.1  mrg     {
   8488  1.1  mrg       /* The argument must be a constant integer.  */
   8489  1.1  mrg       warning (OPT_Wattributes,
   8490  1.1  mrg 	       "%qE attribute argument not an integer constant",
   8491  1.1  mrg 	       name);
   8492  1.1  mrg       *no_add_attrs = true;
   8493  1.1  mrg     }
   8494  1.1  mrg   else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
   8495  1.1  mrg     {
    8496  1.1  mrg       /* The argument value must be between 0 and 255.  */
   8497  1.1  mrg       warning (OPT_Wattributes,
    8498  1.1  mrg 	       "%qE attribute argument should be between 0 and 255",
   8499  1.1  mrg 	       name);
   8500  1.1  mrg       *no_add_attrs = true;
   8501  1.1  mrg     }
   8502  1.1  mrg   return NULL_TREE;
   8503  1.1  mrg }
   8504  1.1  mrg 
   8505  1.1  mrg /* Returns true if current function has been assigned the attribute
   8506  1.1  mrg    'function_vector'.  */
   8507  1.1  mrg bool
   8508  1.1  mrg sh2a_is_function_vector_call (rtx x)
   8509  1.1  mrg {
   8510  1.1  mrg   if (GET_CODE (x) == SYMBOL_REF
   8511  1.1  mrg       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
   8512  1.1  mrg     {
   8513  1.1  mrg       tree tr = SYMBOL_REF_DECL (x);
   8514  1.1  mrg 
   8515  1.1  mrg       if (sh2a_function_vector_p (tr))
   8516  1.1  mrg         return true;
   8517  1.1  mrg     }
   8518  1.1  mrg 
   8519  1.1  mrg   return false;
   8520  1.1  mrg }
   8521  1.1  mrg 
   8522  1.1  mrg /* Returns the function vector number, if the attribute
   8523  1.1  mrg    'function_vector' is assigned, otherwise returns zero.  */
   8524  1.1  mrg int
   8525  1.1  mrg sh2a_get_function_vector_number (rtx x)
   8526  1.1  mrg {
   8527  1.1  mrg   if ((GET_CODE (x) == SYMBOL_REF)
   8528  1.1  mrg       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
   8529  1.1  mrg     {
   8530  1.1  mrg       tree t = SYMBOL_REF_DECL (x);
   8531  1.1  mrg 
   8532  1.1  mrg       if (TREE_CODE (t) != FUNCTION_DECL)
   8533  1.1  mrg 	return 0;
   8534  1.1  mrg 
   8535  1.1  mrg       for (tree list = SH_ATTRIBUTES (t); list; list = TREE_CHAIN (list))
   8536  1.1  mrg 	if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
   8537  1.1  mrg 	  return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
   8538  1.1  mrg 
   8539  1.1  mrg       return 0;
   8540  1.1  mrg     }
   8541  1.1  mrg   else
   8542  1.1  mrg     return 0;
   8543  1.1  mrg }
   8544  1.1  mrg 
   8545  1.1  mrg /* Handle an "sp_switch" attribute; arguments as in
   8546  1.1  mrg    struct attribute_spec.handler.  */
   8547  1.1  mrg static tree
   8548  1.1  mrg sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
   8549  1.1  mrg 			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
   8550  1.1  mrg {
   8551  1.1  mrg   if (TREE_CODE (*node) != FUNCTION_DECL)
   8552  1.1  mrg     {
   8553  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to functions",
   8554  1.1  mrg 	       name);
   8555  1.1  mrg       *no_add_attrs = true;
   8556  1.1  mrg     }
   8557  1.1  mrg   else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
   8558  1.1  mrg     {
   8559  1.1  mrg       /* The argument must be a constant string.  */
   8560  1.1  mrg       warning (OPT_Wattributes, "%qE attribute argument not a string constant",
   8561  1.1  mrg 	       name);
   8562  1.1  mrg       *no_add_attrs = true;
   8563  1.1  mrg     }
   8564  1.1  mrg 
   8565  1.1  mrg   return NULL_TREE;
   8566  1.1  mrg }
   8567  1.1  mrg 
    8568  1.1  mrg /* Handle a "trap_exit" attribute; arguments as in
   8569  1.1  mrg    struct attribute_spec.handler.  */
   8570  1.1  mrg static tree
   8571  1.1  mrg sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
   8572  1.1  mrg 			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
   8573  1.1  mrg {
   8574  1.1  mrg   if (TREE_CODE (*node) != FUNCTION_DECL)
   8575  1.1  mrg     {
   8576  1.1  mrg       warning (OPT_Wattributes, "%qE attribute only applies to functions",
   8577  1.1  mrg 	       name);
   8578  1.1  mrg       *no_add_attrs = true;
   8579  1.1  mrg     }
   8580  1.1  mrg   /* The argument specifies a trap number to be used in a trapa instruction
   8581  1.1  mrg      at function exit (instead of an rte instruction).  */
   8582  1.1  mrg   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
   8583  1.1  mrg     {
   8584  1.1  mrg       /* The argument must be a constant integer.  */
   8585  1.1  mrg       warning (OPT_Wattributes, "%qE attribute argument not an "
   8586  1.1  mrg 	       "integer constant", name);
   8587  1.1  mrg       *no_add_attrs = true;
   8588  1.1  mrg     }
   8589  1.1  mrg 
   8590  1.1  mrg   return NULL_TREE;
   8591  1.1  mrg }
   8592  1.1  mrg 
   8593  1.1  mrg static tree
   8594  1.1  mrg sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
   8595  1.1  mrg 			     tree name ATTRIBUTE_UNUSED,
   8596  1.1  mrg 			     tree args ATTRIBUTE_UNUSED,
   8597  1.1  mrg 			     int flags ATTRIBUTE_UNUSED,
   8598  1.1  mrg 			     bool *no_add_attrs ATTRIBUTE_UNUSED)
   8599  1.1  mrg {
   8600  1.1  mrg   return NULL_TREE;
   8601  1.1  mrg }
   8602  1.1  mrg 
   8603  1.1  mrg /* True if __attribute__((renesas)) or -mrenesas.  */
   8604  1.1  mrg bool
   8605  1.1  mrg sh_attr_renesas_p (const_tree td)
   8606  1.1  mrg {
   8607  1.1  mrg   if (TARGET_HITACHI)
   8608  1.1  mrg     return true;
   8609  1.1  mrg   if (td == NULL_TREE)
   8610  1.1  mrg     return false;
   8611  1.1  mrg   if (DECL_P (td))
   8612  1.1  mrg     td = TREE_TYPE (td);
   8613  1.1  mrg   if (td == error_mark_node)
   8614  1.1  mrg     return false;
   8615  1.1  mrg   return lookup_attribute ("renesas", TYPE_ATTRIBUTES (td)) != NULL_TREE;
   8616  1.1  mrg }
   8617  1.1  mrg 
   8618  1.1  mrg /* True if __attribute__((renesas)) or -mrenesas, for the current
   8619  1.1  mrg    function.  */
   8620  1.1  mrg bool
   8621  1.1  mrg sh_cfun_attr_renesas_p (void)
   8622  1.1  mrg {
   8623  1.1  mrg   return sh_attr_renesas_p (current_function_decl);
   8624  1.1  mrg }
   8625  1.1  mrg 
   8626  1.1  mrg /* Returns true if the current function has the "interrupt_handler"
   8627  1.1  mrg    attribute set.  */
   8628  1.1  mrg bool
   8629  1.1  mrg sh_cfun_interrupt_handler_p (void)
   8630  1.1  mrg {
   8631  1.1  mrg   return (lookup_attribute ("interrupt_handler",
   8632  1.1  mrg 			    DECL_ATTRIBUTES (current_function_decl))
   8633  1.1  mrg 	  != NULL_TREE);
   8634  1.1  mrg }
   8635  1.1  mrg 
   8636  1.1  mrg /* Returns true if FUNC has been assigned the attribute
   8637  1.1  mrg    "function_vector".  */
   8638  1.1  mrg bool
   8639  1.1  mrg sh2a_function_vector_p (tree func)
   8640  1.1  mrg {
   8641  1.1  mrg   if (TREE_CODE (func) != FUNCTION_DECL)
   8642  1.1  mrg     return false;
   8643  1.1  mrg 
   8644  1.1  mrg   for (tree list = SH_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
   8645  1.1  mrg     if (is_attribute_p ("function_vector", get_attribute_name (list)))
   8646  1.1  mrg       return true;
   8647  1.1  mrg 
   8648  1.1  mrg   return false;
   8649  1.1  mrg }
   8650  1.1  mrg 
    8651  1.1  mrg /* Returns true if the current function has the "resbank" attribute set.  */
   8652  1.1  mrg bool
   8653  1.1  mrg sh_cfun_resbank_handler_p (void)
   8654  1.1  mrg {
   8655  1.1  mrg   return ((lookup_attribute ("resbank",
   8656  1.1  mrg 			     DECL_ATTRIBUTES (current_function_decl))
   8657  1.1  mrg 	  != NULL_TREE)
   8658  1.1  mrg 	  && (lookup_attribute ("interrupt_handler",
   8659  1.1  mrg 				DECL_ATTRIBUTES (current_function_decl))
   8660  1.1  mrg 	      != NULL_TREE) && TARGET_SH2A);
   8661  1.1  mrg }
   8662  1.1  mrg 
   8663  1.1  mrg /* Returns true if the current function has a "trap_exit" attribute set.  */
   8664  1.1  mrg bool
   8665  1.1  mrg sh_cfun_trap_exit_p (void)
   8666  1.1  mrg {
   8667  1.1  mrg   return lookup_attribute ("trap_exit", DECL_ATTRIBUTES (current_function_decl))
   8668  1.1  mrg 	 != NULL_TREE;
   8669  1.1  mrg }
   8670  1.1  mrg 
   8671  1.1  mrg /* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */
   8672  1.1  mrg static const char *
   8673  1.1  mrg sh_check_pch_target_flags (int old_flags)
   8674  1.1  mrg {
   8675  1.1  mrg   if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
   8676  1.1  mrg 				    | MASK_SH_E | MASK_HARD_SH4
   8677  1.1  mrg 				    | MASK_FPU_SINGLE | MASK_SH4))
   8678  1.1  mrg     return _("created and used with different architectures / ABIs");
   8679  1.1  mrg   if ((old_flags ^ target_flags) & MASK_HITACHI)
   8680  1.1  mrg     return _("created and used with different ABIs");
   8681  1.1  mrg   if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
   8682  1.1  mrg     return _("created and used with different endianness");
   8683  1.1  mrg   return NULL;
   8684  1.1  mrg }
   8685  1.1  mrg 
   8686  1.1  mrg /* Predicates used by the templates.  */
   8688  1.1  mrg 
   8689  1.1  mrg /* Returns true if OP is MACL, MACH or PR.  The input must be a REG rtx.
   8690  1.1  mrg    Used only in general_movsrc_operand.  */
   8691  1.1  mrg bool
   8692  1.1  mrg system_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
   8693  1.1  mrg {
   8694  1.1  mrg   switch (REGNO (op))
   8695  1.1  mrg     {
   8696  1.1  mrg     case PR_REG:
   8697  1.1  mrg     case MACL_REG:
   8698  1.1  mrg     case MACH_REG:
   8699  1.1  mrg       return true;
   8700  1.1  mrg     }
   8701  1.1  mrg   return false;
   8702  1.1  mrg }
   8703  1.1  mrg 
   8704  1.1  mrg /* Returns true if OP is a floating point value with value 0.0.  */
   8705  1.1  mrg bool
   8706  1.1  mrg fp_zero_operand (rtx op)
   8707  1.1  mrg {
   8708  1.1  mrg   if (GET_MODE (op) != SFmode)
   8709  1.1  mrg     return false;
   8710  1.1  mrg 
   8711  1.1  mrg   const REAL_VALUE_TYPE* r = CONST_DOUBLE_REAL_VALUE (op);
   8712  1.1  mrg   return real_equal (r, &dconst0) && ! REAL_VALUE_MINUS_ZERO (*r);
   8713  1.1  mrg }
   8714  1.1  mrg 
   8715  1.1  mrg /* Returns true if OP is a floating point value with value 1.0.  */
   8716  1.1  mrg bool
   8717  1.1  mrg fp_one_operand (rtx op)
   8718  1.1  mrg {
   8719  1.1  mrg   if (GET_MODE (op) != SFmode)
   8720  1.1  mrg     return false;
   8721  1.1  mrg 
   8722  1.1  mrg   return real_equal (CONST_DOUBLE_REAL_VALUE (op), &dconst1);
   8723  1.1  mrg }
   8724  1.1  mrg 
   8725  1.1  mrg /* Return the TLS type for TLS symbols.  */
   8726  1.1  mrg enum tls_model
   8727  1.1  mrg tls_symbolic_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
   8728  1.1  mrg {
   8729  1.1  mrg   if (GET_CODE (op) != SYMBOL_REF)
   8730  1.1  mrg     return TLS_MODEL_NONE;
   8731  1.1  mrg   return SYMBOL_REF_TLS_MODEL (op);
   8732  1.1  mrg }
   8733  1.1  mrg 
   8734  1.1  mrg /* Return the destination address of a branch.  */
   8736  1.1  mrg static int
   8737  1.1  mrg branch_dest (rtx branch)
   8738  1.1  mrg {
   8739  1.1  mrg   rtx dest = SET_SRC (PATTERN (branch));
   8740  1.1  mrg 
   8741  1.1  mrg   if (GET_CODE (dest) == IF_THEN_ELSE)
   8742  1.1  mrg     dest = XEXP (dest, 1);
   8743  1.1  mrg 
   8744  1.1  mrg   return INSN_ADDRESSES (INSN_UID (XEXP (dest, 0)));
   8745  1.1  mrg }
   8746  1.1  mrg 
   8747  1.1  mrg /* Return nonzero if REG is not used after INSN.
   8749  1.1  mrg    We assume REG is a reload reg, and therefore does
   8750  1.1  mrg    not live past labels.  It may live past calls or jumps though.  */
   8751  1.1  mrg bool
   8752  1.1  mrg reg_unused_after (rtx reg, rtx_insn *insn)
   8753  1.1  mrg {
   8754  1.1  mrg   /* If the reg is set by this instruction, then it is safe for our
   8755  1.1  mrg      case.  Disregard the case where this is a store to memory, since
   8756  1.1  mrg      we are checking a register used in the store address.  */
   8757  1.1  mrg   rtx set = single_set (insn);
   8758  1.1  mrg   if (set && !MEM_P (SET_DEST (set))
   8759  1.1  mrg       && reg_overlap_mentioned_p (reg, SET_DEST (set)))
   8760  1.1  mrg     return true;
   8761  1.1  mrg 
   8762  1.1  mrg   while ((insn = NEXT_INSN (insn)))
   8763  1.1  mrg     {
   8764  1.1  mrg       if (!INSN_P (insn))
   8765  1.1  mrg 	continue;
   8766  1.1  mrg 
   8767  1.1  mrg       rtx_code code = GET_CODE (insn);
   8768  1.1  mrg 
   8769  1.1  mrg #if 0
   8770  1.1  mrg       /* If this is a label that existed before reload, then the register
   8771  1.1  mrg 	 is dead here.  However, if this is a label added by reorg, then
   8772  1.1  mrg 	 the register may still be live here.  We can't tell the difference,
   8773  1.1  mrg 	 so we just ignore labels completely.  */
   8774  1.1  mrg       if (code == CODE_LABEL)
   8775  1.1  mrg 	return 1;
   8776  1.1  mrg       /* else */
   8777  1.1  mrg #endif
   8778  1.1  mrg 
   8779  1.1  mrg       if (code == JUMP_INSN)
   8780  1.1  mrg 	return false;
   8781  1.1  mrg 
   8782  1.1  mrg       /* If this is a sequence, we must handle them all at once.
   8783  1.1  mrg 	 We could have for instance a call that sets the target register,
   8784  1.1  mrg 	 and an insn in a delay slot that uses the register.  In this case,
   8785  1.1  mrg 	 we must return 0.  */
   8786  1.1  mrg       else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
   8787  1.1  mrg 	{
   8788  1.1  mrg 	  rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
   8789  1.1  mrg 	  bool retval = false;
   8790  1.1  mrg 
   8791  1.1  mrg 	  for (int i = 0; i < seq->len (); i++)
   8792  1.1  mrg 	    {
   8793  1.1  mrg 	      rtx_insn *this_insn = seq->insn (i);
   8794  1.1  mrg 	      rtx set = single_set (this_insn);
   8795  1.1  mrg 
   8796  1.1  mrg 	      if (CALL_P (this_insn))
   8797  1.1  mrg 		code = CALL_INSN;
   8798  1.1  mrg 	      else if (JUMP_P (this_insn))
   8799  1.1  mrg 		{
   8800  1.1  mrg 		  if (INSN_ANNULLED_BRANCH_P (this_insn))
   8801  1.1  mrg 		    return false;
   8802  1.1  mrg 		  code = JUMP_INSN;
   8803  1.1  mrg 		}
   8804  1.1  mrg 
   8805  1.1  mrg 	      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
   8806  1.1  mrg 		return false;
   8807  1.1  mrg 	      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
   8808  1.1  mrg 		{
   8809  1.1  mrg 		  if (!MEM_P (SET_DEST (set)))
   8810  1.1  mrg 		    retval = true;
   8811  1.1  mrg 		  else
   8812  1.1  mrg 		    return false;
   8813  1.1  mrg 		}
   8814  1.1  mrg 	      if (set == NULL_RTX
   8815  1.1  mrg 		  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
   8816  1.1  mrg 		return false;
   8817  1.1  mrg 	    }
   8818  1.1  mrg 	  if (retval)
   8819  1.1  mrg 	    return true;
   8820  1.1  mrg 	  else if (code == JUMP_INSN)
   8821  1.1  mrg 	    return false;
   8822  1.1  mrg 	}
   8823  1.1  mrg 
   8824  1.1  mrg       rtx set = single_set (insn);
   8825  1.1  mrg       if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
   8826  1.1  mrg 	return false;
   8827  1.1  mrg       if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
   8828  1.1  mrg 	return !MEM_P (SET_DEST (set));
   8829  1.1  mrg       if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
   8830  1.1  mrg 	return false;
   8831  1.1  mrg 
   8832  1.1  mrg       if (code == CALL_INSN && call_used_regs[REGNO (reg)])
   8833  1.1  mrg 	return true;
   8834  1.1  mrg     }
   8835  1.1  mrg   return true;
   8836  1.1  mrg }
   8837  1.1  mrg 
   8838  1.1  mrg 
   8840  1.1  mrg static GTY(()) rtx t_reg_rtx;
   8841  1.1  mrg rtx
   8842  1.1  mrg get_t_reg_rtx (void)
   8843  1.1  mrg {
   8844  1.1  mrg   if (! t_reg_rtx)
   8845  1.1  mrg     t_reg_rtx = gen_rtx_REG (SImode, T_REG);
   8846  1.1  mrg   return t_reg_rtx;
   8847  1.1  mrg }
   8848  1.1  mrg 
   8849  1.1  mrg static GTY(()) tree fpscr_values;
   8850  1.1  mrg 
   8851  1.1  mrg static void
   8852  1.1  mrg emit_fpu_switch (rtx scratch, int index)
   8853  1.1  mrg {
   8854  1.1  mrg   if (fpscr_values == NULL)
   8855  1.1  mrg     {
   8856  1.1  mrg       tree t = build_index_type (integer_one_node);
   8857  1.1  mrg       t = build_array_type (integer_type_node, t);
   8858  1.1  mrg       t = build_decl (BUILTINS_LOCATION,
   8859  1.1  mrg 		      VAR_DECL, get_identifier ("__fpscr_values"), t);
   8860  1.1  mrg       DECL_ARTIFICIAL (t) = 1;
   8861  1.1  mrg       DECL_IGNORED_P (t) = 1;
   8862  1.1  mrg       DECL_EXTERNAL (t) = 1;
   8863  1.1  mrg       TREE_STATIC (t) = 1;
   8864  1.1  mrg       TREE_PUBLIC (t) = 1;
   8865  1.1  mrg       TREE_USED (t) = 1;
   8866  1.1  mrg 
   8867  1.1  mrg       fpscr_values = t;
   8868  1.1  mrg     }
   8869  1.1  mrg 
   8870  1.1  mrg   rtx src = DECL_RTL (fpscr_values);
   8871  1.1  mrg   if (!can_create_pseudo_p ())
   8872  1.1  mrg     {
   8873  1.1  mrg       emit_move_insn (scratch, XEXP (src, 0));
   8874  1.1  mrg       if (index != 0)
   8875  1.1  mrg 	emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
   8876  1.1  mrg       src = adjust_automodify_address (src, SImode, scratch, index * 4);
   8877  1.1  mrg     }
   8878  1.1  mrg   else
   8879  1.1  mrg     src = adjust_address (src, SImode, index * 4);
   8880  1.1  mrg 
   8881  1.1  mrg   emit_insn (gen_lds_fpscr (src));
   8882  1.1  mrg }
   8883  1.1  mrg 
   8884  1.1  mrg static rtx get_free_reg (HARD_REG_SET);
   8886  1.1  mrg 
    8887  1.1  mrg /* This function returns a register to use for loading the address from
    8888  1.1  mrg    which the fpscr is loaded.  Currently it always returns r1 or r7, but
    8889  1.1  mrg    when we are able to use pseudo registers after combine, or have a
    8890  1.1  mrg    better mechanism for choosing a register, it should be done here.  */
   8891  1.1  mrg /* REGS_LIVE is the liveness information for the point for which we
   8892  1.1  mrg    need this allocation.  In some bare-bones exit blocks, r1 is live at the
   8893  1.1  mrg    start.  We can even have all of r0..r3 being live:
   8894  1.1  mrg __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
    8895  1.1  mrg    The insn before which new insns are placed will clobber the register
   8896  1.1  mrg    we return.  If a basic block consists only of setting the return value
   8897  1.1  mrg    register to a pseudo and using that register, the return value is not
    8898  1.1  mrg    live before or after this block, yet we'll insert our insns right in
   8899  1.1  mrg    the middle.  */
   8900  1.1  mrg static rtx
   8901  1.1  mrg get_free_reg (HARD_REG_SET regs_live)
   8902  1.1  mrg {
   8903  1.1  mrg   if (! TEST_HARD_REG_BIT (regs_live, 1))
   8904  1.1  mrg     return gen_rtx_REG (Pmode, 1);
   8905  1.1  mrg 
   8906  1.1  mrg   /* Hard reg 1 is live; since this is a small register classes target,
   8907  1.1  mrg      there shouldn't be anything but a jump before the function end.  */
   8908  1.1  mrg   gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
   8909  1.1  mrg   return gen_rtx_REG (Pmode, 7);
   8910  1.1  mrg }
   8911  1.1  mrg 
   8912  1.1  mrg /* This function will set the fpscr from memory.
   8913  1.1  mrg    MODE is the mode we are setting it to.  */
   8914  1.1  mrg void
   8915  1.1  mrg fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
   8916  1.1  mrg {
   8917  1.1  mrg   enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
   8918  1.1  mrg   enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
   8919  1.1  mrg 
   8920  1.1  mrg   rtx addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
   8921  1.1  mrg   emit_fpu_switch (addr_reg, fp_mode == norm_mode);
   8922  1.1  mrg }
   8923  1.1  mrg 
   8924  1.1  mrg /* Is the given character a logical line separator for the assembler?  */
   8925  1.1  mrg #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
   8926  1.1  mrg #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
   8927  1.1  mrg #endif
   8928  1.1  mrg 
   8929  1.1  mrg static bool
   8930  1.1  mrg sequence_insn_p (rtx_insn *insn)
   8931  1.1  mrg {
   8932  1.1  mrg   rtx_insn* prev = PREV_INSN (insn);
   8933  1.1  mrg   if (prev == NULL)
   8934  1.1  mrg     return false;
   8935  1.1  mrg 
   8936  1.1  mrg   rtx_insn* next = NEXT_INSN (prev);
   8937  1.1  mrg   if (next == NULL)
   8938  1.1  mrg     return false;
   8939  1.1  mrg 
   8940  1.1  mrg   return INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE;
   8941  1.1  mrg }
   8942  1.1  mrg 
   8943  1.1  mrg int
   8944  1.1  mrg sh_insn_length_adjustment (rtx_insn *insn)
   8945  1.1  mrg {
   8946  1.1  mrg   /* Instructions with unfilled delay slots take up an extra two bytes for
   8947  1.1  mrg      the nop in the delay slot.  */
   8948  1.1  mrg   if (((NONJUMP_INSN_P (insn)
   8949  1.1  mrg 	&& GET_CODE (PATTERN (insn)) != USE
   8950  1.1  mrg 	&& GET_CODE (PATTERN (insn)) != CLOBBER)
   8951  1.1  mrg        || CALL_P (insn) || JUMP_P (insn))
   8952  1.1  mrg       && ! sequence_insn_p (insn)
   8953  1.1  mrg       && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
   8954  1.1  mrg     return 2;
   8955  1.1  mrg 
   8956  1.1  mrg   /* Increase the insn length of a cbranch without a delay slot insn to
   8957  1.1  mrg      force a delay slot which will be stuffed with a nop.  */
   8958  1.1  mrg   if (TARGET_CBRANCH_FORCE_DELAY_SLOT && TARGET_SH2
   8959  1.1  mrg       && JUMP_P (insn) && get_attr_type (insn) == TYPE_CBRANCH
   8960  1.1  mrg       && ! sequence_insn_p (insn))
   8961  1.1  mrg     return 2;
   8962  1.1  mrg 
    8963  1.1  mrg       /* sh-dsp parallel processing insns take four bytes instead of two.  */
   8964  1.1  mrg 
   8965  1.1  mrg   if (NONJUMP_INSN_P (insn))
   8966  1.1  mrg     {
   8967  1.1  mrg       int sum = 0;
   8968  1.1  mrg       rtx body = PATTERN (insn);
   8969  1.1  mrg       const char *templ;
   8970  1.1  mrg       char c;
   8971  1.1  mrg       bool maybe_label = true;
   8972  1.1  mrg 
   8973  1.1  mrg       if (GET_CODE (body) == ASM_INPUT)
   8974  1.1  mrg 	templ = XSTR (body, 0);
   8975  1.1  mrg       else if (asm_noperands (body) >= 0)
   8976  1.1  mrg 	templ
   8977  1.1  mrg 	  = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
   8978  1.1  mrg       else
   8979  1.1  mrg 	return 0;
   8980  1.1  mrg       do
   8981  1.1  mrg 	{
   8982  1.1  mrg 	  int ppi_adjust = 0;
   8983  1.1  mrg 
   8984  1.1  mrg 	  do
   8985  1.1  mrg 	    c = *templ++;
   8986  1.1  mrg 	  while (c == ' ' || c == '\t');
   8987  1.1  mrg 	  /* all sh-dsp parallel-processing insns start with p.
   8988  1.1  mrg 	     The only non-ppi sh insn starting with p is pref.
   8989  1.1  mrg 	     The only ppi starting with pr is prnd.  */
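	  /* For instance (illustrative): a template line starting with "padd"
	     or "prnd" gets the ppi adjustment below, while one starting with
	     "pref" does not.  */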
   8990  1.1  mrg 	  if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
   8991  1.1  mrg 	    ppi_adjust = 2;
    8992  1.1  mrg 	  /* The repeat pseudo-insn expands to three insns, a total of
   8993  1.1  mrg 	     six bytes in size.  */
   8994  1.1  mrg 	  else if ((c == 'r' || c == 'R')
   8995  1.1  mrg 		   && ! strncasecmp ("epeat", templ, 5))
   8996  1.1  mrg 	    ppi_adjust = 4;
   8997  1.1  mrg 	  while (c && c != '\n'
   8998  1.1  mrg 		 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
   8999  1.1  mrg 	    {
   9000  1.1  mrg 	      /* If this is a label, it is obviously not a ppi insn.  */
   9001  1.1  mrg 	      if (c == ':' && maybe_label)
   9002  1.1  mrg 		{
   9003  1.1  mrg 		  ppi_adjust = 0;
   9004  1.1  mrg 		  break;
   9005  1.1  mrg 		}
   9006  1.1  mrg 	      else if (c == '\'' || c == '"')
   9007  1.1  mrg 		maybe_label = false;
   9008  1.1  mrg 	      c = *templ++;
   9009  1.1  mrg 	    }
   9010  1.1  mrg 	  sum += ppi_adjust;
   9011  1.1  mrg 	  maybe_label = c != ':';
   9012  1.1  mrg 	}
   9013  1.1  mrg       while (c);
   9014  1.1  mrg       return sum;
   9015  1.1  mrg     }
   9016  1.1  mrg   return 0;
   9017  1.1  mrg }
   9018  1.1  mrg 
   9019  1.1  mrg /* Return TRUE for a valid displacement for the REG+disp addressing
   9021  1.1  mrg    with MODE.  */
   9022  1.1  mrg bool
   9023  1.1  mrg sh_legitimate_index_p (machine_mode mode, rtx op, bool consider_sh2a,
   9024  1.1  mrg 		       bool allow_zero)
   9025  1.1  mrg {
   9026  1.1  mrg   if (! CONST_INT_P (op))
   9027  1.1  mrg     return false;
   9028  1.1  mrg 
   9029  1.1  mrg     {
   9030  1.1  mrg       const HOST_WIDE_INT offset = INTVAL (op);
   9031  1.1  mrg       const int max_disp = sh_max_mov_insn_displacement (mode, consider_sh2a);
   9032  1.1  mrg       const int align_mask = mov_insn_alignment_mask (mode, consider_sh2a);
   9033  1.1  mrg 
   9034  1.1  mrg       /* If the mode does not support any displacement always return false.
   9035  1.1  mrg 	 Even though an index of '0' is actually always valid, it will cause
   9036  1.1  mrg 	 troubles when e.g. a DFmode move is split into two SFmode moves,
   9037  1.1  mrg 	 where one SFmode move will have index '0' and the other move will
   9038  1.1  mrg 	 have index '4'.  */
   9039  1.1  mrg        if (!allow_zero && max_disp < 1)
   9040  1.1  mrg 	return false;
   9041  1.1  mrg 
   9042  1.1  mrg       return offset >= 0 && offset <= max_disp && (offset & align_mask) == 0;
   9043  1.1  mrg     }
   9044  1.1  mrg }
   9045  1.1  mrg 
   9046  1.1  mrg /* Recognize an RTL expression that is a valid memory address for
   9047  1.1  mrg    an instruction.
   9048  1.1  mrg    The MODE argument is the machine mode for the MEM expression
   9049  1.1  mrg    that wants to use this address.
   9050  1.1  mrg    Allow  REG
   9051  1.1  mrg 	  REG+disp
   9052  1.1  mrg 	  REG+r0
   9053  1.1  mrg 	  REG++
   9054  1.1  mrg 	  --REG
   9055  1.1  mrg 	  GBR
   9056  1.1  mrg 	  GBR+disp  */
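/* In SH assembler syntax these forms correspond roughly to @Rn, @(disp,Rn),
   @(R0,Rn), @Rn+ (post-increment), @-Rn (pre-decrement) and @(disp,GBR);
   this mapping is an informal sketch only.  */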
   9057  1.1  mrg static bool
   9058  1.1  mrg sh_legitimate_address_p (machine_mode mode, rtx x, bool strict)
   9059  1.1  mrg {
   9060  1.1  mrg   if (REG_P (x) && REGNO (x) == GBR_REG)
   9061  1.1  mrg     return true;
   9062  1.1  mrg 
   9063  1.1  mrg   if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
   9064  1.1  mrg     return true;
   9065  1.1  mrg   else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
   9066  1.1  mrg 	   && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
   9067  1.1  mrg     return true;
   9068  1.1  mrg   else if (GET_CODE (x) == PLUS)
   9069  1.1  mrg     {
   9070  1.1  mrg       rtx xop0 = XEXP (x, 0);
   9071  1.1  mrg       rtx xop1 = XEXP (x, 1);
   9072  1.1  mrg 
   9073  1.1  mrg       if (REG_P (xop0) && REGNO (xop0) == GBR_REG)
   9074  1.1  mrg 	return gbr_displacement (xop1, mode);
   9075  1.1  mrg 
   9076  1.1  mrg       if (GET_MODE_SIZE (mode) <= 8
   9077  1.1  mrg 	  && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
   9078  1.1  mrg 	  && sh_legitimate_index_p (mode, xop1, TARGET_SH2A, false))
   9079  1.1  mrg 	return true;
   9080  1.1  mrg 
   9081  1.1  mrg       if (GET_MODE_SIZE (mode) <= 4
   9082  1.1  mrg 	  || (TARGET_FPU_DOUBLE && TARGET_FMOVD && mode == DFmode))
   9083  1.1  mrg 	{
   9084  1.1  mrg 	  if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
   9085  1.1  mrg 	      && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
   9086  1.1  mrg 	    return true;
   9087  1.1  mrg 	  if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
   9088  1.1  mrg 	      && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
   9089  1.1  mrg 	    return true;
   9090  1.1  mrg 	}
   9091  1.1  mrg     }
   9092  1.1  mrg 
   9093  1.1  mrg   return false;
   9094  1.1  mrg }
   9095  1.1  mrg 
   9096  1.1  mrg /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
   9098  1.1  mrg    isn't protected by a PIC unspec.  */
   9099  1.1  mrg bool
   9100  1.1  mrg nonpic_symbol_mentioned_p (rtx x)
   9101  1.1  mrg {
   9102  1.1  mrg   if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
   9103  1.1  mrg       || GET_CODE (x) == PC)
   9104  1.1  mrg     return true;
   9105  1.1  mrg 
   9106  1.1  mrg   /* We don't want to look into the possible MEM location of a
   9107  1.1  mrg      CONST_DOUBLE, since we're not going to use it, in general.  */
   9108  1.1  mrg   if (GET_CODE (x) == CONST_DOUBLE)
   9109  1.1  mrg     return false;
   9110  1.1  mrg 
   9111  1.1  mrg   if (GET_CODE (x) == UNSPEC
   9112  1.1  mrg       && (XINT (x, 1) == UNSPEC_PIC
   9113  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOT
   9114  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOTOFF
   9115  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOTPLT
   9116  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOTTPOFF
   9117  1.1  mrg 	  || XINT (x, 1) == UNSPEC_DTPOFF
   9118  1.1  mrg 	  || XINT (x, 1) == UNSPEC_TPOFF
   9119  1.1  mrg 	  || XINT (x, 1) == UNSPEC_PLT
   9120  1.1  mrg 	  || XINT (x, 1) == UNSPEC_PCREL
   9121  1.1  mrg 	  || XINT (x, 1) == UNSPEC_SYMOFF
   9122  1.1  mrg 	  || XINT (x, 1) == UNSPEC_PCREL_SYMOFF
   9123  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOTFUNCDESC
   9124  1.1  mrg 	  || XINT (x, 1) == UNSPEC_GOTOFFFUNCDESC))
   9125  1.1  mrg     return false;
   9126  1.1  mrg 
   9127  1.1  mrg   const char* fmt = GET_RTX_FORMAT (GET_CODE (x));
   9128  1.1  mrg   for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
   9129  1.1  mrg     {
   9130  1.1  mrg       if (fmt[i] == 'E')
   9131  1.1  mrg 	{
   9132  1.1  mrg 	  for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
   9133  1.1  mrg 	    if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
   9134  1.1  mrg 	      return true;
   9135  1.1  mrg 	}
   9136  1.1  mrg       else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
   9137  1.1  mrg 	return true;
   9138  1.1  mrg     }
   9139  1.1  mrg 
   9140  1.1  mrg   return false;
   9141  1.1  mrg }
   9142  1.1  mrg 
   9143  1.1  mrg /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   9144  1.1  mrg    @GOTOFF in `reg'.  */
   9145  1.1  mrg rtx
   9146  1.1  mrg legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED, rtx reg)
   9147  1.1  mrg {
   9148  1.1  mrg   if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
   9149  1.1  mrg     return orig;
   9150  1.1  mrg 
   9151  1.1  mrg   if (GET_CODE (orig) == LABEL_REF
   9152  1.1  mrg       || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
   9153  1.1  mrg     {
   9154  1.1  mrg       if (reg == NULL_RTX)
   9155  1.1  mrg 	reg = gen_reg_rtx (Pmode);
   9156  1.1  mrg 
   9157  1.1  mrg       if (TARGET_FDPIC
   9158  1.1  mrg 	  && GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (orig))
   9159  1.1  mrg 	{
   9160  1.1  mrg 	  /* Weak functions may be NULL which doesn't work with
   9161  1.1  mrg 	     GOTOFFFUNCDESC because the runtime offset is not known.  */
   9162  1.1  mrg 	  if (SYMBOL_REF_WEAK (orig))
   9163  1.1  mrg 	    emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
   9164  1.1  mrg 	  else
   9165  1.1  mrg 	    emit_insn (gen_symGOTOFFFUNCDESC2reg (reg, orig));
   9166  1.1  mrg 	}
   9167  1.1  mrg       else if (TARGET_FDPIC
   9168  1.1  mrg 	       && (GET_CODE (orig) == LABEL_REF
   9169  1.1  mrg 		   || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_DECL (orig)
   9170  1.1  mrg 		       && (TREE_READONLY (SYMBOL_REF_DECL (orig))
   9171  1.1  mrg 			   || SYMBOL_REF_EXTERNAL_P (orig)
   9172  1.1  mrg 			   || DECL_SECTION_NAME(SYMBOL_REF_DECL (orig))))))
   9173  1.1  mrg 	/* In FDPIC, GOTOFF can only be used for writable data.  */
   9174  1.1  mrg 	emit_insn (gen_symGOT2reg (reg, orig));
   9175  1.1  mrg       else
   9176  1.1  mrg 	emit_insn (gen_symGOTOFF2reg (reg, orig));
   9177  1.1  mrg       return reg;
   9178  1.1  mrg     }
   9179  1.1  mrg   else if (GET_CODE (orig) == SYMBOL_REF)
   9180  1.1  mrg     {
   9181  1.1  mrg       if (reg == NULL_RTX)
   9182  1.1  mrg 	reg = gen_reg_rtx (Pmode);
   9183  1.1  mrg 
   9184  1.1  mrg       if (TARGET_FDPIC && SYMBOL_REF_FUNCTION_P (orig))
   9185  1.1  mrg 	emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
   9186  1.1  mrg       else
   9187  1.1  mrg 	emit_insn (gen_symGOT2reg (reg, orig));
   9188  1.1  mrg       return reg;
   9189  1.1  mrg     }
   9190  1.1  mrg   return orig;
   9191  1.1  mrg }
   9192  1.1  mrg 
    9193  1.1  mrg /* Given a (logical) mode size and an offset in bytes, try to find the
   9194  1.1  mrg    appropriate displacement value for a mov insn.  On SH the displacements
   9195  1.1  mrg    are limited to max. 60 bytes for SImode, max. 30 bytes in HImode and max.
    9196  1.1  mrg    15 bytes in QImode.  To compensate for this we create a new base address by
   9197  1.1  mrg    adding an adjustment value to it.
   9198  1.1  mrg 
   9199  1.1  mrg    If the originally requested offset is greater than 127 we prefer using
   9200  1.1  mrg    values 124..127 over 128..131 to increase opportunities to use the
   9201  1.1  mrg    add #imm, Rn insn.
   9202  1.1  mrg 
   9203  1.1  mrg    In some cases it is possible that a requested offset might seem unaligned
   9204  1.1  mrg    or inappropriate for the mode size, like offset = 2 and mode size = 4.
   9205  1.1  mrg    This is compensated by adjusting the base address so that the effective
   9206  1.1  mrg    address of the displacement move insn will be aligned.
   9207  1.1  mrg 
   9208  1.1  mrg    This is not the best possible way of rebasing the base address, as it
   9209  1.1  mrg    does not look at other present displacement addressings around it.
   9210  1.1  mrg    In some cases this can create more base address adjustments than would
   9211  1.1  mrg    actually be necessary.  */
   9212  1.1  mrg struct disp_adjust
   9213  1.1  mrg {
   9214  1.1  mrg   rtx offset_adjust;
   9215  1.1  mrg   rtx mov_disp;
   9216  1.1  mrg };
   9217  1.1  mrg 
   9218  1.1  mrg static struct disp_adjust
   9219  1.1  mrg sh_find_mov_disp_adjust (machine_mode mode, HOST_WIDE_INT offset)
   9220  1.1  mrg {
   9221  1.1  mrg   struct disp_adjust res = { NULL_RTX, NULL_RTX };
   9222  1.1  mrg 
   9223  1.1  mrg   /* Do not try to use SH2A's large displacements here, because this would
   9224  1.1  mrg      effectively disable the small displacement insns.  */
   9225  1.1  mrg   const int mode_sz = GET_MODE_SIZE (mode);
   9226  1.1  mrg   const int mov_insn_sz = mov_insn_size (mode, false);
   9227  1.1  mrg   const int max_disp = sh_max_mov_insn_displacement (mode, false);
   9228  1.1  mrg   const int max_disp_next = max_disp + mov_insn_sz;
   9229  1.1  mrg   HOST_WIDE_INT align_modifier = offset > 127 ? mov_insn_sz : 0;
   9230  1.1  mrg   HOST_WIDE_INT offset_adjust;
   9231  1.1  mrg 
   9232  1.1  mrg   /* In some cases this actually does happen and we must check for it.  */
   9233  1.1  mrg   if (mode_sz < 1 || mode_sz > 8 || max_disp < 1)
   9234  1.1  mrg     return res;
   9235  1.1  mrg 
   9236  1.1  mrg   /* Keeps the previous behavior for QImode displacement addressing.
   9237  1.1  mrg      This just decides how the offset is re-based.  Removing this special
   9238  1.1  mrg      case will result in slightly bigger code on average, but it's not that
   9239  1.1  mrg      bad actually.  */
   9240  1.1  mrg   if (mov_insn_sz == 1)
   9241  1.1  mrg     align_modifier = 0;
   9242  1.1  mrg 
   9243  1.1  mrg   offset_adjust = ((offset + align_modifier) & ~max_disp) - align_modifier;
   9244  1.1  mrg 
   9245  1.1  mrg   if (mode_sz + offset - offset_adjust <= max_disp_next)
   9246  1.1  mrg     {
   9247  1.1  mrg       res.offset_adjust = GEN_INT (offset_adjust);
   9248  1.1  mrg       res.mov_disp = GEN_INT (offset - offset_adjust);
   9249  1.1  mrg     }
   9250  1.1  mrg 
   9251  1.1  mrg   return res;
   9252  1.1  mrg }
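
/* Worked example (illustrative, using the SImode limits quoted above):
   with mode_sz = mov_insn_sz = 4, max_disp = 60 and a requested offset of 68,
   offset_adjust becomes (68 & ~60) = 64 and mov_disp becomes 4, so the caller
   can add #64 to the base register and address the slot as @(4,Rn).  */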
   9253  1.1  mrg 
   9254  1.1  mrg /* Try to modify an illegitimate address and make it legitimate.
   9255  1.1  mrg    If we find one, return the new, valid address.
   9256  1.1  mrg    Otherwise, return the original address.  */
   9257  1.1  mrg static rtx
   9258  1.1  mrg sh_legitimize_address (rtx x, rtx oldx, machine_mode mode)
   9259  1.1  mrg {
   9260  1.1  mrg   if (flag_pic)
   9261  1.1  mrg     x = legitimize_pic_address (oldx, mode, NULL_RTX);
   9262  1.1  mrg 
   9263  1.1  mrg   if ((TARGET_FPU_DOUBLE && mode == DFmode)
   9264  1.1  mrg       || (TARGET_SH2E && mode == SFmode))
   9265  1.1  mrg     return x;
   9266  1.1  mrg 
   9267  1.1  mrg   if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
   9268  1.1  mrg       && BASE_REGISTER_RTX_P (XEXP (x, 0)))
   9269  1.1  mrg     {
   9270  1.1  mrg       struct disp_adjust adj = sh_find_mov_disp_adjust (mode,
   9271  1.1  mrg 							INTVAL (XEXP (x, 1)));
   9272  1.1  mrg 
   9273  1.1  mrg       if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
   9274  1.1  mrg 	{
   9275  1.1  mrg 	  rtx sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
   9276  1.1  mrg 				  adj.offset_adjust, NULL_RTX, 0,
   9277  1.1  mrg 				  OPTAB_LIB_WIDEN);
   9278  1.1  mrg 	  return gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
   9279  1.1  mrg 	}
   9280  1.1  mrg     }
   9281  1.1  mrg   return x;
   9282  1.1  mrg }
   9283  1.1  mrg 
   9284  1.1  mrg /* Attempt to replace *p, which is an address that needs reloading, with
   9285  1.1  mrg    a valid memory address for an operand of mode MODE.
   9286  1.1  mrg    Like for sh_legitimize_address, for the SH we try to get a normal form
   9287  1.1  mrg    of the address.  That will allow inheritance of the address reloads.  */
   9288  1.1  mrg bool
   9289  1.1  mrg sh_legitimize_reload_address (rtx *p, machine_mode mode, int opnum,
   9290  1.1  mrg 			      int itype)
   9291  1.1  mrg {
   9292  1.1  mrg   enum reload_type type = (enum reload_type) itype;
   9293  1.1  mrg   const int mode_sz = GET_MODE_SIZE (mode);
   9294  1.1  mrg 
   9295  1.1  mrg   if (sh_lra_p ())
   9296  1.1  mrg     return false;
   9297  1.1  mrg 
   9298  1.1  mrg   if (GET_CODE (*p) == PLUS && CONST_INT_P (XEXP (*p, 1))
   9299  1.1  mrg       && MAYBE_BASE_REGISTER_RTX_P (XEXP (*p, 0), true))
   9300  1.1  mrg     {
   9301  1.1  mrg       const HOST_WIDE_INT offset = INTVAL (XEXP (*p, 1));
   9302  1.1  mrg       struct disp_adjust adj = sh_find_mov_disp_adjust (mode, offset);
   9303  1.1  mrg 
   9304  1.1  mrg       if (TARGET_SH2A && mode == DFmode && (offset & 0x7))
   9305  1.1  mrg 	{
   9306  1.1  mrg 	  push_reload (*p, NULL_RTX, p, NULL,
   9307  1.1  mrg 		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
   9308  1.1  mrg 	  return true;
   9309  1.1  mrg 	}
   9310  1.1  mrg 
   9311  1.1  mrg       if (TARGET_SH2E && mode == SFmode)
   9312  1.1  mrg 	{
   9313  1.1  mrg 	  *p = copy_rtx (*p);
   9314  1.1  mrg 	  push_reload (*p, NULL_RTX, p, NULL,
   9315  1.1  mrg 		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
   9316  1.1  mrg 	  return true;
   9317  1.1  mrg 	}
   9318  1.1  mrg 
    9319  1.1  mrg       /* FIXME: Do not allow QImode and HImode displacement moves to be
    9320  1.1  mrg 	 legitimized here, because reload then has a problem figuring out the
    9321  1.1  mrg 	 constraint that the move insn's target/source reg must be R0.
   9322  1.1  mrg 	 Or maybe some handling is wrong in sh_secondary_reload for this
   9323  1.1  mrg 	 to work properly? */
   9324  1.1  mrg       if ((mode_sz == 4 || mode_sz == 8)
   9325  1.1  mrg 	  && ! (TARGET_SH4 && mode == DFmode)
   9326  1.1  mrg 	  && adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
   9327  1.1  mrg 	{
   9328  1.1  mrg 	  rtx sum = gen_rtx_PLUS (Pmode, XEXP (*p, 0), adj.offset_adjust);
   9329  1.1  mrg 	  *p = gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
   9330  1.1  mrg 	  push_reload (sum, NULL_RTX, &XEXP (*p, 0), NULL,
   9331  1.1  mrg 		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
   9332  1.1  mrg 	  return true;
   9333  1.1  mrg 	}
   9334  1.1  mrg     }
   9335  1.1  mrg 
   9336  1.1  mrg   /* We must re-recognize what we created before.  */
   9337  1.1  mrg   if (GET_CODE (*p) == PLUS
   9338  1.1  mrg       && (mode_sz == 4 || mode_sz == 8)
   9339  1.1  mrg       && GET_CODE (XEXP (*p, 0)) == PLUS
   9340  1.1  mrg       && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
   9341  1.1  mrg       && MAYBE_BASE_REGISTER_RTX_P (XEXP (XEXP (*p, 0), 0), true)
   9342  1.1  mrg       && CONST_INT_P (XEXP (*p, 1))
   9343  1.1  mrg       && ! (TARGET_SH2E && mode == SFmode))
   9344  1.1  mrg     {
   9345  1.1  mrg       /* Because this address is so complex, we know it must have
   9346  1.1  mrg 	 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
   9347  1.1  mrg 	 it is already unshared, and needs no further unsharing.  */
   9348  1.1  mrg       push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
   9349  1.1  mrg 		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
   9350  1.1  mrg       return true;
   9351  1.1  mrg     }
   9352  1.1  mrg 
   9353  1.1  mrg   return false;
   9354  1.1  mrg }
   9355  1.1  mrg 
   9356  1.1  mrg /* In the name of slightly smaller debug output, and to cater to
   9357  1.1  mrg    general assembler lossage, recognize various UNSPEC sequences
   9358  1.1  mrg    and turn them back into a direct symbol reference.  */
   9359  1.1  mrg static rtx
   9360  1.1  mrg sh_delegitimize_address (rtx orig_x)
   9361  1.1  mrg {
   9362  1.1  mrg   orig_x = delegitimize_mem_from_attrs (orig_x);
   9363  1.1  mrg 
   9364  1.1  mrg   rtx x = orig_x;
   9365  1.1  mrg   if (MEM_P (x))
   9366  1.1  mrg     x = XEXP (x, 0);
   9367  1.1  mrg   if (GET_CODE (x) == CONST)
   9368  1.1  mrg     {
   9369  1.1  mrg       rtx y = XEXP (x, 0);
   9370  1.1  mrg       if (GET_CODE (y) == UNSPEC)
   9371  1.1  mrg 	{
   9372  1.1  mrg 	  if (XINT (y, 1) == UNSPEC_GOT
   9373  1.1  mrg 	      || XINT (y, 1) == UNSPEC_GOTOFF
   9374  1.1  mrg 	      || XINT (y, 1) == UNSPEC_SYMOFF)
   9375  1.1  mrg 	    return XVECEXP (y, 0, 0);
   9376  1.1  mrg 	  else if (XINT (y, 1) == UNSPEC_PCREL_SYMOFF)
   9377  1.1  mrg 	    {
   9378  1.1  mrg 	      if (GET_CODE (XVECEXP (y, 0, 0)) == CONST)
   9379  1.1  mrg 		{
   9380  1.1  mrg 		  rtx symplt = XEXP (XVECEXP (y, 0, 0), 0);
   9381  1.1  mrg 
   9382  1.1  mrg 		  if (GET_CODE (symplt) == UNSPEC
   9383  1.1  mrg 		      && (XINT (symplt, 1) == UNSPEC_PLT
   9384  1.1  mrg 			  || XINT (symplt, 1) == UNSPEC_PCREL))
   9385  1.1  mrg 		    return XVECEXP (symplt, 0, 0);
   9386  1.1  mrg 		}
   9387  1.1  mrg 	    }
   9388  1.1  mrg 	}
   9389  1.1  mrg     }
   9390  1.1  mrg 
   9391  1.1  mrg   return orig_x;
   9392  1.1  mrg }
   9393  1.1  mrg 
   9394  1.1  mrg /* Mark the use of a constant in the literal table. If the constant
   9395  1.1  mrg    has multiple labels, make it unique.  */
   9396  1.1  mrg static rtx
   9397  1.1  mrg mark_constant_pool_use (rtx x)
   9398  1.1  mrg {
   9399  1.1  mrg   if (x == NULL_RTX)
   9400  1.1  mrg     return x;
   9401  1.1  mrg 
   9402  1.1  mrg   switch (GET_CODE (x))
   9403  1.1  mrg     {
   9404  1.1  mrg     case LABEL_REF:
   9405  1.1  mrg       x = XEXP (x, 0);
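      /* FALLTHRU */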
   9406  1.1  mrg     case CODE_LABEL:
   9407  1.1  mrg       break;
   9408  1.1  mrg     default:
   9409  1.1  mrg       return x;
   9410  1.1  mrg     }
   9411  1.1  mrg 
    9412  1.1  mrg   /* Get the first label in the list of labels for the same constant
    9413  1.1  mrg      and delete the other labels in the list.  */
   9414  1.1  mrg   rtx_insn* lab = as_a <rtx_insn*> (x);
   9415  1.1  mrg   for (rtx_insn* insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
   9416  1.1  mrg     {
   9417  1.1  mrg       if (!LABEL_P (insn)
   9418  1.1  mrg 	  || LABEL_REFS (insn) != NEXT_INSN (insn))
   9419  1.1  mrg 	break;
   9420  1.1  mrg       lab = insn;
   9421  1.1  mrg     }
   9422  1.1  mrg 
   9423  1.1  mrg   for (rtx insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
   9424  1.1  mrg     as_a<rtx_insn *> (insn)->set_deleted ();
   9425  1.1  mrg 
   9426  1.1  mrg   /* Mark constants in a window.  */
   9427  1.1  mrg   for (rtx_insn* insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn;
   9428  1.1  mrg        insn = NEXT_INSN (insn))
   9429  1.1  mrg     {
   9430  1.1  mrg       if (!NONJUMP_INSN_P (insn))
   9431  1.1  mrg 	continue;
   9432  1.1  mrg 
   9433  1.1  mrg       rtx pattern = PATTERN (insn);
   9434  1.1  mrg       if (GET_CODE (pattern) != UNSPEC_VOLATILE)
   9435  1.1  mrg 	continue;
   9436  1.1  mrg 
   9437  1.1  mrg       switch (XINT (pattern, 1))
   9438  1.1  mrg 	{
   9439  1.1  mrg 	case UNSPECV_CONST2:
   9440  1.1  mrg 	case UNSPECV_CONST4:
   9441  1.1  mrg 	case UNSPECV_CONST8:
   9442  1.1  mrg 	  XVECEXP (pattern, 0, 1) = const1_rtx;
   9443  1.1  mrg 	  break;
   9444  1.1  mrg 	case UNSPECV_WINDOW_END:
   9445  1.1  mrg 	  if (XVECEXP (pattern, 0, 0) == x)
   9446  1.1  mrg 	    return lab;
   9447  1.1  mrg 	  break;
   9448  1.1  mrg 	case UNSPECV_CONST_END:
   9449  1.1  mrg 	  return lab;
   9450  1.1  mrg 	default:
   9451  1.1  mrg 	  break;
   9452  1.1  mrg 	}
   9453  1.1  mrg     }
   9454  1.1  mrg 
   9455  1.1  mrg   return lab;
   9456  1.1  mrg }
   9457  1.1  mrg 
   9458  1.1  mrg /* Return true if it's possible to redirect BRANCH1 to the destination
   9460  1.1  mrg    of an unconditional jump BRANCH2.  We only want to do this if the
   9461  1.1  mrg    resulting branch will have a short displacement.  */
   9462  1.1  mrg static bool
   9463  1.1  mrg sh_can_follow_jump (const rtx_insn *branch1, const rtx_insn *branch2)
   9464  1.1  mrg {
    9465  1.1  mrg   /* Don't follow if BRANCH2 might be a jump crossing between hot
    9466  1.1  mrg      and cold partitions.  */
   9467  1.1  mrg   if (flag_reorder_blocks_and_partition
   9468  1.1  mrg       && simplejump_p (branch2)
   9469  1.1  mrg       && CROSSING_JUMP_P (branch2))
   9470  1.1  mrg     return false;
   9471  1.1  mrg 
   9472  1.1  mrg   if (flag_expensive_optimizations && simplejump_p (branch2))
   9473  1.1  mrg     {
   9474  1.1  mrg       rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
   9475  1.1  mrg       rtx_insn *insn;
   9476  1.1  mrg       int distance;
   9477  1.1  mrg 
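      /* Look for DEST first backwards from just after BRANCH1, then forwards,
	 giving up once the scanned insn lengths add up to 256 bytes or more.  */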
   9478  1.1  mrg       for (distance = 0, insn = NEXT_INSN (branch1);
   9479  1.1  mrg 	   insn && distance < 256;
   9480  1.1  mrg 	   insn = PREV_INSN (insn))
   9481  1.1  mrg 	{
   9482  1.1  mrg 	  if (insn == dest)
   9483  1.1  mrg 	    return true;
   9484  1.1  mrg 	  else
   9485  1.1  mrg 	    distance += get_attr_length (insn);
   9486  1.1  mrg 	}
   9487  1.1  mrg       for (distance = 0, insn = NEXT_INSN (branch1);
   9488  1.1  mrg 	   insn && distance < 256;
   9489  1.1  mrg 	   insn = NEXT_INSN (insn))
   9490  1.1  mrg 	{
   9491  1.1  mrg 	  if (insn == dest)
   9492  1.1  mrg 	    return true;
   9493  1.1  mrg 	  else
   9494  1.1  mrg 	    distance += get_attr_length (insn);
   9495  1.1  mrg 	}
   9496  1.1  mrg     }
   9497  1.1  mrg   return false;
   9498  1.1  mrg }
   9499  1.1  mrg 
   9500  1.1  mrg /* Return nonzero if register old_reg can be renamed to register new_reg.  */
   9501  1.1  mrg bool
   9502  1.1  mrg sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
   9503  1.1  mrg 			 unsigned int new_reg)
   9504  1.1  mrg {
   9505  1.1  mrg   /* Interrupt functions can only use registers that have already been
   9506  1.1  mrg      saved by the prologue, even if they would normally be
   9507  1.1  mrg      call-clobbered.  */
   9508  1.1  mrg   if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
   9509  1.1  mrg     return false;
   9510  1.1  mrg 
   9511  1.1  mrg   return true;
   9512  1.1  mrg }
   9513  1.1  mrg 
   9514  1.1  mrg /* Function to update the integer COST
   9515  1.1  mrg    based on the relationship between INSN that is dependent on
   9516  1.1  mrg    DEP_INSN through the dependence LINK.  The default is to make no
   9517  1.1  mrg    adjustment to COST.  This can be used for example to specify to
   9518  1.1  mrg    the scheduler that an output- or anti-dependence does not incur
   9519  1.1  mrg    the same cost as a data-dependence.  The return value should be
   9520  1.1  mrg    the new value for COST.  */
   9521  1.1  mrg static int
   9522  1.1  mrg sh_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
   9523  1.1  mrg 		unsigned int)
   9524  1.1  mrg {
   9525  1.1  mrg   rtx reg, use_pat;
   9526  1.1  mrg 
   9527  1.1  mrg   if (dep_type == 0)
   9528  1.1  mrg     {
   9529  1.1  mrg       if (recog_memoized (insn) < 0
   9530  1.1  mrg 	  || recog_memoized (dep_insn) < 0)
   9531  1.1  mrg 	return cost;
   9532  1.1  mrg 
   9533  1.1  mrg       rtx dep_set = single_set (dep_insn);
   9534  1.1  mrg 
   9535  1.1  mrg       /* The latency that we specify in the scheduling description refers
   9536  1.1  mrg 	 to the actual output, not to an auto-increment register; for that,
   9537  1.1  mrg 	 the latency is one.  */
   9538  1.1  mrg       if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
   9539  1.1  mrg 	{
   9540  1.1  mrg 	  rtx set = single_set (insn);
   9541  1.1  mrg 
   9542  1.1  mrg 	  if (set
   9543  1.1  mrg 	      && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
   9544  1.1  mrg 	      && (!MEM_P (SET_DEST (set))
   9545  1.1  mrg 		  || !reg_mentioned_p (SET_DEST (dep_set),
   9546  1.1  mrg 				       XEXP (SET_DEST (set), 0))))
   9547  1.1  mrg 	    cost = 1;
   9548  1.1  mrg 	}
   9549  1.1  mrg       /* The only input for a call that is timing-critical is the
   9550  1.1  mrg 	 function's address.  */
   9551  1.1  mrg       if (CALL_P (insn))
   9552  1.1  mrg 	{
   9553  1.1  mrg 	  rtx call = get_call_rtx_from (insn);
   9554  1.1  mrg 	  if (call
   9555  1.1  mrg 		  /* sibcalli_thunk uses a symbol_ref in an unspec.  */
   9556  1.1  mrg 	      && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
   9557  1.1  mrg 		  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
   9558  1.1  mrg 	    cost -= TARGET_SH4_300 ? 3 : 6;
   9559  1.1  mrg 	}
   9560  1.1  mrg       /* Likewise, the most timing critical input for an sfuncs call
   9561  1.1  mrg 	 is the function address.  However, sfuncs typically start
   9562  1.1  mrg 	 using their arguments pretty quickly.
   9563  1.1  mrg 	 Assume a four cycle delay for SH4 before they are needed.
   9564  1.1  mrg 	 Cached ST40-300 calls are quicker, so assume only a one
   9565  1.1  mrg 	 cycle delay there.
   9566  1.1  mrg 	 ??? Maybe we should encode the delays till input registers
   9567  1.1  mrg 	 are needed by sfuncs into the sfunc call insn.  */
   9568  1.1  mrg       /* All sfunc calls are parallels with at least four components.
   9569  1.1  mrg 	 Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
   9570  1.1  mrg       else if (GET_CODE (PATTERN (insn)) == PARALLEL
   9571  1.1  mrg 	       && XVECLEN (PATTERN (insn), 0) >= 4
   9572  1.1  mrg 	       && (reg = sfunc_uses_reg (insn)))
   9573  1.1  mrg 	{
   9574  1.1  mrg 	  if (! reg_set_p (reg, dep_insn))
   9575  1.1  mrg 	    cost -= TARGET_SH4_300 ? 1 : 4;
   9576  1.1  mrg 	}
   9577  1.1  mrg       if (TARGET_HARD_SH4 && !TARGET_SH4_300)
   9578  1.1  mrg 	{
   9579  1.1  mrg 	  attr_type dep_type = get_attr_type (dep_insn);
   9580  1.1  mrg 	  attr_type type;
   9581  1.1  mrg 	  if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
   9582  1.1  mrg 	    cost--;
   9583  1.1  mrg 	  else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
   9584  1.1  mrg 		   && (type = get_attr_type (insn)) != TYPE_CALL
   9585  1.1  mrg 		   && type != TYPE_SFUNC)
   9586  1.1  mrg 	    cost--;
   9587  1.1  mrg 	  /* When the preceding instruction loads the shift amount of
   9588  1.1  mrg 	     the following SHAD/SHLD, the latency of the load is increased
   9589  1.1  mrg 	     by 1 cycle.  */
   9590  1.1  mrg 	  if (get_attr_type (insn) == TYPE_DYN_SHIFT
   9591  1.1  mrg 	      && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
   9592  1.1  mrg 	      && reg_overlap_mentioned_p (SET_DEST (dep_set),
   9593  1.1  mrg 					  XEXP (SET_SRC (single_set (insn)),
   9594  1.1  mrg 						1)))
   9595  1.1  mrg 	    cost++;
   9596  1.1  mrg 	  /* When an LS group instruction with a latency of less than
   9597  1.1  mrg 	     3 cycles is followed by a double-precision floating-point
   9598  1.1  mrg 	     instruction, FIPR, or FTRV, the latency of the first
   9599  1.1  mrg 	     instruction is increased to 3 cycles.  */
   9600  1.1  mrg 	  else if (cost < 3
   9601  1.1  mrg 		   && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
   9602  1.1  mrg 		   && get_attr_dfp_comp (insn) == DFP_COMP_YES)
   9603  1.1  mrg 	    cost = 3;
    9604  1.1  mrg 	  /* The lsw (least significant word) register of a double-precision
    9605  1.1  mrg 	     computation is ready one cycle earlier.  */
   9606  1.1  mrg 	  else if (reload_completed
   9607  1.1  mrg 		   && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
   9608  1.1  mrg 		   && (use_pat = single_set (insn))
   9609  1.1  mrg 		   && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
   9610  1.1  mrg 				      SET_SRC (use_pat)))
   9611  1.1  mrg 	    cost -= 1;
   9612  1.1  mrg 
   9613  1.1  mrg 	  if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
   9614  1.1  mrg 	      && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
   9615  1.1  mrg 	    cost -= 1;
   9616  1.1  mrg 	}
   9617  1.1  mrg       else if (TARGET_SH4_300)
   9618  1.1  mrg 	{
   9619  1.1  mrg 	  /* Stores need their input register two cycles later.  */
   9620  1.1  mrg 	  attr_type type;
   9621  1.1  mrg 	  if (dep_set && cost >= 1
   9622  1.1  mrg 	      && ((type = get_attr_type (insn)) == TYPE_STORE
   9623  1.1  mrg 		  || type == TYPE_PSTORE
   9624  1.1  mrg 		  || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
   9625  1.1  mrg 	    {
   9626  1.1  mrg 	      rtx set = single_set (insn);
   9627  1.1  mrg 
   9628  1.1  mrg 	      if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
   9629  1.1  mrg 		  && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
   9630  1.1  mrg 		{
   9631  1.1  mrg 		  cost -= 2;
   9632  1.1  mrg 		  /* But don't reduce the cost below 1 if the address depends
   9633  1.1  mrg 		     on a side effect of dep_insn.  */
   9634  1.1  mrg 		  if (cost < 1
   9635  1.1  mrg 		      && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
   9636  1.1  mrg 		    cost = 1;
   9637  1.1  mrg 		}
   9638  1.1  mrg 	    }
   9639  1.1  mrg 	}
   9640  1.1  mrg     }
   9641  1.1  mrg   /* An anti-dependence penalty of two applies if the first insn is a double
   9642  1.1  mrg      precision fadd / fsub / fmul.  */
   9643  1.1  mrg   else if (!TARGET_SH4_300
   9644  1.1  mrg 	   && dep_type == REG_DEP_ANTI
   9645  1.1  mrg 	   && recog_memoized (dep_insn) >= 0
   9646  1.1  mrg 	   && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
   9647  1.1  mrg 	       || get_attr_type (dep_insn) == TYPE_DFP_MUL)
   9648  1.1  mrg 	   /* A lot of alleged anti-flow dependences are fake,
   9649  1.1  mrg 	      so check this one is real.  */
   9650  1.1  mrg 	   && flow_dependent_p (dep_insn, insn))
   9651  1.1  mrg     cost = 2;
   9652  1.1  mrg 
   9653  1.1  mrg   return cost;
   9654  1.1  mrg }
   9655  1.1  mrg 
   9656  1.1  mrg /* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
   9657  1.1  mrg    if DEP_INSN is anti-flow dependent on INSN.  */
   9658  1.1  mrg static bool
   9659  1.1  mrg flow_dependent_p (rtx_insn *insn, rtx_insn *dep_insn)
   9660  1.1  mrg {
   9661  1.1  mrg   rtx tmp = PATTERN (insn);
   9662  1.1  mrg 
   9663  1.1  mrg   note_stores (dep_insn, flow_dependent_p_1, &tmp);
   9664  1.1  mrg   return tmp == NULL_RTX;
   9665  1.1  mrg }
   9666  1.1  mrg 
   9667  1.1  mrg /* A helper function for flow_dependent_p called through note_stores.  */
   9668  1.1  mrg static void
   9669  1.1  mrg flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
   9670  1.1  mrg {
   9671  1.1  mrg   rtx * pinsn = (rtx *) data;
   9672  1.1  mrg 
   9673  1.1  mrg   if (*pinsn && reg_referenced_p (x, *pinsn))
   9674  1.1  mrg     *pinsn = NULL_RTX;
   9675  1.1  mrg }
   9676  1.1  mrg 
   9677  1.1  mrg /* For use by sh_allocate_initial_value.  Note that sh.md contains some
   9678  1.1  mrg    'special function' patterns (type sfunc) that clobber pr, but that
   9679  1.1  mrg    do not look like function calls to leaf_function_p.  Hence we must
   9680  1.1  mrg    do this extra check.  */
   9681  1.1  mrg static int
   9682  1.1  mrg sh_pr_n_sets (void)
   9683  1.1  mrg {
   9684  1.1  mrg   return DF_REG_DEF_COUNT (PR_REG);
   9685  1.1  mrg }
   9686  1.1  mrg 
   9687  1.1  mrg /* Return where to allocate pseudo for a given hard register initial
   9688  1.1  mrg    value.  */
   9689  1.1  mrg static rtx
   9690  1.1  mrg sh_allocate_initial_value (rtx hard_reg)
   9691  1.1  mrg {
   9692  1.1  mrg   if (REGNO (hard_reg) == PR_REG)
   9693  1.1  mrg     {
   9694  1.1  mrg       if (crtl->is_leaf && ! sh_pr_n_sets ())
   9695  1.1  mrg 	return hard_reg;
   9696  1.1  mrg       else
   9697  1.1  mrg 	return gen_frame_mem (Pmode, return_address_pointer_rtx);
   9698  1.1  mrg     }
   9699  1.1  mrg 
   9700  1.1  mrg   return NULL_RTX;
   9701  1.1  mrg }
   9702  1.1  mrg 
   9703  1.1  mrg /* This function returns "2" to indicate dual issue for the SH4
   9704  1.1  mrg    processor.  To be used by the DFA pipeline description.  */
   9705  1.1  mrg static int
   9706  1.1  mrg sh_issue_rate (void)
   9707  1.1  mrg {
   9708  1.1  mrg   if (TARGET_SUPERSCALAR)
   9709  1.1  mrg     return 2;
   9710  1.1  mrg   else
   9711  1.1  mrg     return 1;
   9712  1.1  mrg }
   9713  1.1  mrg 
   9714  1.1  mrg /* Functions for ready queue reordering for sched1.  */
   9715  1.1  mrg 
    9716  1.1  mrg /* Get the regmode weight contribution of a SET or CLOBBER X for MODE.  */
   9717  1.1  mrg static short
   9718  1.1  mrg find_set_regmode_weight (rtx x, machine_mode mode)
   9719  1.1  mrg {
   9720  1.1  mrg   if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
   9721  1.1  mrg     return 1;
   9722  1.1  mrg   if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
   9723  1.1  mrg     {
   9724  1.1  mrg       if (REG_P (SET_DEST (x)))
   9725  1.1  mrg 	{
   9726  1.1  mrg 	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
   9727  1.1  mrg 	    return 1;
   9728  1.1  mrg 	  else
   9729  1.1  mrg 	    return 0;
   9730  1.1  mrg 	}
   9731  1.1  mrg       return 1;
   9732  1.1  mrg     }
   9733  1.1  mrg   return 0;
   9734  1.1  mrg }
   9735  1.1  mrg 
   9736  1.1  mrg /* Get regmode weight for insn.  */
   9737  1.1  mrg static short
   9738  1.1  mrg find_insn_regmode_weight (rtx insn, machine_mode mode)
   9739  1.1  mrg {
   9740  1.1  mrg   /* Increment weight for each register born here.  */
   9741  1.1  mrg   rtx x = PATTERN (insn);
   9742  1.1  mrg   short reg_weight = find_set_regmode_weight (x, mode);
   9743  1.1  mrg   if (GET_CODE (x) == PARALLEL)
   9744  1.1  mrg     {
   9745  1.1  mrg       int j;
   9746  1.1  mrg       for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
   9747  1.1  mrg 	{
   9748  1.1  mrg 	  x = XVECEXP (PATTERN (insn), 0, j);
   9749  1.1  mrg 	  reg_weight += find_set_regmode_weight (x, mode);
   9750  1.1  mrg 	}
   9751  1.1  mrg     }
   9752  1.1  mrg   /* Decrement weight for each register that dies here.  */
   9753  1.1  mrg   for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
   9754  1.1  mrg     {
   9755  1.1  mrg       if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
   9756  1.1  mrg 	{
   9757  1.1  mrg 	  rtx note = XEXP (x, 0);
   9758  1.1  mrg 	  if (REG_P (note) && GET_MODE (note) == mode)
   9759  1.1  mrg 	    reg_weight--;
   9760  1.1  mrg 	}
   9761  1.1  mrg     }
   9762  1.1  mrg   return reg_weight;
   9763  1.1  mrg }
   9764  1.1  mrg 
   9765  1.1  mrg /* Calculate regmode weights for all insns of a basic block.  */
   9766  1.1  mrg static void
   9767  1.1  mrg find_regmode_weight (basic_block b, machine_mode mode)
   9768  1.1  mrg {
   9769  1.1  mrg   rtx_insn *insn, *next_tail, *head, *tail;
   9770  1.1  mrg 
   9771  1.1  mrg   get_ebb_head_tail (b, b, &head, &tail);
   9772  1.1  mrg   next_tail = NEXT_INSN (tail);
   9773  1.1  mrg 
   9774  1.1  mrg   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
   9775  1.1  mrg     {
   9776  1.1  mrg       /* Handle register life information.  */
   9777  1.1  mrg       if (!INSN_P (insn))
   9778  1.1  mrg 	continue;
   9779  1.1  mrg 
   9780  1.1  mrg       if (mode == SFmode)
   9781  1.1  mrg 	INSN_REGMODE_WEIGHT (insn, mode) =
   9782  1.1  mrg 	  find_insn_regmode_weight (insn, mode)
   9783  1.1  mrg 	  + 2 * find_insn_regmode_weight (insn, DFmode);
   9784  1.1  mrg       else if (mode == SImode)
   9785  1.1  mrg 	INSN_REGMODE_WEIGHT (insn, mode) =
   9786  1.1  mrg 	  find_insn_regmode_weight (insn, mode)
   9787  1.1  mrg 	  + 2 * find_insn_regmode_weight (insn, DImode);
   9788  1.1  mrg     }
   9789  1.1  mrg }
   9790  1.1  mrg 
   9791  1.1  mrg /* Comparison function for ready queue sorting.  */
   9792  1.1  mrg static int
   9793  1.1  mrg rank_for_reorder (const void *x, const void *y)
   9794  1.1  mrg {
   9795  1.1  mrg   rtx_insn *tmp = *(rtx_insn * const *) y;
   9796  1.1  mrg   rtx_insn *tmp2 = *(rtx_insn * const *) x;
   9797  1.1  mrg 
    9798  1.1  mrg   /* The insn in a schedule group should be issued first.  */
   9799  1.1  mrg   if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
   9800  1.1  mrg     return SCHED_GROUP_P (tmp2) ? 1 : -1;
   9801  1.1  mrg 
    9802  1.1  mrg   /* If insns are equally good, sort by INSN_LUID (original insn order).  This
    9803  1.1  mrg      minimizes instruction movement, thus minimizing sched's effect on
    9804  1.1  mrg      register pressure.  */
   9805  1.1  mrg   return INSN_LUID (tmp) - INSN_LUID (tmp2);
   9806  1.1  mrg }
   9807  1.1  mrg 
    9808  1.1  mrg /* Resort the array A, in which only the element at index N - 1 may be out of order.  */
   9809  1.1  mrg static void
   9810  1.1  mrg swap_reorder (rtx_insn **a, int n)
   9811  1.1  mrg {
   9812  1.1  mrg   rtx_insn *insn = a[n - 1];
   9813  1.1  mrg   int i = n - 2;
   9814  1.1  mrg 
   9815  1.1  mrg   while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
   9816  1.1  mrg     {
   9817  1.1  mrg       a[i + 1] = a[i];
   9818  1.1  mrg       i -= 1;
   9819  1.1  mrg     }
   9820  1.1  mrg   a[i + 1] = insn;
   9821  1.1  mrg }
   9822  1.1  mrg 
   9823  1.1  mrg /* Sort the ready list by ascending priority.  */
   9824  1.1  mrg static void
   9825  1.1  mrg ready_reorder (rtx_insn **ready, int nready)
   9826  1.1  mrg {
   9827  1.1  mrg   if (nready == 2)
   9828  1.1  mrg     swap_reorder (ready, nready);
   9829  1.1  mrg   else if (nready > 2)
   9830  1.1  mrg      qsort (ready, nready, sizeof (rtx_insn *), rank_for_reorder);
   9831  1.1  mrg }
   9832  1.1  mrg 
   9833  1.1  mrg /* Count life regions of r0 for a block.  */
   9834  1.1  mrg static int
   9835  1.1  mrg find_r0_life_regions (basic_block b)
   9836  1.1  mrg {
   9837  1.1  mrg   bool live;
   9838  1.1  mrg   int set;
   9839  1.1  mrg   int death = 0;
   9840  1.1  mrg 
   9841  1.1  mrg   if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
   9842  1.1  mrg     {
   9843  1.1  mrg       set = 1;
   9844  1.1  mrg       live = true;
   9845  1.1  mrg     }
   9846  1.1  mrg   else
   9847  1.1  mrg     {
   9848  1.1  mrg       set = 0;
   9849  1.1  mrg       live = false;
   9850  1.1  mrg     }
   9851  1.1  mrg 
   9852  1.1  mrg   rtx_insn* insn = BB_HEAD (b);
   9853  1.1  mrg   rtx_insn* end = BB_END (b);
   9854  1.1  mrg   rtx r0_reg = gen_rtx_REG (SImode, R0_REG);
   9855  1.1  mrg   while (1)
   9856  1.1  mrg     {
   9857  1.1  mrg       if (INSN_P (insn))
   9858  1.1  mrg 	{
   9859  1.1  mrg 	  if (find_regno_note (insn, REG_DEAD, R0_REG))
   9860  1.1  mrg 	    {
   9861  1.1  mrg 	      death++;
   9862  1.1  mrg 	      live = false;
   9863  1.1  mrg 	    }
   9864  1.1  mrg 
   9865  1.1  mrg 	  rtx pset;
   9866  1.1  mrg 	  if (!live
   9867  1.1  mrg 	      && (pset = single_set (insn))
   9868  1.1  mrg 	      && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
   9869  1.1  mrg 	      && !find_regno_note (insn, REG_UNUSED, R0_REG))
   9870  1.1  mrg 	    {
   9871  1.1  mrg 	      set++;
   9872  1.1  mrg 	      live = true;
   9873  1.1  mrg 	    }
   9874  1.1  mrg 	}
   9875  1.1  mrg       if (insn == end)
   9876  1.1  mrg 	break;
   9877  1.1  mrg       insn = NEXT_INSN (insn);
   9878  1.1  mrg     }
   9879  1.1  mrg   return set - death;
   9880  1.1  mrg }
   9881  1.1  mrg 
    9882  1.1  mrg /* Calculate regmode weights for all insns of all basic blocks.  */
   9883  1.1  mrg static void
   9884  1.1  mrg sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
   9885  1.1  mrg 		   int verbose ATTRIBUTE_UNUSED,
   9886  1.1  mrg 		   int old_max_uid)
   9887  1.1  mrg {
   9888  1.1  mrg   basic_block b;
   9889  1.1  mrg 
   9890  1.1  mrg   regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
   9891  1.1  mrg   regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
   9892  1.1  mrg   r0_life_regions = 0;
   9893  1.1  mrg 
   9894  1.1  mrg   FOR_EACH_BB_REVERSE_FN (b, cfun)
   9895  1.1  mrg   {
   9896  1.1  mrg     find_regmode_weight (b, SImode);
   9897  1.1  mrg     find_regmode_weight (b, SFmode);
   9898  1.1  mrg     if (!reload_completed)
   9899  1.1  mrg       r0_life_regions += find_r0_life_regions (b);
   9900  1.1  mrg   }
   9901  1.1  mrg 
   9902  1.1  mrg   CURR_REGMODE_PRESSURE (SImode) = 0;
   9903  1.1  mrg   CURR_REGMODE_PRESSURE (SFmode) = 0;
   9904  1.1  mrg }
   9905  1.1  mrg 
   9906  1.1  mrg /* Cleanup.  */
   9907  1.1  mrg static void
   9908  1.1  mrg sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
   9909  1.1  mrg 		     int verbose ATTRIBUTE_UNUSED)
   9910  1.1  mrg {
   9911  1.1  mrg   if (regmode_weight[0])
   9912  1.1  mrg     {
   9913  1.1  mrg       free (regmode_weight[0]);
   9914  1.1  mrg       regmode_weight[0] = NULL;
   9915  1.1  mrg     }
   9916  1.1  mrg   if (regmode_weight[1])
   9917  1.1  mrg     {
   9918  1.1  mrg       free (regmode_weight[1]);
   9919  1.1  mrg       regmode_weight[1] = NULL;
   9920  1.1  mrg     }
   9921  1.1  mrg }
   9922  1.1  mrg 
    9923  1.1  mrg /* Cache can_issue_more so that we can return it from sh_reorder2.  Also,
    9924  1.1  mrg    keep count of register pressure for SImode and SFmode.  */
   9925  1.1  mrg static int
   9926  1.1  mrg sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
   9927  1.1  mrg 		   int sched_verbose ATTRIBUTE_UNUSED,
   9928  1.1  mrg 		   rtx_insn *insn,
   9929  1.1  mrg 		   int can_issue_more)
   9930  1.1  mrg {
   9931  1.1  mrg   if (GET_CODE (PATTERN (insn)) != USE
   9932  1.1  mrg       && GET_CODE (PATTERN (insn)) != CLOBBER)
   9933  1.1  mrg     cached_can_issue_more = can_issue_more - 1;
   9934  1.1  mrg   else
   9935  1.1  mrg     cached_can_issue_more = can_issue_more;
   9936  1.1  mrg 
   9937  1.1  mrg   if (reload_completed)
   9938  1.1  mrg     return cached_can_issue_more;
   9939  1.1  mrg 
   9940  1.1  mrg   CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
   9941  1.1  mrg   CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
   9942  1.1  mrg 
   9943  1.1  mrg   return cached_can_issue_more;
   9944  1.1  mrg }
   9945  1.1  mrg 
   9946  1.1  mrg static void
   9947  1.1  mrg sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
   9948  1.1  mrg 	    int verbose ATTRIBUTE_UNUSED,
   9949  1.1  mrg 	    int veclen ATTRIBUTE_UNUSED)
   9950  1.1  mrg {
   9951  1.1  mrg   CURR_REGMODE_PRESSURE (SImode) = 0;
   9952  1.1  mrg   CURR_REGMODE_PRESSURE (SFmode) = 0;
   9953  1.1  mrg }
   9954  1.1  mrg 
   9955  1.1  mrg /* Some magic numbers.  */
    9956  1.1  mrg /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
    9957  1.1  mrg    functions that already have high pressure on r0.  */
   9958  1.1  mrg #define R0_MAX_LIFE_REGIONS 2
   9959  1.1  mrg /* Register Pressure thresholds for SImode and SFmode registers.  */
   9960  1.1  mrg #define SIMODE_MAX_WEIGHT 5
   9961  1.1  mrg #define SFMODE_MAX_WEIGHT 10
   9962  1.1  mrg 
   9963  1.1  mrg /* Return true if the pressure is high for MODE.  */
   9964  1.1  mrg static bool
   9965  1.1  mrg high_pressure (machine_mode mode)
   9966  1.1  mrg {
    9967  1.1  mrg   /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
    9968  1.1  mrg      functions that already have high pressure on r0.  */
    9969  1.1  mrg   if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
    9970  1.1  mrg     return true;
   9971  1.1  mrg 
   9972  1.1  mrg   if (mode == SFmode)
   9973  1.1  mrg     return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
   9974  1.1  mrg   else
   9975  1.1  mrg     return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
   9976  1.1  mrg }
   9977  1.1  mrg 
   9978  1.1  mrg /* Reorder ready queue if register pressure is high.  */
   9979  1.1  mrg static int
   9980  1.1  mrg sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
   9981  1.1  mrg 	    int sched_verbose ATTRIBUTE_UNUSED,
   9982  1.1  mrg 	    rtx_insn **ready,
   9983  1.1  mrg 	    int *n_readyp,
   9984  1.1  mrg 	    int clock_var ATTRIBUTE_UNUSED)
   9985  1.1  mrg {
   9986  1.1  mrg   if (reload_completed)
   9987  1.1  mrg     return sh_issue_rate ();
   9988  1.1  mrg 
   9989  1.1  mrg   if (high_pressure (SFmode) || high_pressure (SImode))
   9990  1.1  mrg     {
   9991  1.1  mrg       ready_reorder (ready, *n_readyp);
   9992  1.1  mrg     }
   9993  1.1  mrg 
   9994  1.1  mrg   return sh_issue_rate ();
   9995  1.1  mrg }
   9996  1.1  mrg 
   9997  1.1  mrg /* Skip cycles if the current register pressure is high.  */
   9998  1.1  mrg static int
   9999  1.1  mrg sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
   10000  1.1  mrg 	     int sched_verbose ATTRIBUTE_UNUSED,
   10001  1.1  mrg 	     rtx_insn **ready ATTRIBUTE_UNUSED,
   10002  1.1  mrg 	     int *n_readyp ATTRIBUTE_UNUSED,
   10003  1.1  mrg 	     int clock_var ATTRIBUTE_UNUSED)
   10004  1.1  mrg {
   10005  1.1  mrg   if (reload_completed)
   10006  1.1  mrg     return cached_can_issue_more;
   10007  1.1  mrg 
   10008  1.1  mrg   if (high_pressure(SFmode) || high_pressure (SImode))
   10009  1.1  mrg     skip_cycles = 1;
   10010  1.1  mrg 
   10011  1.1  mrg   return cached_can_issue_more;
   10012  1.1  mrg }
   10013  1.1  mrg 
    10014  1.1  mrg /* Skip cycles without sorting the ready queue.  This will move insns from
    10015  1.1  mrg    Q -> R.  If this is the last cycle we are skipping, allow sorting of the
    10016  1.1  mrg    ready queue by sh_reorder.  */
   10017  1.1  mrg 
    10018  1.1  mrg /* Generally, skipping this many cycles is sufficient for all insns to move
    10019  1.1  mrg    from Q -> R.  */
   10020  1.1  mrg #define MAX_SKIPS 8
   10021  1.1  mrg 
   10022  1.1  mrg static int
   10023  1.1  mrg sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
   10024  1.1  mrg 		  int sched_verbose ATTRIBUTE_UNUSED,
   10025  1.1  mrg 		  rtx_insn *insn ATTRIBUTE_UNUSED,
   10026  1.1  mrg 		  int last_clock_var,
   10027  1.1  mrg 		  int clock_var,
   10028  1.1  mrg 		  int *sort_p)
   10029  1.1  mrg {
   10030  1.1  mrg   if (reload_completed)
   10031  1.1  mrg     return 0;
   10032  1.1  mrg 
   10033  1.1  mrg   if (skip_cycles)
   10034  1.1  mrg     {
   10035  1.1  mrg       if ((clock_var - last_clock_var) < MAX_SKIPS)
   10036  1.1  mrg 	{
   10037  1.1  mrg 	  *sort_p = 0;
   10038  1.1  mrg 	  return 1;
   10039  1.1  mrg 	}
   10040  1.1  mrg       /* If this is the last cycle we are skipping, allow reordering of R.  */
   10041  1.1  mrg       if ((clock_var - last_clock_var) == MAX_SKIPS)
   10042  1.1  mrg 	{
   10043  1.1  mrg 	  *sort_p = 1;
   10044  1.1  mrg 	  return 1;
   10045  1.1  mrg 	}
   10046  1.1  mrg     }
   10047  1.1  mrg 
   10048  1.1  mrg   skip_cycles = 0;
   10049  1.1  mrg 
   10050  1.1  mrg   return 0;
   10051  1.1  mrg }
   10052  1.1  mrg 
   10053  1.1  mrg static bool
   10054  1.1  mrg sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
   10055  1.1  mrg {
   10056  1.1  mrg   return TARGET_HITACHI || sh_attr_renesas_p (record_type);
   10057  1.1  mrg }
   10058  1.1  mrg 
   10059  1.1  mrg /*
   10061  1.1  mrg    On the SH1..SH4, the trampoline looks like
   10062  1.1  mrg    2 0002 D202     	   	mov.l	l2,r2
   10063  1.1  mrg    1 0000 D301     		mov.l	l1,r3
   10064  1.1  mrg    3 0004 422B     		jmp	@r2
   10065  1.1  mrg    4 0006 0009     		nop
   10066  1.1  mrg    5 0008 00000000 	l1:  	.long   area
   10067  1.1  mrg    6 000c 00000000 	l2:	.long   function
   10068  1.1  mrg 
   10069  1.1  mrg    FDPIC needs a form that includes a function descriptor and
   10070  1.1  mrg    code to load the GOT register:
   10071  1.1  mrg    0 0000 00000000		.long	l0
   10072  1.1  mrg    1 0004 00000000		.long	gotval
   10073  1.1  mrg    2 0008 D302    	l0:	mov.l	l1,r3
   10074  1.1  mrg    3 000a D203    		mov.l	l2,r2
   10075  1.1  mrg    4 000c 6122    		mov.l	@r2,r1
   10076  1.1  mrg    5 000e 5C21    		mov.l	@(4,r2),r12
   10077  1.1  mrg    6 0010 412B    		jmp	@r1
   10078  1.1  mrg    7 0012 0009    		nop
   10079  1.1  mrg    8 0014 00000000	l1:	.long	area
   10080  1.1  mrg    9 0018 00000000	l2:	.long	function
   10081  1.1  mrg 
   10082  1.1  mrg    SH5 (compact) uses r1 instead of r3 for the static chain.  */
   10083  1.1  mrg 
   10084  1.1  mrg /* Emit insns to store a value at memory address + offset.  */
   10085  1.1  mrg static void
   10086  1.1  mrg sh_emit_storesi (rtx addr, HOST_WIDE_INT offset, rtx value)
   10087  1.1  mrg {
   10088  1.1  mrg   gcc_assert ((offset & 3) == 0);
   10089  1.1  mrg   emit_move_insn (offset == 0
   10090  1.1  mrg 		  ? change_address (addr, SImode, NULL_RTX)
   10091  1.1  mrg 		  : adjust_address (addr, SImode, offset), value);
   10092  1.1  mrg }
   10093  1.1  mrg 
   10094  1.1  mrg /* Emit insns to store w0 at addr + offset and w1 at addr + offset + 2.  */
   10095  1.1  mrg static void
   10096  1.1  mrg sh_emit_storehi (rtx addr, HOST_WIDE_INT offset, uint16_t w0, uint16_t w1)
   10097  1.1  mrg {
   10098  1.1  mrg   sh_emit_storesi (addr, offset, gen_int_mode (TARGET_LITTLE_ENDIAN
   10099  1.1  mrg 					       ? (w0 | (w1 << 16))
   10100  1.1  mrg 					       : (w1 | (w0 << 16)), SImode));
   10101  1.1  mrg }
   10102  1.1  mrg 
    10103  1.1  mrg /* Emit RTL insns to initialize the variable parts of a trampoline.
    10104  1.1  mrg    FNDECL is the decl of the target function; its address is stored in the
    10105  1.1  mrg    trampoline.  CXT is an RTX for the static chain value for the function.  */
   10106  1.1  mrg static void
   10107  1.1  mrg sh_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
   10108  1.1  mrg {
   10109  1.1  mrg   rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
   10110  1.1  mrg   rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));
   10111  1.1  mrg 
   10112  1.1  mrg   if (TARGET_FDPIC)
   10113  1.1  mrg     {
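      /* FDPIC trampoline: the function descriptor (entry point and GOT value),
	 followed by the code and literals from the layout comment above.  */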
   10114  1.1  mrg       rtx a = force_reg (Pmode, plus_constant (Pmode, XEXP (tramp_mem, 0), 8));
   10115  1.1  mrg 
   10116  1.1  mrg       sh_emit_storesi (tramp_mem, 0, a);
   10117  1.1  mrg       sh_emit_storesi (tramp_mem, 4, sh_get_fdpic_reg_initial_val ());
   10118  1.1  mrg 
   10119  1.1  mrg       sh_emit_storehi (tramp_mem,  8, 0xd302, 0xd203);
   10120  1.1  mrg       sh_emit_storehi (tramp_mem, 12, 0x6122, 0x5c21);
   10121  1.1  mrg       sh_emit_storehi (tramp_mem, 16, 0x412b, 0x0009);
   10122  1.1  mrg 
   10123  1.1  mrg       sh_emit_storesi (tramp_mem, 20, cxt);
   10124  1.1  mrg       sh_emit_storesi (tramp_mem, 24, fnaddr);
   10125  1.1  mrg     }
   10126  1.1  mrg   else
   10127  1.1  mrg     {
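      /* Plain SH1..SH4 trampoline: the four opcodes from the layout comment
	 above, followed by the static chain and function address literals.  */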
   10128  1.1  mrg       sh_emit_storehi (tramp_mem, 0, 0xd202, 0xd301);
   10129  1.1  mrg       sh_emit_storehi (tramp_mem, 4, 0x422b, 0x0009);
   10130  1.1  mrg 
   10131  1.1  mrg       sh_emit_storesi (tramp_mem,  8, cxt);
   10132  1.1  mrg       sh_emit_storesi (tramp_mem, 12, fnaddr);
   10133  1.1  mrg     }
   10134  1.1  mrg   if (TARGET_HARD_SH4)
   10135  1.1  mrg     {
   10136  1.1  mrg       if (!TARGET_INLINE_IC_INVALIDATE
   10137  1.1  mrg 	  || (!(TARGET_SH4A || TARGET_SH4_300) && TARGET_USERMODE))
   10138  1.1  mrg 	emit_library_call (function_symbol (NULL, "__ic_invalidate",
   10139  1.1  mrg 					    FUNCTION_ORDINARY).sym,
   10140  1.1  mrg 			   LCT_NORMAL, VOIDmode, tramp, SImode);
   10141  1.1  mrg       else
   10142  1.1  mrg 	emit_insn (gen_ic_invalidate_line (tramp));
   10143  1.1  mrg     }
   10144  1.1  mrg }
   10145  1.1  mrg 
    10146  1.1  mrg /* On SH5, trampolines were SHmedia code and needed 1 added to the address; nothing to adjust for the remaining targets.  */
   10147  1.1  mrg static rtx
   10148  1.1  mrg sh_trampoline_adjust_address (rtx tramp)
   10149  1.1  mrg {
   10150  1.1  mrg   return tramp;
   10151  1.1  mrg }
   10152  1.1  mrg 
   10153  1.1  mrg /* If PIC, we cannot make sibling calls to global functions
   10154  1.1  mrg    because the PLT requires r12 to be live.  */
   10155  1.1  mrg static bool
   10156  1.1  mrg sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
   10157  1.1  mrg {
    10158  1.1  mrg   return (! sh_cfun_interrupt_handler_p ()
   10160  1.1  mrg 	  && (! flag_pic || TARGET_FDPIC
   10161  1.1  mrg 	      || (decl && ! (TREE_PUBLIC (decl) || DECL_WEAK (decl)))
   10162  1.1  mrg 	      || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
   10163  1.1  mrg }
   10164  1.1  mrg 
   10165  1.1  mrg /* Expand to appropriate sym*_label2reg for SYM and SIBCALL_P.  */
   10166  1.1  mrg void
   10167  1.1  mrg sh_expand_sym_label2reg (rtx reg, rtx sym, rtx lab, bool sibcall_p)
   10168  1.1  mrg {
   10169  1.1  mrg   const_tree decl = SYMBOL_REF_DECL (sym);
   10170  1.1  mrg   bool is_weak = (decl && DECL_P (decl) && DECL_WEAK (decl));
   10171  1.1  mrg 
   10172  1.1  mrg   if (!is_weak && SYMBOL_REF_LOCAL_P (sym))
   10173  1.1  mrg     emit_insn (gen_sym_label2reg (reg, sym, lab));
   10174  1.1  mrg   else if (sibcall_p && SYMBOL_REF_LOCAL_P (sym))
   10175  1.1  mrg     emit_insn (gen_symPCREL_label2reg (reg, sym, lab));
   10176  1.1  mrg   else
   10177  1.1  mrg     emit_insn (gen_symPLT_label2reg (reg, sym, lab));
   10178  1.1  mrg }
   10179  1.1  mrg 
   10180  1.1  mrg /* Machine specific built-in functions.  */
   10182  1.1  mrg 
   10183  1.1  mrg struct builtin_description
   10184  1.1  mrg {
   10185  1.1  mrg   bool (* const is_enabled) (void);
   10186  1.1  mrg   const enum insn_code icode;
   10187  1.1  mrg   const char *const name;
   10188  1.1  mrg   int signature;
   10189  1.1  mrg   tree fndecl;
   10190  1.1  mrg };
   10191  1.1  mrg 
    10192  1.1  mrg /* Enable predicate for the built-ins below; returns true for any SH1
    10193  1.1  mrg    (or later) target.  */
   10194  1.1  mrg static bool
   10195  1.1  mrg sh1_builtin_p (void)
   10196  1.1  mrg {
   10197  1.1  mrg   return TARGET_SH1;
   10198  1.1  mrg }
   10199  1.1  mrg 
    10200  1.1  mrg /* Describe the number and signedness of arguments; arg[0] == result
    10201  1.1  mrg    (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument;
    10202  1.1  mrg    9: 64-bit pointer, 10: 32-bit pointer).  */
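/* For example, SH_BLTIN_UV below is { 1, 0 }: an unsigned result and no
   arguments (the signature of __builtin_sh_get_fpscr), while SH_BLTIN_VU
   is { 0, 1 }: no result and one unsigned argument (__builtin_sh_set_fpscr).  */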
   10203  1.1  mrg static const char signature_args[][4] =
   10204  1.1  mrg {
   10205  1.1  mrg #define SH_BLTIN_V2SI2 0
   10206  1.1  mrg   { 4, 4 },
   10207  1.1  mrg #define SH_BLTIN_V4HI2 1
   10208  1.1  mrg   { 4, 4 },
   10209  1.1  mrg #define SH_BLTIN_V2SI3 2
   10210  1.1  mrg   { 4, 4, 4 },
   10211  1.1  mrg #define SH_BLTIN_V4HI3 3
   10212  1.1  mrg   { 4, 4, 4 },
   10213  1.1  mrg #define SH_BLTIN_V8QI3 4
   10214  1.1  mrg   { 4, 4, 4 },
   10215  1.1  mrg #define SH_BLTIN_MAC_HISI 5
   10216  1.1  mrg   { 1, 4, 4, 1 },
   10217  1.1  mrg #define SH_BLTIN_SH_HI 6
   10218  1.1  mrg   { 4, 4, 1 },
   10219  1.1  mrg #define SH_BLTIN_SH_SI 7
   10220  1.1  mrg   { 4, 4, 1 },
   10221  1.1  mrg #define SH_BLTIN_V4HI2V2SI 8
   10222  1.1  mrg   { 4, 4, 4 },
   10223  1.1  mrg #define SH_BLTIN_V4HI2V8QI 9
   10224  1.1  mrg   { 4, 4, 4 },
   10225  1.1  mrg #define SH_BLTIN_SISF 10
   10226  1.1  mrg   { 4, 2 },
   10227  1.1  mrg #define SH_BLTIN_LDUA_L 11
   10228  1.1  mrg   { 2, 10 },
   10229  1.1  mrg #define SH_BLTIN_LDUA_Q 12
   10230  1.1  mrg   { 1, 10 },
   10231  1.1  mrg #define SH_BLTIN_STUA_L 13
   10232  1.1  mrg   { 0, 10, 2 },
   10233  1.1  mrg #define SH_BLTIN_STUA_Q 14
   10234  1.1  mrg   { 0, 10, 1 },
   10235  1.1  mrg #define SH_BLTIN_LDUA_L64 15
   10236  1.1  mrg   { 2, 9 },
   10237  1.1  mrg #define SH_BLTIN_LDUA_Q64 16
   10238  1.1  mrg   { 1, 9 },
   10239  1.1  mrg #define SH_BLTIN_STUA_L64 17
   10240  1.1  mrg   { 0, 9, 2 },
   10241  1.1  mrg #define SH_BLTIN_STUA_Q64 18
   10242  1.1  mrg   { 0, 9, 1 },
   10243  1.1  mrg #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
   10244  1.1  mrg #define SH_BLTIN_2 19
   10245  1.1  mrg #define SH_BLTIN_SU 19
   10246  1.1  mrg   { 1, 2 },
   10247  1.1  mrg #define SH_BLTIN_3 20
   10248  1.1  mrg #define SH_BLTIN_SUS 20
   10249  1.1  mrg   { 2, 2, 1 },
   10250  1.1  mrg #define SH_BLTIN_PSSV 21
   10251  1.1  mrg   { 0, 8, 2, 2 },
   10252  1.1  mrg #define SH_BLTIN_XXUU 22
   10253  1.1  mrg #define SH_BLTIN_UUUU 22
   10254  1.1  mrg   { 1, 1, 1, 1 },
   10255  1.1  mrg #define SH_BLTIN_PV 23
   10256  1.1  mrg   { 0, 8 },
   10257  1.1  mrg #define SH_BLTIN_VP 24
   10258  1.1  mrg   { 8, 0 },
   10259  1.1  mrg #define SH_BLTIN_UV 25
   10260  1.1  mrg   { 1, 0 },
   10261  1.1  mrg #define SH_BLTIN_VU 26
   10262  1.1  mrg   { 0, 1 },
   10263  1.1  mrg };
   10264  1.1  mrg /* mcmv: operands considered unsigned.  */
   10265  1.1  mrg /* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
   10266  1.1  mrg /* mperm: control value considered unsigned int.  */
   10267  1.1  mrg /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
   10268  1.1  mrg /* mshards_q: returns signed short.  */
   10269  1.1  mrg /* nsb: takes long long arg, returns unsigned char.  */
   10270  1.1  mrg static struct builtin_description bdesc[] =
   10271  1.1  mrg {
   10272  1.1  mrg   { sh1_builtin_p,
   10273  1.1  mrg     CODE_FOR_sts_fpscr, "__builtin_sh_get_fpscr", SH_BLTIN_UV, 0 },
   10274  1.1  mrg   { sh1_builtin_p,
   10275  1.1  mrg     CODE_FOR_set_fpscr, "__builtin_sh_set_fpscr", SH_BLTIN_VU, 0 },
   10276  1.1  mrg };
   10277  1.1  mrg 
   10278  1.1  mrg static tree sh_builtin_get_fpscr;
   10279  1.1  mrg static tree sh_builtin_set_fpscr;
   10280  1.1  mrg 
   10281  1.1  mrg static void
   10282  1.1  mrg sh_init_builtins (void)
   10283  1.1  mrg {
   10284  1.1  mrg   tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
   10285  1.1  mrg   memset (shared, 0, sizeof shared);
   10286  1.1  mrg 
   10287  1.1  mrg   for (unsigned int di = 0; di < ARRAY_SIZE (bdesc); ++di)
   10288  1.1  mrg     {
   10289  1.1  mrg       builtin_description* d = &bdesc[di];
   10290  1.1  mrg 
   10291  1.1  mrg       if (!d->is_enabled ())
   10292  1.1  mrg 	continue;
   10293  1.1  mrg 
   10294  1.1  mrg       tree type, arg_type = NULL_TREE;
   10295  1.1  mrg       int signature = d->signature;
   10296  1.1  mrg 
   10297  1.1  mrg       if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
   10298  1.1  mrg 	type = shared[signature];
   10299  1.1  mrg       else
   10300  1.1  mrg 	{
   10301  1.1  mrg 	  int has_result = signature_args[signature][0] != 0;
   10302  1.1  mrg 	  tree args[3];
   10303  1.1  mrg 
   10304  1.1  mrg 	  if (! TARGET_FPU_ANY
   10305  1.1  mrg 	      && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
   10306  1.1  mrg 	    continue;
   10307  1.1  mrg 	  for (unsigned int i = 0; i < ARRAY_SIZE (args); i++)
   10308  1.1  mrg 	    args[i] = NULL_TREE;
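	  /* Walk the signature from the last possible argument down to the
	     result (i == 0), mapping each encoded value to a tree type.  */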
   10309  1.1  mrg 	  for (int i = 3; ; i--)
   10310  1.1  mrg 	    {
   10311  1.1  mrg 	      int arg = signature_args[signature][i];
   10312  1.1  mrg 	      int opno = i - 1 + has_result;
   10313  1.1  mrg 
   10314  1.1  mrg 	      if (arg & 8)
   10315  1.1  mrg 		arg_type = ptr_type_node;
   10316  1.1  mrg 	      else if (arg)
   10317  1.1  mrg 		arg_type = (*lang_hooks.types.type_for_mode)
   10318  1.1  mrg 		  (insn_data[d->icode].operand[opno].mode, (arg & 1));
   10319  1.1  mrg 	      else if (i)
   10320  1.1  mrg 		continue;
   10321  1.1  mrg 	      else
   10322  1.1  mrg 		arg_type = void_type_node;
   10323  1.1  mrg 	      if (i == 0)
   10324  1.1  mrg 		break;
   10325  1.1  mrg 	      args[i-1] = arg_type;
   10326  1.1  mrg 	    }
   10327  1.1  mrg 	  type = build_function_type_list (arg_type, args[0], args[1],
   10328  1.1  mrg 					   args[2], NULL_TREE);
   10329  1.1  mrg 	  if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
   10330  1.1  mrg 	    shared[signature] = type;
   10331  1.1  mrg 	}
   10332  1.1  mrg       d->fndecl =
   10333  1.1  mrg 	add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
   10334  1.1  mrg 			      NULL, NULL_TREE);
    10335  1.1  mrg       /* Record the {sts,set}_fpscr decls for sh_atomic_assign_expand_fenv.  */
   10336  1.1  mrg       if (d->icode == CODE_FOR_sts_fpscr)
   10337  1.1  mrg 	sh_builtin_get_fpscr = d->fndecl;
   10338  1.1  mrg       else if (d->icode == CODE_FOR_set_fpscr)
   10339  1.1  mrg 	sh_builtin_set_fpscr = d->fndecl;
   10340  1.1  mrg     }
   10341  1.1  mrg }
   10342  1.1  mrg 
   10343  1.1  mrg /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */
   10344  1.1  mrg 
   10345  1.1  mrg static void
   10346  1.1  mrg sh_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
   10347  1.1  mrg {
   10348  1.1  mrg   const unsigned SH_FE_INVALID = 64;
   10349  1.1  mrg   const unsigned SH_FE_DIVBYZERO = 32;
   10350  1.1  mrg   const unsigned SH_FE_OVERFLOW = 16;
   10351  1.1  mrg   const unsigned SH_FE_UNDERFLOW = 8;
   10352  1.1  mrg   const unsigned SH_FE_INEXACT = 4;
   10353  1.1  mrg   const unsigned HOST_WIDE_INT SH_FE_ALL_EXCEPT = (SH_FE_INVALID
   10354  1.1  mrg 						   | SH_FE_DIVBYZERO
   10355  1.1  mrg 						   | SH_FE_OVERFLOW
   10356  1.1  mrg 						   | SH_FE_UNDERFLOW
   10357  1.1  mrg 						   | SH_FE_INEXACT);
   10358  1.1  mrg   const unsigned HOST_WIDE_INT SH_FE_EXCEPT_SHIFT = 5;
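  /* The SH_FE_* values above are the FPSCR exception flag bit positions
     (bits 2-6 on SH-4); shifting them left by SH_FE_EXCEPT_SHIFT gives the
     corresponding enable bits, so the mask below clears both fields.  */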
   10359  1.1  mrg   tree fenv_var, mask, ld_fenv, masked_fenv;
   10360  1.1  mrg   tree new_fenv_var, reload_fenv, restore_fnenv;
   10361  1.1  mrg   tree update_call, atomic_feraiseexcept, hold_fnclex;
   10362  1.1  mrg 
   10363  1.1  mrg   if (! TARGET_FPU_ANY)
   10364  1.1  mrg     return;
   10365  1.1  mrg 
    10366  1.1  mrg   /* Generate the equivalent of:
   10367  1.1  mrg        unsigned int fenv_var;
   10368  1.1  mrg        fenv_var = __builtin_sh_get_fpscr ();
   10369  1.1  mrg 
   10370  1.1  mrg        unsigned int masked_fenv;
   10371  1.1  mrg        masked_fenv = fenv_var & mask;
   10372  1.1  mrg 
   10373  1.1  mrg        __builtin_sh_set_fpscr (masked_fenv);  */
   10374  1.1  mrg 
   10375  1.1  mrg   fenv_var = create_tmp_var_raw (unsigned_type_node);
   10376  1.1  mrg   mask = build_int_cst (unsigned_type_node,
   10377  1.1  mrg 			~((SH_FE_ALL_EXCEPT << SH_FE_EXCEPT_SHIFT)
   10378  1.1  mrg 			  | SH_FE_ALL_EXCEPT));
   10379  1.1  mrg   ld_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
   10380  1.1  mrg 		    fenv_var, build_call_expr (sh_builtin_get_fpscr, 0));
   10381  1.1  mrg   masked_fenv = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var, mask);
   10382  1.1  mrg   hold_fnclex = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);
   10383  1.1  mrg   fenv_var = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
   10384  1.1  mrg 		     build2 (COMPOUND_EXPR, void_type_node, masked_fenv,
   10385  1.1  mrg 			     ld_fenv),
   10386  1.1  mrg 		     NULL_TREE, NULL_TREE);
   10387  1.1  mrg   *hold = build2 (COMPOUND_EXPR, void_type_node, fenv_var, hold_fnclex);
   10388  1.1  mrg 
   10389  1.1  mrg   /* Store the value of masked_fenv to clear the exceptions:
   10390  1.1  mrg      __builtin_sh_set_fpscr (masked_fenv);  */
   10391  1.1  mrg 
   10392  1.1  mrg   *clear = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);
   10393  1.1  mrg 
    10394  1.1  mrg   /* Generate the equivalent of:
   10395  1.1  mrg        unsigned int new_fenv_var;
   10396  1.1  mrg        new_fenv_var = __builtin_sh_get_fpscr ();
   10397  1.1  mrg 
   10398  1.1  mrg        __builtin_sh_set_fpscr (fenv_var);
   10399  1.1  mrg 
   10400  1.1  mrg        __atomic_feraiseexcept (new_fenv_var);  */
   10401  1.1  mrg 
   10402  1.1  mrg   new_fenv_var = create_tmp_var_raw (unsigned_type_node);
   10403  1.1  mrg   reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node, new_fenv_var,
   10404  1.1  mrg 			build_call_expr (sh_builtin_get_fpscr, 0));
   10405  1.1  mrg   restore_fnenv = build_call_expr (sh_builtin_set_fpscr, 1, fenv_var);
   10406  1.1  mrg   atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
   10407  1.1  mrg   update_call = build_call_expr (atomic_feraiseexcept, 1,
   10408  1.1  mrg 				 fold_convert (integer_type_node,
   10409  1.1  mrg 					       new_fenv_var));
   10410  1.1  mrg   *update = build2 (COMPOUND_EXPR, void_type_node,
   10411  1.1  mrg 		    build2 (COMPOUND_EXPR, void_type_node,
   10412  1.1  mrg 			    reload_fenv, restore_fnenv), update_call);
   10413  1.1  mrg }
   10414  1.1  mrg 
   10415  1.1  mrg /* Implements target hook vector_mode_supported_p.  */
   10416  1.1  mrg bool
   10417  1.1  mrg sh_vector_mode_supported_p (machine_mode mode ATTRIBUTE_UNUSED)
   10418  1.1  mrg {
   10419  1.1  mrg   return false;
   10420  1.1  mrg }
   10421  1.1  mrg 
   10422  1.1  mrg bool
   10423  1.1  mrg sh_frame_pointer_required (void)
   10424  1.1  mrg {
    10425  1.1  mrg   /* If needed, override this in other tm.h files to cope with various OS
    10426  1.1  mrg      lossage requiring a frame pointer.  */
   10427  1.1  mrg   if (SUBTARGET_FRAME_POINTER_REQUIRED)
   10428  1.1  mrg     return true;
   10429  1.1  mrg 
   10430  1.1  mrg   if (crtl->profile)
   10431  1.1  mrg     return true;
   10432  1.1  mrg 
   10433  1.1  mrg   return false;
   10434  1.1  mrg }
   10435  1.1  mrg 
   10436  1.1  mrg /* Implements target hook dwarf_calling_convention.  Return an enum
   10437  1.1  mrg    of dwarf_calling_convention.  */
   10438  1.1  mrg int
   10439  1.1  mrg sh_dwarf_calling_convention (const_tree func)
   10440  1.1  mrg {
   10441  1.1  mrg   if (sh_attr_renesas_p (func))
   10442  1.1  mrg     return DW_CC_GNU_renesas_sh;
   10443  1.1  mrg 
   10444  1.1  mrg   return DW_CC_normal;
   10445  1.1  mrg }
   10446  1.1  mrg 
   10447  1.1  mrg /* Returns the sh builtin decl for CODE.  */
   10448  1.1  mrg static tree
   10449  1.1  mrg sh_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
   10450  1.1  mrg {
   10451  1.1  mrg   if (code >= ARRAY_SIZE (bdesc))
   10452  1.1  mrg     return error_mark_node;
   10453  1.1  mrg 
   10454  1.1  mrg   if (!bdesc[code].is_enabled ())
   10455  1.1  mrg     return error_mark_node;
   10456  1.1  mrg 
   10457  1.1  mrg   return bdesc[code].fndecl;
   10458  1.1  mrg }
   10459  1.1  mrg 
   10460  1.1  mrg /* Expand an expression EXP that calls a built-in function,
   10461  1.1  mrg    with result going to TARGET if that's convenient
   10462  1.1  mrg    (and in mode MODE if that's convenient).
   10463  1.1  mrg    SUBTARGET may be used as the target for computing one of EXP's operands.
   10464  1.1  mrg    IGNORE is nonzero if the value is to be ignored.  */
   10465  1.1  mrg static rtx
   10466  1.1  mrg sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
   10467  1.1  mrg 		   machine_mode mode ATTRIBUTE_UNUSED, int ignore)
   10468  1.1  mrg {
   10469  1.1  mrg   tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
   10470  1.1  mrg   unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
   10471  1.1  mrg   const struct builtin_description *d = &bdesc[fcode];
   10472  1.1  mrg   enum insn_code icode = d->icode;
   10473  1.1  mrg   int signature = d->signature;
   10474  1.1  mrg   int nop = 0;
   10475  1.1  mrg   rtx op[4];
   10476  1.1  mrg 
   10477  1.1  mrg   if (signature_args[signature][0])
   10478  1.1  mrg     {
   10479  1.1  mrg       if (ignore)
   10480  1.1  mrg 	return NULL_RTX;
   10481  1.1  mrg 
   10482  1.1  mrg       machine_mode tmode = insn_data[icode].operand[0].mode;
   10483  1.1  mrg       if (! target || GET_MODE (target) != tmode
   10484  1.1  mrg 	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
   10485  1.1  mrg 	target = gen_reg_rtx (tmode);
   10486  1.1  mrg       op[nop++] = target;
   10487  1.1  mrg     }
   10488  1.1  mrg   else
   10489  1.1  mrg     target = NULL_RTX;
   10490  1.1  mrg 
   10491  1.1  mrg   for (int i = 1; i <= 3; i++, nop++)
   10492  1.1  mrg     {
   10493  1.1  mrg       if (! signature_args[signature][i])
   10494  1.1  mrg 	break;
   10495  1.1  mrg       tree arg = CALL_EXPR_ARG (exp, i - 1);
   10496  1.1  mrg       if (arg == error_mark_node)
   10497  1.1  mrg 	return const0_rtx;
   10498  1.1  mrg 
   10499  1.1  mrg       machine_mode opmode;
   10500  1.1  mrg       tree optype;
   10501  1.1  mrg       if (signature_args[signature][i] & 8)
   10502  1.1  mrg 	{
   10503  1.1  mrg 	  opmode = ptr_mode;
   10504  1.1  mrg 	  optype = ptr_type_node;
   10505  1.1  mrg 	}
   10506  1.1  mrg       else
   10507  1.1  mrg 	{
   10508  1.1  mrg 	  opmode = insn_data[icode].operand[nop].mode;
   10509  1.1  mrg 	  optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
   10510  1.1  mrg 	}
   10511  1.1  mrg 
   10512  1.1  mrg       machine_mode argmode = TYPE_MODE (TREE_TYPE (arg));
   10513  1.1  mrg       if (argmode != opmode)
   10514  1.1  mrg 	arg = build1 (NOP_EXPR, optype, arg);
   10515  1.1  mrg       op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
   10516  1.1  mrg       if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
   10517  1.1  mrg 	op[nop] = copy_to_mode_reg (opmode, op[nop]);
   10518  1.1  mrg     }
   10519  1.1  mrg 
   10520  1.1  mrg   rtx pat = NULL_RTX;
   10521  1.1  mrg 
   10522  1.1  mrg   switch (nop)
   10523  1.1  mrg     {
   10524  1.1  mrg     case 1:
   10525  1.1  mrg       pat = (*insn_data[d->icode].genfun) (op[0]);
   10526  1.1  mrg       break;
   10527  1.1  mrg     case 2:
   10528  1.1  mrg       pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
   10529  1.1  mrg       break;
   10530  1.1  mrg     case 3:
   10531  1.1  mrg       pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
   10532  1.1  mrg       break;
   10533  1.1  mrg     case 4:
   10534  1.1  mrg       pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
   10535  1.1  mrg       break;
   10536  1.1  mrg     default:
   10537  1.1  mrg       gcc_unreachable ();
   10538  1.1  mrg     }
   10539  1.1  mrg   if (! pat)
   10540  1.1  mrg     return NULL_RTX;
   10541  1.1  mrg   emit_insn (pat);
   10542  1.1  mrg   return target;
   10543  1.1  mrg }
   10544  1.1  mrg 
    10545  1.1  mrg /* Implement TARGET_HARD_REGNO_NREGS.  On the SH all but the XD regs are
    10546  1.1  mrg    one word (UNITS_PER_WORD bytes) wide; the XD regs are two words wide.  */
   10547  1.1  mrg 
   10548  1.1  mrg static unsigned int
   10549  1.1  mrg sh_hard_regno_nregs (unsigned int regno, machine_mode mode)
   10550  1.1  mrg {
   10551  1.1  mrg   if (XD_REGISTER_P (regno))
   10552  1.1  mrg     return CEIL (GET_MODE_SIZE (mode), 2 * UNITS_PER_WORD);
   10553  1.1  mrg   return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
   10554  1.1  mrg }
   10555  1.1  mrg 
   10556  1.1  mrg /* Implement TARGET_HARD_REGNO_MODE_OK.
   10557  1.1  mrg 
   10558  1.1  mrg    We can allow any mode in any general register.  The special registers
   10559  1.1  mrg    only allow SImode.  Don't allow any mode in the PR.
   10560  1.1  mrg 
   10561  1.1  mrg    We cannot hold DCmode values in the XD registers because alter_reg
   10562  1.1  mrg    handles subregs of them incorrectly.  We could work around this by
   10563  1.1  mrg    spacing the XD registers like the DR registers, but this would require
   10564  1.1  mrg    additional memory in every compilation to hold larger register vectors.
   10565  1.1  mrg    We could hold SFmode / SCmode values in XD registers, but that
   10566  1.1  mrg    would require a tertiary reload when reloading from / to memory,
   10567  1.1  mrg    and a secondary reload to reload from / to general regs; that
   10568  1.1  mrg    seems to be a losing proposition.
   10569  1.1  mrg 
   10570  1.1  mrg    We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
   10571  1.1  mrg    it won't be ferried through GP registers first.  */
   10572  1.1  mrg static bool
   10573  1.1  mrg sh_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
   10574  1.1  mrg {
   10575  1.1  mrg   if (SPECIAL_REGISTER_P (regno))
   10576  1.1  mrg     return mode == SImode;
   10577  1.1  mrg 
   10578  1.1  mrg   if (regno == FPUL_REG)
   10579  1.1  mrg     return (mode == SImode || mode == SFmode);
   10580  1.1  mrg 
   10581  1.1  mrg   if (FP_REGISTER_P (regno) && mode == SFmode)
   10582  1.1  mrg     return true;
   10583  1.1  mrg 
   10584  1.1  mrg   if (mode == V2SFmode)
   10585  1.1  mrg     {
   10586  1.1  mrg       if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
   10587  1.1  mrg 	   || GENERAL_REGISTER_P (regno)))
   10588  1.1  mrg 	return true;
   10589  1.1  mrg       else
   10590  1.1  mrg 	return false;
   10591  1.1  mrg     }
   10592  1.1  mrg 
   10593  1.1  mrg   if (mode == V4SFmode)
   10594  1.1  mrg     {
   10595  1.1  mrg       if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
   10596  1.1  mrg 	  || GENERAL_REGISTER_P (regno))
   10597  1.1  mrg 	return true;
   10598  1.1  mrg       else
   10599  1.1  mrg 	return false;
   10600  1.1  mrg     }
   10601  1.1  mrg 
   10602  1.1  mrg   if (mode == V16SFmode)
   10603  1.1  mrg     return regno == FIRST_XD_REG;
   10604  1.1  mrg 
   10605  1.1  mrg   if (FP_REGISTER_P (regno))
   10606  1.1  mrg     {
   10607  1.1  mrg       if (mode == SFmode
   10608  1.1  mrg 	  || mode == SImode
   10609  1.1  mrg 	  || ((TARGET_SH2E) && mode == SCmode)
   10610  1.1  mrg 	  || (((TARGET_FPU_DOUBLE && mode == DFmode) || mode == DCmode)
   10611  1.1  mrg 	      && ((regno - FIRST_FP_REG) & 1) == 0)
   10612  1.1  mrg 	  || (TARGET_SH4 && mode == TImode
   10613  1.1  mrg 	      && ((regno - FIRST_FP_REG) & 3) == 0))
   10614  1.1  mrg 	return true;
   10615  1.1  mrg       else
   10616  1.1  mrg 	return false;
   10617  1.1  mrg     }
   10618  1.1  mrg 
   10619  1.1  mrg   if (XD_REGISTER_P (regno))
   10620  1.1  mrg     return mode == DFmode;
   10621  1.1  mrg 
   10622  1.1  mrg   if (regno == PR_REG)
   10623  1.1  mrg     return mode == SImode;
   10624  1.1  mrg 
   10625  1.1  mrg   if (regno == FPSCR_REG)
   10626  1.1  mrg     return mode == SImode;
   10627  1.1  mrg 
   10628  1.1  mrg   return true;
   10629  1.1  mrg }
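
/* Illustrative note (not part of the original sources): general registers
   reach the final return above and accept any mode except V16SFmode, which
   is restricted to the first XD register.  Within the FP bank, a DFmode
   value (with TARGET_FPU_DOUBLE) must start at an even offset from
   FIRST_FP_REG so that it can pair with the following register, V4SFmode
   must start at a multiple-of-four offset, and the PR and FPSCR registers
   accept only SImode.  */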
   10630  1.1  mrg 
   10631  1.1  mrg /* Implement TARGET_MODES_TIEABLE_P.
   10632  1.1  mrg 
   10633  1.1  mrg    If TARGET_HARD_REGNO_MODE_OK could produce different values for MODE1
   10634  1.1  mrg    and MODE2, for any hard reg, then this must be false for correct output.
   10635  1.1  mrg    That's the case for xd registers: we don't hold SFmode values in
    10636  1.1  mrg 	 them, so we can't tie an SFmode pseudo with one in another
   10637  1.1  mrg    floating-point mode.  */
   10638  1.1  mrg 
   10639  1.1  mrg static bool
   10640  1.1  mrg sh_modes_tieable_p (machine_mode mode1, machine_mode mode2)
   10641  1.1  mrg {
   10642  1.1  mrg   return (mode1 == mode2
   10643  1.1  mrg 	  || (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2)
   10644  1.1  mrg 	      && (mode1 != SFmode && mode2 != SFmode)));
   10645  1.1  mrg }
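
/* Illustrative note (not part of the original sources): by the rule above,
   SImode and HImode are tieable (same MODE_INT class and neither is SFmode),
   while DFmode and SFmode are not, even though both are MODE_FLOAT, because
   SFmode values are not held in the XD registers as described above.  */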
   10646  1.1  mrg 
   10647  1.1  mrg /* Specify the modes required to caller save a given hard regno.
   10648  1.1  mrg    choose_hard_reg_mode chooses mode based on TARGET_HARD_REGNO_MODE_OK
   10649  1.1  mrg    and returns ?Imode for float regs when sh_hard_regno_mode_ok
   10650  1.1  mrg    permits integer modes on them.  That makes LRA's split process
   10651  1.1  mrg    unhappy.  See PR55212.
   10652  1.1  mrg  */
   10653  1.1  mrg machine_mode
   10654  1.1  mrg sh_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs,
   10655  1.1  mrg 				machine_mode mode)
   10656  1.1  mrg {
   10657  1.1  mrg   if (FP_REGISTER_P (regno)
   10658  1.1  mrg       && (mode == SFmode
   10659  1.1  mrg 	  || mode == SCmode
   10660  1.1  mrg 	  || ((mode == DFmode || mode == DCmode)
   10661  1.1  mrg 	      && ((regno - FIRST_FP_REG) & 1) == 0)))
   10662  1.1  mrg     return mode;
   10663  1.1  mrg 
   10664  1.1  mrg   return choose_hard_reg_mode (regno, nregs, NULL);
   10665  1.1  mrg }
   10666  1.1  mrg 
   10667  1.1  mrg /* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */
   10668  1.1  mrg static bool
   10669  1.1  mrg sh_can_change_mode_class (machine_mode from, machine_mode to,
   10670  1.1  mrg 			  reg_class_t rclass)
   10671  1.1  mrg {
   10672  1.1  mrg   /* We want to enable the use of SUBREGs as a means to
   10673  1.1  mrg      VEC_SELECT a single element of a vector.  */
   10674  1.1  mrg 
   10675  1.1  mrg   /* This effectively disallows using GENERAL_REGS for SFmode vector subregs.
   10676  1.1  mrg      This can be problematic when SFmode vector subregs need to be accessed
   10677  1.1  mrg      on the stack with displacement addressing, as it happens with -O0.
   10678  1.1  mrg      Thus we disallow the mode change for -O0.  */
   10679  1.1  mrg   if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
   10680  1.1  mrg     return optimize ? !reg_classes_intersect_p (GENERAL_REGS, rclass) : true;
   10681  1.1  mrg 
   10682  1.1  mrg   if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
   10683  1.1  mrg     {
   10684  1.1  mrg       if (TARGET_LITTLE_ENDIAN)
   10685  1.1  mrg 	{
   10686  1.1  mrg 	  if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
   10687  1.1  mrg 	    return !reg_classes_intersect_p (DF_REGS, rclass);
   10688  1.1  mrg 	}
   10689  1.1  mrg       else
   10690  1.1  mrg 	{
   10691  1.1  mrg 	  if (GET_MODE_SIZE (from) < 8)
   10692  1.1  mrg 	    return !reg_classes_intersect_p (DF_REGS, rclass);
   10693  1.1  mrg 	}
   10694  1.1  mrg     }
   10695  1.1  mrg   return true;
   10696  1.1  mrg }
   10697  1.1  mrg 
   10698  1.1  mrg /* Return true if registers in machine mode MODE will likely be
   10699  1.1  mrg    allocated to registers in small register classes.  */
   10700  1.1  mrg bool
   10701  1.1  mrg sh_small_register_classes_for_mode_p (machine_mode mode ATTRIBUTE_UNUSED)
   10702  1.1  mrg {
   10703  1.1  mrg   return true;
   10704  1.1  mrg }
   10705  1.1  mrg 
   10706  1.1  mrg /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   10707  1.1  mrg    that label is used.  */
   10708  1.1  mrg void
   10709  1.1  mrg sh_mark_label (rtx address, int nuses)
   10710  1.1  mrg {
   10711  1.1  mrg   if (GOTOFF_P (address))
   10712  1.1  mrg     {
   10713  1.1  mrg       /* Extract the label or symbol.  */
   10714  1.1  mrg       address = XEXP (address, 0);
   10715  1.1  mrg       if (GET_CODE (address) == PLUS)
   10716  1.1  mrg 	address = XEXP (address, 0);
   10717  1.1  mrg       address = XVECEXP (address, 0, 0);
   10718  1.1  mrg     }
   10719  1.1  mrg   if (GET_CODE (address) == LABEL_REF
   10720  1.1  mrg       && LABEL_P (XEXP (address, 0)))
   10721  1.1  mrg     LABEL_NUSES (XEXP (address, 0)) += nuses;
   10722  1.1  mrg }
   10723  1.1  mrg 
   10724  1.1  mrg /* Compute extra cost of moving data between one register class
   10725  1.1  mrg    and another.
   10726  1.1  mrg 
   10727  1.1  mrg    If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   10728  1.1  mrg    uses this information.  Hence, the general register <-> floating point
   10729  1.1  mrg    register information here is not used for SFmode.  */
   10730  1.1  mrg static int
   10731  1.1  mrg sh_register_move_cost (machine_mode mode,
   10732  1.1  mrg 		       reg_class_t srcclass, reg_class_t dstclass)
   10733  1.1  mrg {
   10734  1.1  mrg   if (dstclass == T_REGS || dstclass == PR_REGS)
   10735  1.1  mrg     return 10;
   10736  1.1  mrg 
   10737  1.1  mrg   if (dstclass == MAC_REGS && srcclass == MAC_REGS)
   10738  1.1  mrg     return 4;
   10739  1.1  mrg 
   10740  1.1  mrg   if (mode == SImode && TARGET_FMOVD
   10741  1.1  mrg       && REGCLASS_HAS_FP_REG (srcclass)
   10742  1.1  mrg       && REGCLASS_HAS_FP_REG (dstclass))
   10743  1.1  mrg     return 4;
   10744  1.1  mrg 
   10745  1.1  mrg   if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
   10746  1.1  mrg     return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
   10747  1.1  mrg 
   10748  1.1  mrg   if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
   10749  1.1  mrg       || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
   10750  1.1  mrg     return 9;
   10751  1.1  mrg 
   10752  1.1  mrg   if ((REGCLASS_HAS_FP_REG (dstclass)
   10753  1.1  mrg        && REGCLASS_HAS_GENERAL_REG (srcclass))
   10754  1.1  mrg       || (REGCLASS_HAS_GENERAL_REG (dstclass)
   10755  1.1  mrg 	  && REGCLASS_HAS_FP_REG (srcclass)))
   10756  1.1  mrg     {
   10757  1.1  mrg       /* Discourage trying to use fp regs for a pointer.  This also
   10758  1.1  mrg 	 discourages fp regs with SImode because Pmode is an alias
   10759  1.1  mrg 	 of SImode on this target.  See PR target/48596.  */
   10760  1.1  mrg       int addend = (mode == Pmode) ? 40 : 0;
   10761  1.1  mrg 
   10762  1.1  mrg       return ((TARGET_FMOVD ? 8 : 12) + addend)
   10763  1.1  mrg 	     * ((GET_MODE_SIZE (mode) + 7) / 8U);
   10764  1.1  mrg     }
   10765  1.1  mrg 
   10766  1.1  mrg   if ((dstclass == FPUL_REGS
   10767  1.1  mrg        && REGCLASS_HAS_GENERAL_REG (srcclass))
   10768  1.1  mrg       || (srcclass == FPUL_REGS
   10769  1.1  mrg 	  && REGCLASS_HAS_GENERAL_REG (dstclass)))
   10770  1.1  mrg     return 5;
   10771  1.1  mrg 
   10772  1.1  mrg   if ((dstclass == FPUL_REGS
   10773  1.1  mrg        && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
   10774  1.1  mrg       || (srcclass == FPUL_REGS
   10775  1.1  mrg 	  && (dstclass == PR_REGS || dstclass == MAC_REGS)))
   10776  1.1  mrg     return 7;
   10777  1.1  mrg 
   10778  1.1  mrg   if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
   10779  1.1  mrg       || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    10780  1.1  mrg     return 4;
   10781  1.1  mrg 
   10782  1.1  mrg   if (TARGET_FMOVD
   10783  1.1  mrg       && ! REGCLASS_HAS_GENERAL_REG (srcclass)
   10784  1.1  mrg       && ! REGCLASS_HAS_GENERAL_REG (dstclass))
   10785  1.1  mrg     return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
   10786  1.1  mrg 
   10787  1.1  mrg   if (((dstclass == FP_REGS || dstclass == DF_REGS)
   10788  1.1  mrg        && (srcclass == PR_REGS))
   10789  1.1  mrg       || ((srcclass == FP_REGS || srcclass == DF_REGS)
   10790  1.1  mrg 	  && (dstclass == PR_REGS)))
   10791  1.1  mrg     return 7;
   10792  1.1  mrg 
   10793  1.1  mrg   return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
   10794  1.1  mrg }
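
/* Illustrative note (not part of the original sources): as an example of the
   general-reg <-> FP-reg case above, moving a Pmode (SImode) pointer without
   TARGET_FMOVD costs (12 + 40) * ((4 + 7) / 8) = 52, which strongly
   discourages the allocator from putting pointers into FP registers, whereas
   a DFmode move with TARGET_FMOVD costs (8 + 0) * ((8 + 7) / 8) = 8.  */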
   10795  1.1  mrg 
   10796  1.1  mrg static rtx
   10797  1.1  mrg emit_load_ptr (rtx reg, rtx addr)
   10798  1.1  mrg {
   10799  1.1  mrg   rtx mem = gen_const_mem (ptr_mode, addr);
   10800  1.1  mrg 
   10801  1.1  mrg   if (Pmode != ptr_mode)
   10802  1.1  mrg     mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
   10803  1.1  mrg   return emit_move_insn (reg, mem);
   10804  1.1  mrg }
   10805  1.1  mrg 
   10806  1.1  mrg static void
   10807  1.1  mrg sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
   10808  1.1  mrg 		    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
   10809  1.1  mrg 		    tree function)
   10810  1.1  mrg {
   10811  1.1  mrg   const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
   10812  1.1  mrg   CUMULATIVE_ARGS cum;
   10813  1.1  mrg   int structure_value_byref = 0;
   10814  1.1  mrg   rtx this_rtx, this_value, sibcall, funexp;
   10815  1.1  mrg   rtx_insn *insns;
   10816  1.1  mrg   tree funtype = TREE_TYPE (function);
   10817  1.1  mrg   int simple_add = CONST_OK_FOR_ADD (delta);
   10818  1.1  mrg   int did_load = 0;
   10819  1.1  mrg   rtx scratch0, scratch1, scratch2;
   10820  1.1  mrg 
   10821  1.1  mrg   reload_completed = 1;
   10822  1.1  mrg   epilogue_completed = 1;
   10823  1.1  mrg   crtl->uses_only_leaf_regs = 1;
   10824  1.1  mrg 
   10825  1.1  mrg   emit_note (NOTE_INSN_PROLOGUE_END);
   10826  1.1  mrg 
   10827  1.1  mrg   /* Find the "this" pointer.  We have such a wide range of ABIs for the
   10828  1.1  mrg      SH that it's best to do this completely machine independently.
   10829  1.1  mrg      "this" is passed as first argument, unless a structure return pointer
   10830  1.1  mrg      comes first, in which case "this" comes second.  */
   10831  1.1  mrg   INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
   10832  1.1  mrg #ifndef PCC_STATIC_STRUCT_RETURN
   10833  1.1  mrg   if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
   10834  1.1  mrg     structure_value_byref = 1;
   10835  1.1  mrg #endif /* not PCC_STATIC_STRUCT_RETURN */
   10836  1.1  mrg   if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
   10837  1.1  mrg     {
   10838  1.1  mrg       tree ptype = build_pointer_type (TREE_TYPE (funtype));
   10839  1.1  mrg 
   10840  1.1  mrg       function_arg_info ptr_arg (ptype, Pmode, /*named=*/true);
   10841  1.1  mrg       sh_function_arg_advance (pack_cumulative_args (&cum), ptr_arg);
   10842  1.1  mrg     }
   10843  1.1  mrg   function_arg_info ptr_arg (ptr_type_node, Pmode, /*named=*/true);
   10844  1.1  mrg   this_rtx = sh_function_arg (pack_cumulative_args (&cum), ptr_arg);
   10845  1.1  mrg 
   10846  1.1  mrg   /* For SHcompact, we only have r0 for a scratch register: r1 is the
   10847  1.1  mrg      static chain pointer (even if you can't have nested virtual functions
   10848  1.1  mrg      right now, someone might implement them sometime), and the rest of the
   10849  1.1  mrg      registers are used for argument passing, are callee-saved, or reserved.  */
    10850  1.1  mrg   /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
   10851  1.1  mrg      -ffixed-reg has been used.  */
   10852  1.1  mrg   if (! call_used_or_fixed_reg_p (0) || fixed_regs[0])
   10853  1.1  mrg     error ("r0 needs to be available as a call-clobbered register");
   10854  1.1  mrg   scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
   10855  1.1  mrg 
   10856  1.1  mrg     {
   10857  1.1  mrg       if (call_used_or_fixed_reg_p (1) && ! fixed_regs[1])
   10858  1.1  mrg 	scratch1 = gen_rtx_REG (ptr_mode, 1);
   10859  1.1  mrg       /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
   10860  1.1  mrg 	 pointing where to return struct values.  */
   10861  1.1  mrg       if (call_used_or_fixed_reg_p (3) && ! fixed_regs[3])
   10862  1.1  mrg 	scratch2 = gen_rtx_REG (Pmode, 3);
   10863  1.1  mrg     }
   10864  1.1  mrg 
   10865  1.1  mrg   this_value = plus_constant (Pmode, this_rtx, delta);
   10866  1.1  mrg   if (vcall_offset
   10867  1.1  mrg       && (simple_add || scratch0 != scratch1)
   10868  1.1  mrg       && strict_memory_address_p (ptr_mode, this_value))
   10869  1.1  mrg     {
   10870  1.1  mrg       emit_load_ptr (scratch0, this_value);
   10871  1.1  mrg       did_load = 1;
   10872  1.1  mrg     }
   10873  1.1  mrg 
   10874  1.1  mrg   if (!delta)
   10875  1.1  mrg     ; /* Do nothing.  */
   10876  1.1  mrg   else if (simple_add)
   10877  1.1  mrg     emit_move_insn (this_rtx, this_value);
   10878  1.1  mrg   else
   10879  1.1  mrg     {
   10880  1.1  mrg       emit_move_insn (scratch1, GEN_INT (delta));
   10881  1.1  mrg       emit_insn (gen_add2_insn (this_rtx, scratch1));
   10882  1.1  mrg     }
   10883  1.1  mrg 
   10884  1.1  mrg   if (vcall_offset)
   10885  1.1  mrg     {
   10886  1.1  mrg       rtx offset_addr;
   10887  1.1  mrg 
   10888  1.1  mrg       if (!did_load)
   10889  1.1  mrg 	emit_load_ptr (scratch0, this_rtx);
   10890  1.1  mrg 
   10891  1.1  mrg       offset_addr = plus_constant (Pmode, scratch0, vcall_offset);
   10892  1.1  mrg       if (strict_memory_address_p (ptr_mode, offset_addr))
   10893  1.1  mrg 	; /* Do nothing.  */
   10894  1.1  mrg       else if (scratch0 != scratch1)
   10895  1.1  mrg 	{
   10896  1.1  mrg 	  /* scratch0 != scratch1, and we have indexed loads.  Get better
   10897  1.1  mrg 	     schedule by loading the offset into r1 and using an indexed
   10898  1.1  mrg 	     load - then the load of r1 can issue before the load from
   10899  1.1  mrg 	     (this_rtx + delta) finishes.  */
   10900  1.1  mrg 	  emit_move_insn (scratch1, GEN_INT (vcall_offset));
   10901  1.1  mrg 	  offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
   10902  1.1  mrg 	}
   10903  1.1  mrg       else if (CONST_OK_FOR_ADD (vcall_offset))
   10904  1.1  mrg 	{
   10905  1.1  mrg 	  emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
   10906  1.1  mrg 	  offset_addr = scratch0;
   10907  1.1  mrg 	}
   10908  1.1  mrg       else
   10909  1.1  mrg 	gcc_unreachable (); /* FIXME */
   10910  1.1  mrg       emit_load_ptr (scratch0, offset_addr);
   10911  1.1  mrg 
   10912  1.1  mrg       if (Pmode != ptr_mode)
   10913  1.1  mrg 	scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
   10914  1.1  mrg       emit_insn (gen_add2_insn (this_rtx, scratch0));
   10915  1.1  mrg     }
   10916  1.1  mrg 
   10917  1.1  mrg   /* Generate a tail call to the target function.  */
   10918  1.1  mrg   if (! TREE_USED (function))
   10919  1.1  mrg     {
   10920  1.1  mrg       assemble_external (function);
   10921  1.1  mrg       TREE_USED (function) = 1;
   10922  1.1  mrg     }
   10923  1.1  mrg   funexp = XEXP (DECL_RTL (function), 0);
   10924  1.1  mrg   /* If the function is overridden, so is the thunk, hence we don't
   10925  1.1  mrg      need GOT addressing even if this is a public symbol.  */
   10926  1.1  mrg #if 0
   10927  1.1  mrg   if (TARGET_SH1 && ! flag_weak)
   10928  1.1  mrg     sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
   10929  1.1  mrg   else
   10930  1.1  mrg #endif
   10931  1.1  mrg   if (TARGET_SH2 && flag_pic)
   10932  1.1  mrg     {
   10933  1.1  mrg       if (TARGET_FDPIC)
   10934  1.1  mrg 	{
   10935  1.1  mrg 	  sibcall = gen_sibcall_pcrel_fdpic (funexp, const0_rtx);
   10936  1.1  mrg 	  XEXP (XVECEXP (sibcall, 0, 3), 0) = scratch2;
   10937  1.1  mrg 	}
   10938  1.1  mrg       else
   10939  1.1  mrg 	{
   10940  1.1  mrg 	  sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
   10941  1.1  mrg 	  XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
   10942  1.1  mrg 	}
   10943  1.1  mrg     }
   10944  1.1  mrg   else
   10945  1.1  mrg     {
   10946  1.1  mrg       emit_move_insn (scratch2, funexp);
   10947  1.1  mrg       funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
   10948  1.1  mrg       sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
   10949  1.1  mrg     }
   10950  1.1  mrg   sibcall = emit_call_insn (sibcall);
   10951  1.1  mrg   SIBLING_CALL_P (sibcall) = 1;
   10952  1.1  mrg   use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);
   10953  1.1  mrg   emit_barrier ();
   10954  1.1  mrg 
   10955  1.1  mrg   /* Run just enough of rest_of_compilation to do scheduling and get
   10956  1.1  mrg      the insns emitted.  */
   10957  1.1  mrg 
   10958  1.1  mrg   insns = get_insns ();
   10959  1.1  mrg 
   10960  1.1  mrg   if (optimize > 0)
   10961  1.1  mrg     {
   10962  1.1  mrg       if (! cfun->cfg)
   10963  1.1  mrg 	init_flow (cfun);
   10964  1.1  mrg       split_all_insns_noflow ();
   10965  1.1  mrg     }
   10966  1.1  mrg 
   10967  1.1  mrg   sh_reorg ();
   10968  1.1  mrg   shorten_branches (insns);
   10969  1.1  mrg   assemble_start_function (thunk_fndecl, fnname);
   10970  1.1  mrg   final_start_function (insns, file, 1);
   10971  1.1  mrg   final (insns, file, 1);
   10972  1.1  mrg   final_end_function ();
   10973  1.1  mrg   assemble_end_function (thunk_fndecl, fnname);
   10974  1.1  mrg 
   10975  1.1  mrg   reload_completed = 0;
   10976  1.1  mrg   epilogue_completed = 0;
   10977  1.1  mrg }
   10978  1.1  mrg 
   10979  1.1  mrg /* Return an RTX pair for the address and call site label of a function
   10980  1.1  mrg    NAME of kind KIND, placing the result in TARGET if not NULL.  For
   10981  1.1  mrg    SFUNC_STATIC, if FDPIC, the LAB member of result will be set to
   10982  1.1  mrg    (const_int 0) if jsr should be used, or a label_ref if bsrf should
   10983  1.1  mrg    be used.  For FDPIC, both SFUNC_GOT and SFUNC_STATIC will return the
   10984  1.1  mrg    address of the function itself, not a function descriptor, so they
   10985  1.1  mrg    can only be used with functions not using the FDPIC register that
    10986  1.1  mrg    are known to be called directly without a PLT entry.  */
   10987  1.1  mrg 
   10988  1.1  mrg function_symbol_result
   10989  1.1  mrg function_symbol (rtx target, const char *name, sh_function_kind kind)
   10990  1.1  mrg {
   10991  1.1  mrg   /* If this is not an ordinary function, the name usually comes from a
   10992  1.1  mrg      string literal or an sprintf buffer.  Make sure we use the same
   10993  1.1  mrg      string consistently, so that cse will be able to unify address loads.  */
   10994  1.1  mrg   if (kind != FUNCTION_ORDINARY)
   10995  1.1  mrg     name = IDENTIFIER_POINTER (get_identifier (name));
   10996  1.1  mrg   rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
   10997  1.1  mrg   rtx lab = const0_rtx;
   10998  1.1  mrg   SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
   10999  1.1  mrg   if (flag_pic)
   11000  1.1  mrg     switch (kind)
   11001  1.1  mrg       {
   11002  1.1  mrg       case FUNCTION_ORDINARY:
   11003  1.1  mrg 	break;
   11004  1.1  mrg       case SFUNC_GOT:
   11005  1.1  mrg 	{
   11006  1.1  mrg 	  rtx reg = target ? target : gen_reg_rtx (Pmode);
   11007  1.1  mrg 
   11008  1.1  mrg 	  emit_insn (gen_symGOT2reg (reg, sym));
   11009  1.1  mrg 	  sym = reg;
   11010  1.1  mrg 	  break;
   11011  1.1  mrg 	}
   11012  1.1  mrg       case SFUNC_STATIC:
   11013  1.1  mrg 	{
   11014  1.1  mrg 	  rtx reg = target ? target : gen_reg_rtx (Pmode);
   11015  1.1  mrg 
   11016  1.1  mrg 	  if (TARGET_FDPIC)
   11017  1.1  mrg 	    {
   11018  1.1  mrg 	      /* We use PC-relative calls, since GOTOFF can only refer
   11019  1.1  mrg 		 to writable data.  This works along with sh_sfunc_call.  */
   11020  1.1  mrg  	      lab = PATTERN (gen_call_site ());
   11021  1.1  mrg 	      emit_insn (gen_sym_label2reg (reg, sym, lab));
   11022  1.1  mrg 	    }
   11023  1.1  mrg 	  else
   11024  1.1  mrg 	    {
   11025  1.1  mrg 	      /* ??? To allow cse to work, we use GOTOFF relocations.
    11026  1.1  mrg 		 We could add combiner patterns to transform this into
   11027  1.1  mrg 		 straight pc-relative calls with sym2PIC / bsrf when
   11028  1.1  mrg 		 label load and function call are still 1:1 and in the
   11029  1.1  mrg 		 same basic block during combine.  */
   11030  1.1  mrg 	      emit_insn (gen_symGOTOFF2reg (reg, sym));
   11031  1.1  mrg 	    }
   11032  1.1  mrg 
   11033  1.1  mrg 	  sym = reg;
   11034  1.1  mrg 	  break;
   11035  1.1  mrg 	}
   11036  1.1  mrg       }
   11037  1.1  mrg   if (target && sym != target)
   11038  1.1  mrg     {
   11039  1.1  mrg       emit_move_insn (target, sym);
   11040  1.1  mrg       return function_symbol_result (target, lab);
   11041  1.1  mrg     }
   11042  1.1  mrg   return function_symbol_result (sym, lab);
   11043  1.1  mrg }
   11044  1.1  mrg 
    11045  1.1  mrg /* Return the number of the first general purpose register whose bit is
    11046  1.1  mrg    set in S, or -1 if there is none.  */
   11047  1.1  mrg static int
   11048  1.1  mrg scavenge_reg (HARD_REG_SET *s)
   11049  1.1  mrg {
   11050  1.1  mrg   for (int r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
   11051  1.1  mrg     if (TEST_HARD_REG_BIT (*s, r))
   11052  1.1  mrg       return r;
   11053  1.1  mrg   return -1;
   11054  1.1  mrg }
   11055  1.1  mrg 
   11056  1.1  mrg rtx
   11057  1.1  mrg sh_get_pr_initial_val (void)
   11058  1.1  mrg {
   11059  1.1  mrg   /* If we haven't finished rtl generation, there might be a nonlocal label
   11060  1.1  mrg      that we haven't seen yet.
   11061  1.1  mrg      ??? get_hard_reg_initial_val fails if it is called after register
   11062  1.1  mrg      allocation has started, unless it has been called before for the
    11063  1.1  mrg      same register.  And even then, we end up in trouble if we didn't use
   11064  1.1  mrg      the register in the same basic block before.  So call
   11065  1.1  mrg      get_hard_reg_initial_val now and wrap it in an unspec if we might
   11066  1.1  mrg      need to replace it.  */
   11067  1.1  mrg   /* ??? We also must do this for TARGET_SH1 in general, because otherwise
   11068  1.1  mrg      combine can put the pseudo returned by get_hard_reg_initial_val into
    11069  1.1  mrg      instructions that need a general purpose register, which will fail to
   11070  1.1  mrg      be recognized when the pseudo becomes allocated to PR.  */
   11071  1.1  mrg   rtx val = get_hard_reg_initial_val (Pmode, PR_REG);
   11072  1.1  mrg   return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
   11073  1.1  mrg }
   11074  1.1  mrg 
   11075  1.1  mrg bool
   11076  1.1  mrg sh_expand_t_scc (rtx operands[])
   11077  1.1  mrg {
   11078  1.1  mrg   enum rtx_code code = GET_CODE (operands[1]);
   11079  1.1  mrg   rtx target = operands[0];
   11080  1.1  mrg   rtx op0 = operands[2];
   11081  1.1  mrg   rtx op1 = operands[3];
   11082  1.1  mrg   rtx result = target;
   11083  1.1  mrg 
   11084  1.1  mrg   if (!REG_P (op0) || REGNO (op0) != T_REG
   11085  1.1  mrg       || !CONST_INT_P (op1))
   11086  1.1  mrg     return false;
   11087  1.1  mrg   if (!REG_P (result))
   11088  1.1  mrg     result = gen_reg_rtx (SImode);
   11089  1.1  mrg   HOST_WIDE_INT val = INTVAL (op1);
   11090  1.1  mrg   if ((code == EQ && val == 1) || (code == NE && val == 0))
   11091  1.1  mrg     emit_insn (gen_movt (result, get_t_reg_rtx ()));
   11092  1.1  mrg   else if ((code == EQ && val == 0) || (code == NE && val == 1))
   11093  1.1  mrg     emit_insn (gen_movnegt (result, get_t_reg_rtx ()));
   11094  1.1  mrg   else if (code == EQ || code == NE)
   11095  1.1  mrg     emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
   11096  1.1  mrg   else
   11097  1.1  mrg     return false;
   11098  1.1  mrg   if (result != target)
   11099  1.1  mrg     emit_move_insn (target, result);
   11100  1.1  mrg   return true;
   11101  1.1  mrg }
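
/* Illustrative note (not part of the original sources): the expander above
   only handles EQ/NE comparisons of the T bit against a constant integer.
   For instance "r = (T == 1)" becomes a single movt, "r = (T == 0)" becomes
   movnegt, and a comparison against any other constant folds directly to a
   constant 0 or 1.  Everything else is left to the generic expanders by
   returning false.  */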
   11102  1.1  mrg 
   11103  1.1  mrg /* INSN is an sfunc; return the rtx that describes the address used.  */
   11104  1.1  mrg static rtx
   11105  1.1  mrg extract_sfunc_addr (rtx insn)
   11106  1.1  mrg {
   11107  1.1  mrg   rtx pattern = PATTERN (insn);
   11108  1.1  mrg   const int len = XVECLEN (pattern, 0);
   11109  1.1  mrg   for (int i = 0; i < len; i++)
   11110  1.1  mrg     {
   11111  1.1  mrg       rtx part = XVECEXP (pattern, 0, i);
   11112  1.1  mrg       if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
   11113  1.1  mrg 	  && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
   11114  1.1  mrg 	return XEXP (part, 0);
   11115  1.1  mrg     }
   11116  1.1  mrg   gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
   11117  1.1  mrg   return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
   11118  1.1  mrg }
   11119  1.1  mrg 
   11120  1.1  mrg /* Verify that the register in use_sfunc_addr still agrees with the address
   11121  1.1  mrg    used in the sfunc.  This prevents fill_slots_from_thread from changing
   11122  1.1  mrg    use_sfunc_addr.
   11123  1.1  mrg    INSN is the use_sfunc_addr instruction, and REG is the register it
   11124  1.1  mrg    guards.  */
   11125  1.1  mrg bool
   11126  1.1  mrg check_use_sfunc_addr (rtx_insn *insn, rtx reg)
   11127  1.1  mrg {
   11128  1.1  mrg   /* Search for the sfunc.  It should really come right after INSN.  */
   11129  1.1  mrg   while ((insn = NEXT_INSN (insn)))
   11130  1.1  mrg     {
   11131  1.1  mrg       if (LABEL_P (insn) || JUMP_P (insn))
   11132  1.1  mrg 	break;
   11133  1.1  mrg       if (! INSN_P (insn))
   11134  1.1  mrg 	continue;
   11135  1.1  mrg 
   11136  1.1  mrg       if (rtx_sequence *seq = dyn_cast<rtx_sequence *> (PATTERN (insn)))
   11137  1.1  mrg 	insn = seq->insn (0);
   11138  1.1  mrg       if (GET_CODE (PATTERN (insn)) != PARALLEL
   11139  1.1  mrg 	  || get_attr_type (insn) != TYPE_SFUNC)
   11140  1.1  mrg 	continue;
   11141  1.1  mrg       return rtx_equal_p (extract_sfunc_addr (insn), reg);
   11142  1.1  mrg     }
   11143  1.1  mrg   gcc_unreachable ();
   11144  1.1  mrg }
   11145  1.1  mrg 
    11146  1.1  mrg /* This function returns a constant rtx that represents 2**15 / pi in
    11147  1.1  mrg    SFmode.  It's used to scale SFmode angles, in radians, to a fixed-point
    11148  1.1  mrg    signed 16.16-bit fraction of a full circle, i.e. 2*pi maps to 0x10000.  */
   11149  1.1  mrg static GTY(()) rtx sh_fsca_sf2int_rtx;
   11150  1.1  mrg 
   11151  1.1  mrg rtx
   11152  1.1  mrg sh_fsca_sf2int (void)
   11153  1.1  mrg {
   11154  1.1  mrg   if (! sh_fsca_sf2int_rtx)
   11155  1.1  mrg     {
   11156  1.1  mrg       REAL_VALUE_TYPE rv;
   11157  1.1  mrg 
   11158  1.1  mrg       real_from_string (&rv, "10430.378350470453");
   11159  1.1  mrg       sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
   11160  1.1  mrg     }
   11161  1.1  mrg 
   11162  1.1  mrg   return sh_fsca_sf2int_rtx;
   11163  1.1  mrg }
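
/* Worked example (not part of the original sources): 2**15 / pi is
   approximately 10430.378.  Multiplying an angle of 2*pi radians by this
   constant gives about 65536.0 == 0x10000, i.e. exactly one full circle in
   the signed 16.16 fixed-point form expected by fsca.  */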
   11164  1.1  mrg 
    11165  1.1  mrg /* This function returns a constant rtx that represents pi / 2**15 in
    11166  1.1  mrg    SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction of
    11167  1.1  mrg    a full circle back to an SFmode angle in radians, i.e. 0x10000 maps
    11168  1.1  mrg    to 2*pi.  */
   11169  1.1  mrg static GTY(()) rtx sh_fsca_int2sf_rtx;
   11170  1.1  mrg 
   11171  1.1  mrg rtx
   11172  1.1  mrg sh_fsca_int2sf (void)
   11173  1.1  mrg {
   11174  1.1  mrg   if (! sh_fsca_int2sf_rtx)
   11175  1.1  mrg     {
   11176  1.1  mrg       REAL_VALUE_TYPE rv;
   11177  1.1  mrg 
   11178  1.1  mrg       real_from_string (&rv, "9.587379924285257e-5");
   11179  1.1  mrg       sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
   11180  1.1  mrg     }
   11181  1.1  mrg 
   11182  1.1  mrg   return sh_fsca_int2sf_rtx;
   11183  1.1  mrg }
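
/* Worked example (not part of the original sources): pi / 2**15 is
   approximately 9.5874e-5.  Multiplying the fixed-point value 0x8000 (half a
   circle) by this constant gives about 3.14159, i.e. pi radians, which is
   the inverse of the sf2int scaling above.  */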
   11184  1.1  mrg 
   11185  1.1  mrg /* Initialize the CUMULATIVE_ARGS structure.  */
   11186  1.1  mrg void
   11187  1.1  mrg sh_init_cumulative_args (CUMULATIVE_ARGS *  pcum,
   11188  1.1  mrg 			 tree		    fntype,
   11189  1.1  mrg 			 rtx		    libname ATTRIBUTE_UNUSED,
   11190  1.1  mrg 			 tree		    fndecl,
   11191  1.1  mrg 			 signed int	    n_named_args,
   11192  1.1  mrg 			 machine_mode  mode)
   11193  1.1  mrg {
   11194  1.1  mrg   pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
   11195  1.1  mrg   pcum->free_single_fp_reg = 0;
   11196  1.1  mrg   pcum->outgoing = n_named_args != -1;
   11197  1.1  mrg 
   11198  1.1  mrg   /* FIXME: Should we check TARGET_HITACHI here ???  */
   11199  1.1  mrg   pcum->renesas_abi = sh_attr_renesas_p (fntype);
   11200  1.1  mrg 
   11201  1.1  mrg   if (fntype)
   11202  1.1  mrg     {
   11203  1.1  mrg       pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
   11204  1.1  mrg 			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
   11205  1.1  mrg       pcum->prototype_p = prototype_p (fntype);
   11206  1.1  mrg       pcum->arg_count [(int) SH_ARG_INT] = false;
   11207  1.1  mrg     }
   11208  1.1  mrg   else
   11209  1.1  mrg     {
   11210  1.1  mrg       pcum->arg_count [(int) SH_ARG_INT] = 0;
   11211  1.1  mrg       pcum->prototype_p = false;
   11212  1.1  mrg       if (mode != VOIDmode)
   11213  1.1  mrg 	{
   11214  1.1  mrg 	  /* If the default ABI is the Renesas ABI then all library
   11215  1.1  mrg 	     calls must assume that the library will be using the
   11216  1.1  mrg 	     Renesas ABI.  So if the function would return its result
   11217  1.1  mrg 	     in memory then we must force the address of this memory
   11218  1.1  mrg 	     block onto the stack.  Ideally we would like to call
   11219  1.1  mrg 	     targetm.calls.return_in_memory() here but we do not have
   11220  1.1  mrg 	     the TYPE or the FNDECL available so we synthesize the
   11221  1.1  mrg 	     contents of that function as best we can.  */
   11222  1.1  mrg 	  pcum->force_mem =
   11223  1.1  mrg 	    (TARGET_DEFAULT & MASK_HITACHI)
   11224  1.1  mrg 	    && (mode == BLKmode
   11225  1.1  mrg 		|| (GET_MODE_SIZE (mode) > 4
   11226  1.1  mrg 		    && !(mode == DFmode
   11227  1.1  mrg 			 && TARGET_FPU_DOUBLE)));
   11228  1.1  mrg 	}
   11229  1.1  mrg       else
   11230  1.1  mrg 	pcum->force_mem = false;
   11231  1.1  mrg     }
   11232  1.1  mrg }
   11233  1.1  mrg 
   11234  1.1  mrg rtx
   11235  1.1  mrg sh_gen_truncate (machine_mode mode, rtx x, int need_sign_ext)
   11236  1.1  mrg {
   11237  1.1  mrg   enum rtx_code code = TRUNCATE;
   11238  1.1  mrg 
   11239  1.1  mrg   if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
   11240  1.1  mrg     {
   11241  1.1  mrg       rtx inner = XEXP (x, 0);
   11242  1.1  mrg       machine_mode inner_mode = GET_MODE (inner);
   11243  1.1  mrg 
   11244  1.1  mrg       if (inner_mode == mode)
   11245  1.1  mrg 	return inner;
   11246  1.1  mrg       else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
   11247  1.1  mrg 	x = inner;
   11248  1.1  mrg       else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
   11249  1.1  mrg 	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
   11250  1.1  mrg 	{
   11251  1.1  mrg 	  code = GET_CODE (x);
   11252  1.1  mrg 	  x = inner;
   11253  1.1  mrg 	}
   11254  1.1  mrg     }
   11255  1.1  mrg   return gen_rtx_fmt_e (code, mode, x);
   11256  1.1  mrg }
   11257  1.1  mrg 
   11258  1.1  mrg /* Load and store depend on the highpart of the address.  However,
   11259  1.1  mrg    set_attr_alternative does not give well-defined results before reload,
   11260  1.1  mrg    so we must look at the rtl ourselves to see if any of the feeding
   11261  1.1  mrg    registers is used in a memref.
   11262  1.1  mrg 
   11263  1.1  mrg    Return true iff INSN contains a MEM.  */
   11264  1.1  mrg bool
   11265  1.1  mrg sh_contains_memref_p (rtx insn)
   11266  1.1  mrg {
   11267  1.1  mrg   subrtx_iterator::array_type array;
   11268  1.1  mrg   FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
   11269  1.1  mrg     if (MEM_P (*iter))
   11270  1.1  mrg       return true;
   11271  1.1  mrg   return false;
   11272  1.1  mrg }
   11273  1.1  mrg 
   11274  1.1  mrg /* Return true iff INSN loads a banked register.  */
   11275  1.1  mrg bool
   11276  1.1  mrg sh_loads_bankedreg_p (rtx insn)
   11277  1.1  mrg {
   11278  1.1  mrg   if (GET_CODE (PATTERN (insn)) == SET)
   11279  1.1  mrg     {
   11280  1.1  mrg       rtx op = SET_DEST (PATTERN(insn));
   11281  1.1  mrg       if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
   11282  1.1  mrg 	return true;
   11283  1.1  mrg     }
   11284  1.1  mrg 
   11285  1.1  mrg   return false;
   11286  1.1  mrg }
   11287  1.1  mrg 
   11288  1.1  mrg /* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
   11289  1.1  mrg static reg_class_t
   11290  1.1  mrg sh_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
   11291  1.1  mrg {
   11292  1.1  mrg   return rclass;
   11293  1.1  mrg }
   11294  1.1  mrg 
   11295  1.1  mrg /* Implement TARGET_SECONDARY_RELOAD.  */
   11296  1.1  mrg static reg_class_t
   11297  1.1  mrg sh_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
   11298  1.1  mrg 		     machine_mode mode, secondary_reload_info *sri)
   11299  1.1  mrg {
   11300  1.1  mrg   enum reg_class rclass = (enum reg_class) rclass_i;
   11301  1.1  mrg 
   11302  1.1  mrg   if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS
   11303  1.1  mrg       && REG_P (XEXP (XEXP (x, 0), 0))
   11304  1.1  mrg       && REGNO (XEXP (XEXP (x, 0), 0)) == GBR_REG)
   11305  1.1  mrg     return rclass == R0_REGS ? NO_REGS : R0_REGS;
   11306  1.1  mrg 
   11307  1.1  mrg   if (MEM_P (x) && REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == GBR_REG)
   11308  1.1  mrg     return rclass == R0_REGS ? NO_REGS : R0_REGS;
   11309  1.1  mrg 
   11310  1.1  mrg   if (REG_P (x) && REGNO (x) == GBR_REG)
   11311  1.1  mrg     return NO_REGS;
   11312  1.1  mrg 
   11313  1.1  mrg   if (in_p)
   11314  1.1  mrg     {
   11315  1.1  mrg       if (REGCLASS_HAS_FP_REG (rclass)
   11316  1.1  mrg 	  && immediate_operand ((x), mode)
   11317  1.1  mrg 	  && ! ((fp_zero_operand (x) || fp_one_operand (x)) && mode == SFmode))
   11318  1.1  mrg 	switch (mode)
   11319  1.1  mrg 	  {
   11320  1.1  mrg 	  case E_SFmode:
   11321  1.1  mrg 	    sri->icode = CODE_FOR_reload_insf__frn;
   11322  1.1  mrg 	    return NO_REGS;
   11323  1.1  mrg 	  case E_DFmode:
   11324  1.1  mrg 	    sri->icode = CODE_FOR_reload_indf__frn;
   11325  1.1  mrg 	    return NO_REGS;
   11326  1.1  mrg 	  case E_SImode:
   11327  1.1  mrg 	    /* ??? If we knew that we are in the appropriate mode -
   11328  1.1  mrg 	       single precision - we could use a reload pattern directly.  */
   11329  1.1  mrg 	    return FPUL_REGS;
   11330  1.1  mrg 	  default:
   11331  1.1  mrg 	    abort ();
   11332  1.1  mrg 	  }
   11333  1.1  mrg       if (rclass == FPUL_REGS
   11334  1.1  mrg 	  && ((REG_P (x) && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
   11335  1.1  mrg 			     || REGNO (x) == T_REG))
   11336  1.1  mrg 	      || GET_CODE (x) == PLUS))
   11337  1.1  mrg 	return GENERAL_REGS;
   11338  1.1  mrg       if (rclass == FPUL_REGS && immediate_operand (x, mode))
   11339  1.1  mrg 	{
   11340  1.1  mrg 	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
   11341  1.1  mrg 	    return GENERAL_REGS;
   11342  1.1  mrg 	  else if (mode == SFmode)
   11343  1.1  mrg 	    return FP_REGS;
   11344  1.1  mrg 	  sri->icode = CODE_FOR_reload_insi__i_fpul;
   11345  1.1  mrg 	  return NO_REGS;
   11346  1.1  mrg 	}
   11347  1.1  mrg       if (rclass == FPSCR_REGS
   11348  1.1  mrg 	  && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
   11349  1.1  mrg 	      || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
   11350  1.1  mrg         return GENERAL_REGS;
   11351  1.1  mrg     } /* end of input-only processing.  */
   11352  1.1  mrg 
   11353  1.1  mrg   if (((REGCLASS_HAS_FP_REG (rclass)
   11354  1.1  mrg 	&& (REG_P (x)
   11355  1.1  mrg 	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
   11356  1.1  mrg 		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
   11357  1.1  mrg 		    && TARGET_FMOVD))))
   11358  1.1  mrg        || (REGCLASS_HAS_GENERAL_REG (rclass)
   11359  1.1  mrg 	   && REG_P (x)
   11360  1.1  mrg 	   && FP_REGISTER_P (REGNO (x))))
   11361  1.1  mrg       && (mode == SFmode || mode == SImode))
   11362  1.1  mrg     return FPUL_REGS;
   11363  1.1  mrg   if ((rclass == FPUL_REGS
   11364  1.1  mrg        || (REGCLASS_HAS_FP_REG (rclass) && mode == SImode))
   11365  1.1  mrg       && (MEM_P (x)
   11366  1.1  mrg 	  || (REG_P (x)
   11367  1.1  mrg 	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
   11368  1.1  mrg 		  || REGNO (x) == T_REG
   11369  1.1  mrg 		  || system_reg_operand (x, VOIDmode)))))
   11370  1.1  mrg     {
   11371  1.1  mrg       if (rclass == FPUL_REGS)
   11372  1.1  mrg 	return GENERAL_REGS;
   11373  1.1  mrg       return NO_REGS;  // LRA wants NO_REGS here, it used to be FPUL_REGS;
   11374  1.1  mrg     }
   11375  1.1  mrg 
   11376  1.1  mrg   if ((rclass == MAC_REGS || rclass == PR_REGS)
   11377  1.1  mrg       && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
   11378  1.1  mrg       && rclass != REGNO_REG_CLASS (REGNO (x)))
   11379  1.1  mrg     return GENERAL_REGS;
   11380  1.1  mrg 
    11381  1.1  mrg  /* If we get here, fall back to loading the FPUL register through general
    11382  1.1  mrg     registers.  This can happen when the movsi_ie insn is picked initially to
   11383  1.1  mrg     load/store the FPUL register from/to another register, and then the
   11384  1.1  mrg     other register is allocated on the stack.  */
   11385  1.1  mrg   if (rclass == FPUL_REGS && true_regnum (x) == -1)
   11386  1.1  mrg     return GENERAL_REGS;
   11387  1.1  mrg 
   11388  1.1  mrg   /* Force mov.b / mov.w displacement addressing insn to use R0 as
   11389  1.1  mrg      the other operand.
   11390  1.1  mrg      On SH2A could also just leave it alone here, which would result in a
   11391  1.1  mrg      4 byte move insn being generated instead.  However, for this to work
   11392  1.1  mrg      the insns must have the appropriate alternatives.  */
   11393  1.1  mrg   if ((mode == QImode || mode == HImode) && rclass != R0_REGS
   11394  1.1  mrg       && satisfies_constraint_Sdd (x)
   11395  1.1  mrg       && sh_disp_addr_displacement (x)
   11396  1.1  mrg 	 <= sh_max_mov_insn_displacement (mode, false))
   11397  1.1  mrg     return R0_REGS;
   11398  1.1  mrg 
   11399  1.1  mrg   /* When reload is trying to address a QImode or HImode subreg on the stack,
   11400  1.1  mrg      force any subreg byte into R0_REGS, as this is going to become a
   11401  1.1  mrg      displacement address.
   11402  1.1  mrg      We could restrict this to SUBREG_BYTE (x) > 0, but if the actual reg
   11403  1.1  mrg      is on the stack, the memref to it might already require a displacement
   11404  1.1  mrg      and that has to be added to the final address.  At this point we don't
   11405  1.1  mrg      know the cumulative displacement so we assume the worst case.  */
   11406  1.1  mrg   if ((mode == QImode || mode == HImode) && rclass != R0_REGS
   11407  1.1  mrg       && GET_CODE (x) == SUBREG && true_regnum (x) == -1)
   11408  1.1  mrg     return R0_REGS;
   11409  1.1  mrg 
   11410  1.1  mrg   return NO_REGS;
   11411  1.1  mrg }
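
/* Illustrative note (not part of the original sources): a typical case of the
   GBR handling at the top of sh_secondary_reload is an access such as a mem
   of (plus (reg GBR) (const_int 4)).  Since the GBR-relative move
   instructions only take R0 as the other operand, reloading such an address
   into any class other than R0_REGS is answered with R0_REGS as the required
   intermediate class.  */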
   11412  1.1  mrg 
   11413  1.1  mrg /* Return true if SUBST can't safely replace its equivalent during RA.  */
   11414  1.1  mrg static bool
   11415  1.1  mrg sh_cannot_substitute_mem_equiv_p (rtx)
   11416  1.1  mrg {
   11417  1.1  mrg   /* If SUBST is mem[base+index] or QI/HImode mem[base+disp], the insn
   11418  1.1  mrg      uses R0 and may cause spill failure when R0 is already used.
   11419  1.1  mrg      We have to return true for that case at least.
    11420  1.1  mrg      Moreover, SH has strong R0 parity and also does not have enough hard
    11421  1.1  mrg      registers to make the equiv substitution win in size or speed on
    11422  1.1  mrg      average working sets.  The pseudos produced to
   11423  1.1  mrg      hold the equiv values can't get good hard registers for bad cases
   11424  1.1  mrg      and end up memory save/restore insns which make the code worse.  */
   11425  1.1  mrg   return true;
   11426  1.1  mrg }
   11427  1.1  mrg 
   11428  1.1  mrg /* Implement TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT.  */
   11429  1.1  mrg static bool
   11430  1.1  mrg sh_legitimize_address_displacement (rtx *offset1, rtx *offset2,
   11431  1.1  mrg 				    poly_int64 orig_offset,
   11432  1.1  mrg 				    machine_mode mode)
   11433  1.1  mrg {
   11434  1.1  mrg   if ((TARGET_FPU_DOUBLE && mode == DFmode)
   11435  1.1  mrg       || (TARGET_SH2E && mode == SFmode))
   11436  1.1  mrg     return false;
   11437  1.1  mrg 
   11438  1.1  mrg   struct disp_adjust adj = sh_find_mov_disp_adjust (mode, orig_offset);
   11439  1.1  mrg   if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
   11440  1.1  mrg     {
   11441  1.1  mrg       *offset1 = adj.offset_adjust;
   11442  1.1  mrg       *offset2 = adj.mov_disp;
   11443  1.1  mrg       return true;
   11444  1.1  mrg     }
   11445  1.1  mrg 
   11446  1.1  mrg   return false;
   11447  1.1  mrg }
   11448  1.1  mrg 
    11449  1.1  mrg /* Return true if the movsf insn should be split with an additional
   11450  1.1  mrg    register.  */
   11451  1.1  mrg bool
   11452  1.1  mrg sh_movsf_ie_ra_split_p (rtx op0, rtx op1, rtx op2)
   11453  1.1  mrg {
   11454  1.1  mrg   /* op0 == op1 */
   11455  1.1  mrg   if (rtx_equal_p (op0, op1))
   11456  1.1  mrg     return true;
   11457  1.1  mrg   /* fy, FQ, reg */
   11458  1.1  mrg   if (GET_CODE (op1) == CONST_DOUBLE
   11459  1.1  mrg       && ! satisfies_constraint_G (op1)
   11460  1.1  mrg       && ! satisfies_constraint_H (op1)
   11461  1.1  mrg       && REG_P (op0)
   11462  1.1  mrg       && REG_P (op2))
   11463  1.1  mrg     return true;
   11464  1.1  mrg   /* f, r, y */
   11465  1.1  mrg   if (REG_P (op0) && FP_REGISTER_P (REGNO (op0))
   11466  1.1  mrg       && REG_P (op1) && GENERAL_REGISTER_P (REGNO (op1))
   11467  1.1  mrg       && REG_P (op2) && (REGNO (op2) == FPUL_REG))
   11468  1.1  mrg     return true;
   11469  1.1  mrg   /* r, f, y */
   11470  1.1  mrg   if (REG_P (op1) && FP_REGISTER_P (REGNO (op1))
   11471  1.1  mrg       && REG_P (op0) && GENERAL_REGISTER_P (REGNO (op0))
   11472  1.1  mrg       && REG_P (op2) && (REGNO (op2) == FPUL_REG))
   11473  1.1  mrg     return true;
   11474  1.1  mrg 
   11475  1.1  mrg   return false;
   11476  1.1  mrg }
   11477  1.1  mrg 
   11478  1.1  mrg static void
   11479  1.1  mrg sh_conditional_register_usage (void)
   11480  1.1  mrg {
   11481  1.1  mrg   for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
   11482  1.1  mrg     if (! VALID_REGISTER_P (regno))
   11483  1.1  mrg       fixed_regs[regno] = 1;
   11484  1.1  mrg   /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs.  */
   11485  1.1  mrg   if (flag_pic)
   11486  1.1  mrg     fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
   11487  1.1  mrg   if (TARGET_FDPIC)
   11488  1.1  mrg     {
   11489  1.1  mrg       fixed_regs[PIC_REG] = 1;
   11490  1.1  mrg       call_used_regs[PIC_REG] = 1;
   11491  1.1  mrg     }
   11492  1.1  mrg   /* Renesas saves and restores mac registers on call.  */
   11493  1.1  mrg   if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
   11494  1.1  mrg     {
   11495  1.1  mrg       call_used_regs[MACH_REG] = 0;
   11496  1.1  mrg       call_used_regs[MACL_REG] = 0;
   11497  1.1  mrg     }
   11498  1.1  mrg 
   11499  1.1  mrg   for (int regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
   11500  1.1  mrg     if (! fixed_regs[regno] && call_used_regs[regno])
   11501  1.1  mrg       SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
   11502  1.1  mrg 
   11503  1.1  mrg   call_used_regs[FPSCR_MODES_REG] = 0;
   11504  1.1  mrg   call_used_regs[FPSCR_STAT_REG] = 0;
   11505  1.1  mrg }
   11506  1.1  mrg 
   11507  1.1  mrg /* Implement TARGET_LEGITIMATE_CONSTANT_P
   11508  1.1  mrg 
   11509  1.1  mrg    can_store_by_pieces constructs VOIDmode CONST_DOUBLEs.  */
   11510  1.1  mrg static bool
   11511  1.1  mrg sh_legitimate_constant_p (machine_mode mode, rtx x)
   11512  1.1  mrg {
   11513  1.1  mrg   if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
   11514  1.1  mrg     {
   11515  1.1  mrg       rtx base, offset;
   11516  1.1  mrg       split_const (x, &base, &offset);
   11517  1.1  mrg 
   11518  1.1  mrg       if (GET_CODE (base) == SYMBOL_REF
   11519  1.1  mrg 	  && !offset_within_block_p (base, INTVAL (offset)))
   11520  1.1  mrg        return false;
   11521  1.1  mrg     }
   11522  1.1  mrg 
   11523  1.1  mrg   if (TARGET_FDPIC
   11524  1.1  mrg       && (SYMBOLIC_CONST_P (x)
   11525  1.1  mrg 	  || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
   11526  1.1  mrg 	      && SYMBOLIC_CONST_P (XEXP (XEXP (x, 0), 0)))))
   11527  1.1  mrg     return false;
   11528  1.1  mrg 
   11529  1.1  mrg   return GET_CODE (x) != CONST_DOUBLE
   11530  1.1  mrg 	 || mode == DFmode || mode == SFmode
   11531  1.1  mrg 	 || mode == DImode || GET_MODE (x) == VOIDmode;
   11532  1.1  mrg }
   11533  1.1  mrg 
   11534  1.1  mrg enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
   11535  1.1  mrg 
   11536  1.1  mrg static void
   11537  1.1  mrg sh_init_sync_libfuncs (void)
   11538  1.1  mrg {
   11539  1.1  mrg   init_sync_libfuncs (UNITS_PER_WORD);
   11540  1.1  mrg }
   11541  1.1  mrg 
   11542  1.1  mrg /* Return true if it is appropriate to emit `ret' instructions in the
   11543  1.1  mrg    body of a function.  */
   11544  1.1  mrg bool
   11545  1.1  mrg sh_can_use_simple_return_p (void)
   11546  1.1  mrg {
   11547  1.1  mrg   if (! reload_completed || frame_pointer_needed)
   11548  1.1  mrg     return false;
   11549  1.1  mrg 
    11550  1.1  mrg   /* Moving the prologue around doesn't reduce the size.  */
   11551  1.1  mrg   if (optimize_function_for_size_p (cfun))
   11552  1.1  mrg     return false;
   11553  1.1  mrg 
   11554  1.1  mrg   /* Finally, allow for pr save.  */
   11555  1.1  mrg   HARD_REG_SET live_regs_mask;
   11556  1.1  mrg   int d = calc_live_regs (&live_regs_mask);
   11557  1.1  mrg 
   11558  1.1  mrg   if (rounded_frame_size (d) > 4)
    11559  1.1  mrg     return false;
   11560  1.1  mrg 
   11561  1.1  mrg   return true;
   11562  1.1  mrg }
   11563  1.1  mrg 
   11564  1.1  mrg /*------------------------------------------------------------------------------
   11565  1.1  mrg   Address mode optimization support code
   11566  1.1  mrg */
   11567  1.1  mrg 
   11568  1.1  mrg typedef HOST_WIDE_INT disp_t;
   11569  1.1  mrg static const disp_t MIN_DISP = HOST_WIDE_INT_MIN;
   11570  1.1  mrg static const disp_t MAX_DISP = HOST_WIDE_INT_MAX;
   11571  1.1  mrg static const disp_t INVALID_DISP = MAX_DISP;
   11572  1.1  mrg 
   11573  1.1  mrg /* A memory reference which is described by a base register and a
   11574  1.1  mrg    displacement.  */
   11575  1.1  mrg class base_reg_disp
   11576  1.1  mrg {
   11577  1.1  mrg public:
   11578  1.1  mrg   base_reg_disp (rtx br, disp_t d);
   11579  1.1  mrg 
   11580  1.1  mrg   bool is_reg (void) const;
   11581  1.1  mrg   bool is_disp (void) const;
   11582  1.1  mrg   rtx reg (void) const;
   11583  1.1  mrg   disp_t disp (void) const;
   11584  1.1  mrg 
   11585  1.1  mrg private:
   11586  1.1  mrg   rtx reg_;
   11587  1.1  mrg   disp_t disp_;
   11588  1.1  mrg };
   11589  1.1  mrg 
   11590  1.1  mrg inline
   11591  1.1  mrg base_reg_disp::base_reg_disp (rtx br, disp_t d)
   11592  1.1  mrg : reg_ (br), disp_ (d)
   11593  1.1  mrg {
   11594  1.1  mrg }
   11595  1.1  mrg 
   11596  1.1  mrg inline bool
   11597  1.1  mrg base_reg_disp::is_reg (void) const
   11598  1.1  mrg {
   11599  1.1  mrg   return reg_ != NULL_RTX && disp_ != INVALID_DISP;
   11600  1.1  mrg }
   11601  1.1  mrg 
   11602  1.1  mrg inline bool
   11603  1.1  mrg base_reg_disp::is_disp (void) const
   11604  1.1  mrg {
   11605  1.1  mrg   return reg_ == NULL_RTX && disp_ != INVALID_DISP;
   11606  1.1  mrg }
   11607  1.1  mrg 
   11608  1.1  mrg inline rtx
   11609  1.1  mrg base_reg_disp::reg (void) const
   11610  1.1  mrg {
   11611  1.1  mrg   return reg_;
   11612  1.1  mrg }
   11613  1.1  mrg 
   11614  1.1  mrg inline disp_t
   11615  1.1  mrg base_reg_disp::disp (void) const
   11616  1.1  mrg {
   11617  1.1  mrg   return disp_;
   11618  1.1  mrg }
   11619  1.1  mrg 
   11620  1.1  mrg /* Find the base register and calculate the displacement for a given
   11621  1.1  mrg    address rtx 'x'.  */
   11622  1.1  mrg static base_reg_disp
   11623  1.1  mrg sh_find_base_reg_disp (rtx_insn* insn, rtx x, disp_t disp = 0,
   11624  1.1  mrg 		       rtx base_reg = NULL)
   11625  1.1  mrg {
   11626  1.1  mrg   if (REG_P (x))
   11627  1.1  mrg     {
   11628  1.1  mrg       if (REGNO (x) == GBR_REG)
   11629  1.1  mrg 	return base_reg_disp (x, disp);
   11630  1.1  mrg 
   11631  1.1  mrg       /* We've reached a hard-reg.  This is probably the point where
   11632  1.1  mrg 	 function args are copied to pseudos.  Do not go any further and
   11633  1.1  mrg 	 stick to the pseudo.  If the original mem addr was in a hard reg
   11634  1.1  mrg 	 from the beginning, it will become the base reg.  */
   11635  1.1  mrg       if (REGNO (x) < FIRST_PSEUDO_REGISTER)
   11636  1.1  mrg 	return base_reg_disp (base_reg != NULL ? base_reg : x, disp);
   11637  1.1  mrg 
    11638  1.1  mrg       /* Find the def of the reg and trace it.  If there is more than one
    11639  1.1  mrg 	 def and they are not all the same, assume it's not safe to proceed.
   11640  1.1  mrg       rtx_insn* last_i = NULL;
   11641  1.1  mrg       rtx last_set = NULL;
   11642  1.1  mrg       for (df_ref d = DF_REG_DEF_CHAIN (REGNO (x)); d != NULL;
   11643  1.1  mrg 	   d = DF_REF_NEXT_REG (d))
   11644  1.1  mrg 	{
   11645  1.1  mrg 	  rtx set = const_cast<rtx> (set_of (x, DF_REF_INSN (d)));
   11646  1.1  mrg 
   11647  1.1  mrg 	  /* Accept multiple defs, as long as they are equal.  */
   11648  1.1  mrg 	  if (last_set == NULL || rtx_equal_p (last_set, set))
   11649  1.1  mrg 	    {
   11650  1.1  mrg 	      last_i = DF_REF_INSN (d);
   11651  1.1  mrg 	      last_set = set;
   11652  1.1  mrg 	    }
   11653  1.1  mrg 	  else
   11654  1.1  mrg 	    {
   11655  1.1  mrg 	      last_i = NULL;
   11656  1.1  mrg 	      last_set = NULL;
   11657  1.1  mrg 	      break;
   11658  1.1  mrg 	    }
   11659  1.1  mrg 	}
   11660  1.1  mrg 
   11661  1.1  mrg       if (last_set != NULL && last_i != NULL)
   11662  1.1  mrg 	return sh_find_base_reg_disp (last_i, XEXP (last_set, 1), disp,
   11663  1.1  mrg 				      XEXP (last_set, 0));
   11664  1.1  mrg 
    11665  1.1  mrg       /* If we get here, no previous insn was found that sets the reg.
   11666  1.1  mrg 	 The input reg is already the base reg.  */
   11667  1.1  mrg       return base_reg_disp (x, disp);
   11668  1.1  mrg     }
   11669  1.1  mrg 
   11670  1.1  mrg   else if (GET_CODE (x) == PLUS)
   11671  1.1  mrg     {
   11672  1.1  mrg       base_reg_disp left_val = sh_find_base_reg_disp (insn, XEXP (x, 0));
   11673  1.1  mrg       base_reg_disp right_val = sh_find_base_reg_disp (insn, XEXP (x, 1));
   11674  1.1  mrg 
   11675  1.1  mrg       /* Either left or right val must be a reg.
   11676  1.1  mrg 	 We don't handle the case of 'reg + reg' here.  */
   11677  1.1  mrg       if (left_val.is_reg () && right_val.is_disp ())
   11678  1.1  mrg 	return base_reg_disp (left_val.reg (), left_val.disp ()
   11679  1.1  mrg 					       + right_val.disp () + disp);
   11680  1.1  mrg       else if (right_val.is_reg () && left_val.is_disp ())
   11681  1.1  mrg 	return base_reg_disp (right_val.reg (), right_val.disp ()
   11682  1.1  mrg 						+ left_val.disp () + disp);
   11683  1.1  mrg       else
   11684  1.1  mrg 	return base_reg_disp (base_reg, disp);
   11685  1.1  mrg     }
   11686  1.1  mrg 
   11687  1.1  mrg   else if (CONST_INT_P (x))
   11688  1.1  mrg     return base_reg_disp (NULL, disp + INTVAL (x));
   11689  1.1  mrg 
   11690  1.1  mrg   /* Didn't find anything useful.  */
   11691  1.1  mrg   return base_reg_disp (base_reg, disp);
   11692  1.1  mrg }
   11693  1.1  mrg 
   11694  1.1  mrg /* Given an insn and a memory operand, try to find an equivalent GBR
   11695  1.1  mrg    based memory address and return the corresponding new memory address.
   11696  1.1  mrg    Return NULL_RTX if not found.  */
   11697  1.1  mrg rtx
   11698  1.1  mrg sh_find_equiv_gbr_addr (rtx_insn* insn, rtx mem)
   11699  1.1  mrg {
   11700  1.1  mrg   if (!MEM_P (mem) || gbr_address_mem (mem, GET_MODE (mem)))
   11701  1.1  mrg     return NULL_RTX;
   11702  1.1  mrg 
   11703  1.1  mrg   /* Leave post/pre inc/dec or any other side effect addresses alone.  */
   11704  1.1  mrg   if (side_effects_p (XEXP (mem, 0)))
   11705  1.1  mrg     return NULL_RTX;
   11706  1.1  mrg 
   11707  1.1  mrg   /* When not optimizing there might be no dataflow available.  */
   11708  1.1  mrg   if (df == NULL)
   11709  1.1  mrg     return NULL_RTX;
   11710  1.1  mrg 
   11711  1.1  mrg   base_reg_disp gbr_disp = sh_find_base_reg_disp (insn, XEXP (mem, 0));
   11712  1.1  mrg 
   11713  1.1  mrg   if (gbr_disp.is_reg () && REGNO (gbr_disp.reg ()) == GBR_REG)
   11714  1.1  mrg     {
   11715  1.1  mrg       /* If GBR is marked as call clobbered we bail out if we see a call.
   11716  1.1  mrg 	 FIXME: Actually should check if this mem refers to the gbr value
    11717  1.1  mrg 	 before or after the call.  If there is a store_gbr preceding this
   11718  1.1  mrg 	 mem, it's safe to use GBR for this mem.
   11719  1.1  mrg 
   11720  1.1  mrg 	 If GBR is not marked as call clobbered, but there is some other
   11721  1.1  mrg 	 def than a call, it's probably a load_gbr upon which we also
   11722  1.1  mrg 	 bail out to be on the safe side.
   11723  1.1  mrg 	 FIXME: Should check if we have a use-after-def case, such as
   11724  1.1  mrg 	 the call case above.  */
   11725  1.1  mrg       for (df_ref d = DF_REG_DEF_CHAIN (GBR_REG); d != NULL;
   11726  1.1  mrg 	   d = DF_REF_NEXT_REG (d))
   11727  1.1  mrg 	{
   11728  1.1  mrg 	  if (CALL_P (DF_REF_INSN (d)))
   11729  1.1  mrg 	    {
   11730  1.1  mrg 	      if (TEST_HARD_REG_BIT (regs_invalidated_by_call, GBR_REG))
   11731  1.1  mrg 		return NULL_RTX;
   11732  1.1  mrg 	      else
   11733  1.1  mrg 		continue;
   11734  1.1  mrg 	    }
   11735  1.1  mrg 	  else
   11736  1.1  mrg 	    return NULL_RTX;
   11737  1.1  mrg 	}
   11738  1.1  mrg 
   11739  1.1  mrg       rtx disp = GEN_INT (gbr_disp.disp ());
   11740  1.1  mrg       if (gbr_displacement (disp, GET_MODE (mem)))
   11741  1.1  mrg 	return gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, GBR_REG), disp);
   11742  1.1  mrg     }
   11743  1.1  mrg 
   11744  1.1  mrg   return NULL_RTX;
   11745  1.1  mrg }
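
/* Illustrative sketch of the transformation enabled by the function above
   (register numbers are hypothetical): if r1 was loaded from GBR, e.g.

	stc	gbr,r1
	...
	mov.l	@(4,r1),r0

   the mem address of the load resolves to GBR + 4, so the returned address
   (plus:SI (reg:SI GBR_REG) (const_int 4)) allows the load to be rewritten
   as

	mov.l	@(4,gbr),r0

   provided that the displacement is accepted by gbr_displacement.  */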
   11746  1.1  mrg 
   11747  1.1  mrg /*------------------------------------------------------------------------------
   11748  1.1  mrg   Manual insn combine support code.
   11749  1.1  mrg */
   11750  1.1  mrg 
   11751  1.1  mrg /* Return true if the specified insn contains any UNSPECs or
   11752  1.1  mrg    UNSPEC_VOLATILEs.  */
   11753  1.1  mrg static bool
   11754  1.1  mrg sh_unspec_insn_p (rtx x)
   11755  1.1  mrg {
   11756  1.1  mrg   subrtx_iterator::array_type array;
   11757  1.1  mrg   FOR_EACH_SUBRTX (i, array, x, ALL)
   11758  1.1  mrg     if (*i != NULL
   11759  1.1  mrg 	&& (GET_CODE (*i) == UNSPEC || GET_CODE (*i) == UNSPEC_VOLATILE))
   11760  1.1  mrg       return true;
   11761  1.1  mrg 
   11762  1.1  mrg   return false;
   11763  1.1  mrg }
   11764  1.1  mrg 
   11765  1.1  mrg /* Return true if the register operands of the specified insn are modified
   11766  1.1  mrg    between the specified from and to insns (exclusive of those two).  */
   11767  1.1  mrg bool
   11768  1.1  mrg sh_insn_operands_modified_between_p (rtx_insn* operands_insn,
   11769  1.1  mrg 				     const rtx_insn* from,
   11770  1.1  mrg 				     const rtx_insn* to)
   11771  1.1  mrg {
   11772  1.1  mrg   /*  FIXME: Return true for multiple sets for now.  */
   11773  1.1  mrg   rtx s = single_set (operands_insn);
   11774  1.1  mrg   if (s == NULL_RTX)
   11775  1.1  mrg     return true;
   11776  1.1  mrg 
   11777  1.1  mrg   subrtx_iterator::array_type array;
   11778  1.1  mrg   FOR_EACH_SUBRTX (i, array, SET_SRC (s), ALL)
    11779  1.1  mrg     if (*i != NULL
    11780  1.1  mrg 	&& ((REG_P (*i) || SUBREG_P (*i)) && reg_set_between_p (*i, from, to)))
   11781  1.1  mrg       return true;
   11782  1.1  mrg 
   11783  1.1  mrg   return false;
   11784  1.1  mrg }
   11785  1.1  mrg 
   11786  1.1  mrg /* Given an insn, determine whether it's a 'nott' insn, i.e. an insn that
   11787  1.1  mrg    negates the T bit and stores the result in the T bit.  */
   11788  1.1  mrg bool
   11789  1.1  mrg sh_is_nott_insn (const rtx_insn* i)
   11790  1.1  mrg {
   11791  1.1  mrg   return i != NULL_RTX && PATTERN (i) != NULL_RTX
   11792  1.1  mrg 	 && GET_CODE (PATTERN (i)) == SET
   11793  1.1  mrg 	 && t_reg_operand (XEXP (PATTERN (i), 0), VOIDmode)
   11794  1.1  mrg 	 && negt_reg_operand (XEXP (PATTERN (i), 1), VOIDmode);
   11795  1.1  mrg }
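
/* For illustration, a 'nott' insn as recognized above has a pattern of
   roughly the form

	(set (reg:SI T_REG) (xor:SI (reg:SI T_REG) (const_int 1)))

   i.e. the T bit is both the destination and the (negated) source.  */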
   11796  1.1  mrg 
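/* Given an insn, check whether it's a 'movt' kind of insn, i.e. an insn
   that stores the T bit in a general register, and return the destination
   register rtx, or null.  */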
   11797  1.1  mrg rtx
   11798  1.1  mrg sh_movt_set_dest (const rtx_insn* i)
   11799  1.1  mrg {
   11800  1.1  mrg   return i == NULL ? NULL : sh_movt_set_dest (PATTERN (i));
   11801  1.1  mrg }
   11802  1.1  mrg 
   11803  1.1  mrg rtx
   11804  1.1  mrg sh_movt_set_dest (const_rtx pat)
   11805  1.1  mrg {
   11806  1.1  mrg   return GET_CODE (pat) == SET
   11807  1.1  mrg 	 && arith_reg_dest (XEXP (pat, 0), SImode)
   11808  1.1  mrg 	 && t_reg_operand (XEXP (pat, 1), VOIDmode) ? XEXP (pat, 0) : NULL;
   11809  1.1  mrg }
   11810  1.1  mrg 
   11811  1.1  mrg /* Given an insn, check whether it's a 'movrt' kind of insn, i.e. an insn
   11812  1.1  mrg    that stores the negated T bit in a register, and return the destination
   11813  1.1  mrg    register rtx, or null.  */
   11814  1.1  mrg rtx
   11815  1.1  mrg sh_movrt_set_dest (const rtx_insn* i)
   11816  1.1  mrg {
   11817  1.1  mrg   return i == NULL ? NULL : sh_movrt_set_dest (PATTERN (i));
   11818  1.1  mrg }
   11819  1.1  mrg 
   11820  1.1  mrg rtx
   11821  1.1  mrg sh_movrt_set_dest (const_rtx pat)
   11822  1.1  mrg {
   11823  1.1  mrg   /* The negc movrt replacement is inside a parallel.  */
   11824  1.1  mrg   if (GET_CODE (pat) == PARALLEL)
   11825  1.1  mrg     pat = XVECEXP (pat, 0, 0);
   11826  1.1  mrg 
   11827  1.1  mrg   return GET_CODE (pat) == SET
   11828  1.1  mrg 	 && arith_reg_dest (XEXP (pat, 0), SImode)
   11829  1.1  mrg 	 && negt_reg_operand (XEXP (pat, 1), VOIDmode) ? XEXP (pat, 0) : NULL;
    11831  1.1  mrg }
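
/* For illustration, the destinations returned by the two helpers above
   correspond to set patterns of roughly these forms (Rn being some
   arbitrary general register):

	movt:	(set (reg:SI Rn) (reg:SI T_REG))
	movrt:	(set (reg:SI Rn) (xor:SI (reg:SI T_REG) (const_int 1)))

   where the negc based movrt replacement is wrapped in a parallel, hence
   the unwrapping above.  */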
   11832  1.1  mrg 
   11833  1.1  mrg /* Given an insn and a reg number, tell whether the reg dies or is unused
   11834  1.1  mrg    after the insn.  */
   11835  1.1  mrg bool
   11836  1.1  mrg sh_reg_dead_or_unused_after_insn (const rtx_insn* i, int regno)
   11837  1.1  mrg {
   11838  1.1  mrg   return find_regno_note (i, REG_DEAD, regno) != NULL
   11839  1.1  mrg 	 || find_regno_note (i, REG_UNUSED, regno) != NULL;
   11840  1.1  mrg }
   11841  1.1  mrg 
   11842  1.1  mrg /* Given an insn and a reg number, remove reg dead or reg unused notes to
   11843  1.1  mrg    mark it as being used after the insn.  */
   11844  1.1  mrg void
   11845  1.1  mrg sh_remove_reg_dead_or_unused_notes (rtx_insn* i, int regno)
   11846  1.1  mrg {
   11847  1.1  mrg   if (rtx n = find_regno_note (i, REG_DEAD, regno))
   11848  1.1  mrg     remove_note (i, n);
   11849  1.1  mrg   if (rtx n = find_regno_note (i, REG_UNUSED, regno))
   11850  1.1  mrg     remove_note (i, n);
   11851  1.1  mrg }
   11852  1.1  mrg 
   11853  1.1  mrg /* Given an insn check if it contains any post/pre inc/dec mem operands and
   11854  1.1  mrg    add the REG_INC notes accordingly.
   11855  1.1  mrg    FIXME: This function is very similar to lra.cc (add_auto_inc_notes).
   11856  1.1  mrg    FIXME: This function is currently used by peephole2 patterns because
   11857  1.1  mrg 	  the peephole2 pass does not preserve REG_INC notes.  If the notes
   11858  1.1  mrg 	  are dropped the following passes will do wrong things.  */
   11859  1.1  mrg rtx_insn*
   11860  1.1  mrg sh_check_add_incdec_notes (rtx_insn* i)
   11861  1.1  mrg {
   11862  1.1  mrg   struct for_each_inc_dec_clb
   11863  1.1  mrg   {
   11864  1.1  mrg     static int func (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
   11865  1.1  mrg 		     rtx dest, rtx src ATTRIBUTE_UNUSED,
   11866  1.1  mrg 		     rtx srcoff ATTRIBUTE_UNUSED, void* arg)
   11867  1.1  mrg     {
   11868  1.1  mrg       gcc_assert (REG_P (dest));
   11869  1.1  mrg 
   11870  1.1  mrg       rtx_insn* i = (rtx_insn*)arg;
   11871  1.1  mrg       if (find_regno_note (i, REG_INC, REGNO (dest)) == NULL)
   11872  1.1  mrg 	add_reg_note (i, REG_INC, dest);
   11873  1.1  mrg 
   11874  1.1  mrg       return 0;
   11875  1.1  mrg     }
   11876  1.1  mrg   };
   11877  1.1  mrg 
   11878  1.1  mrg   for_each_inc_dec (PATTERN (i), for_each_inc_dec_clb::func, i);
   11879  1.1  mrg   return i;
   11880  1.1  mrg }
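
/* Illustrative sketch (hypothetical registers): for an insn such as

	(set (reg:SI r0) (mem:SI (post_inc:SI (reg:SI r4))))

   the walk above adds a REG_INC note for r4 if it is not already present,
   so that the passes following peephole2 keep tracking the implicit
   modification of the address register.  */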
   11881  1.1  mrg 
    11882  1.1  mrg /* Given a move insn destination and a source, make sure that the move source
   11883  1.1  mrg    operand is not a post-inc mem load with the same address reg as the
   11884  1.1  mrg    destination.  Returns the modified source operand with the post-inc removed
   11885  1.1  mrg    if necessary.  */
   11886  1.1  mrg rtx
   11887  1.1  mrg sh_remove_overlapping_post_inc (rtx dst, rtx src)
   11888  1.1  mrg {
   11889  1.1  mrg   if (!MEM_P (src))
   11890  1.1  mrg     return src;
   11891  1.1  mrg 
   11892  1.1  mrg   rtx addr = XEXP (src, 0);
   11893  1.1  mrg 
   11894  1.1  mrg   if (GET_CODE (addr) == POST_INC
   11895  1.1  mrg       && reg_overlap_mentioned_p (XEXP (addr, 0), dst))
   11896  1.1  mrg     return replace_equiv_address (src, XEXP (addr, 0));
   11897  1.1  mrg 
   11898  1.1  mrg   gcc_assert (GET_CODE (addr) != POST_MODIFY);
   11899  1.1  mrg   return src;
   11900  1.1  mrg }
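
/* Illustrative sketch (hypothetical registers): for a move such as

	(set (reg:SI r1) (mem:SI (post_inc:SI (reg:SI r1))))

   the destination overlaps the address register, so the source is rewritten
   to the plain address form (mem:SI (reg:SI r1)) and the post-inc is
   dropped.  */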
   11901  1.1  mrg 
   11902  1.1  mrg /* Emit a move insn that is safe to be used in peephole patterns.  */
   11903  1.1  mrg rtx_insn*
   11904  1.1  mrg sh_peephole_emit_move_insn (rtx dst, rtx src)
   11905  1.1  mrg {
   11906  1.1  mrg   return sh_check_add_incdec_notes (
   11907  1.1  mrg 	emit_move_insn (dst, sh_remove_overlapping_post_inc (dst, src)));
   11908  1.1  mrg }
   11909  1.1  mrg 
   11910  1.1  mrg /* Given an op rtx and an insn, try to find out whether the result of the
   11911  1.1  mrg    specified op consists only of logical operations on T bit stores.  */
   11912  1.1  mrg bool
   11913  1.1  mrg sh_is_logical_t_store_expr (rtx op, rtx_insn* insn)
   11914  1.1  mrg {
   11915  1.1  mrg   if (!logical_operator (op, SImode))
   11916  1.1  mrg     return false;
   11917  1.1  mrg 
   11918  1.1  mrg   rtx ops[2] = { XEXP (op, 0), XEXP (op, 1) };
   11919  1.1  mrg   int op_is_t_count = 0;
   11920  1.1  mrg 
   11921  1.1  mrg   for (int i = 0; i < 2; ++i)
   11922  1.1  mrg     {
   11923  1.1  mrg       if (t_reg_operand (ops[i], VOIDmode)
   11924  1.1  mrg 	  || negt_reg_operand (ops[i], VOIDmode))
   11925  1.1  mrg 	op_is_t_count++;
   11926  1.1  mrg 
   11927  1.1  mrg       else
   11928  1.1  mrg 	{
   11929  1.1  mrg 	  set_of_reg op_set = sh_find_set_of_reg
   11930  1.1  mrg 	    (ops[i], insn, prev_nonnote_nondebug_insn_bb);
   11931  1.1  mrg 	  if (op_set.set_src == NULL_RTX)
   11932  1.1  mrg 	    continue;
   11933  1.1  mrg 
   11934  1.1  mrg 	  if (t_reg_operand (op_set.set_src, VOIDmode)
   11935  1.1  mrg 	      || negt_reg_operand (op_set.set_src, VOIDmode)
   11936  1.1  mrg 	      || sh_is_logical_t_store_expr (op_set.set_src, op_set.insn))
    11937  1.1  mrg 	    op_is_t_count++;
   11938  1.1  mrg 	}
   11939  1.1  mrg     }
   11940  1.1  mrg 
   11941  1.1  mrg   return op_is_t_count == 2;
   11942  1.1  mrg }
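
/* Illustrative example of what the function above accepts (hypothetical
   pseudo registers r100 and r101):

	(set (reg:SI r100) (reg:SI T_REG))
	(set (reg:SI r101) (xor:SI (reg:SI T_REG) (const_int 1)))
	... (ior:SI (reg:SI r100) (reg:SI r101)) ...

   Both operands of the ior originate from T bit stores, so the ior
   expression is considered a logical T bit store expression.  */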
   11943  1.1  mrg 
   11944  1.1  mrg /* Given the operand that is extended in a sign/zero extend insn, and the
   11945  1.1  mrg    insn, try to figure out whether the sign/zero extension can be replaced
   11946  1.1  mrg    by a simple reg-reg copy.  If so, the replacement reg rtx is returned,
   11947  1.1  mrg    NULL_RTX otherwise.  */
   11948  1.1  mrg rtx
   11949  1.1  mrg sh_try_omit_signzero_extend (rtx extended_op, rtx_insn* insn)
   11950  1.1  mrg {
    11951  1.1  mrg   if (GET_CODE (extended_op) == SUBREG
    11952  1.1  mrg       && REG_P (SUBREG_REG (extended_op)))
    11953  1.1  mrg     extended_op = SUBREG_REG (extended_op);
    11954  1.1  mrg   else if (!REG_P (extended_op))
    11955  1.1  mrg     return NULL_RTX;
   11957  1.1  mrg 
   11958  1.1  mrg   /* Reg moves must be of the same mode.  */
   11959  1.1  mrg   if (GET_MODE (extended_op) != SImode)
   11960  1.1  mrg     return NULL_RTX;
   11961  1.1  mrg 
   11962  1.1  mrg   set_of_reg s = sh_find_set_of_reg (extended_op, insn,
   11963  1.1  mrg 				     prev_nonnote_nondebug_insn_bb);
   11964  1.1  mrg   if (s.set_src == NULL_RTX)
   11965  1.1  mrg     return NULL_RTX;
   11966  1.1  mrg 
   11967  1.1  mrg   if (t_reg_operand (s.set_src, VOIDmode)
   11968  1.1  mrg       || negt_reg_operand (s.set_src, VOIDmode))
   11969  1.1  mrg     return extended_op;
   11970  1.1  mrg 
   11971  1.1  mrg   /* If the zero extended reg was formed by a logical operation, check the
   11972  1.1  mrg      operands of the logical operation.  If both originated from T bit
   11973  1.1  mrg      stores the zero extension can be eliminated.  */
   11974  1.1  mrg   else if (sh_is_logical_t_store_expr (s.set_src, s.insn))
   11975  1.1  mrg     return extended_op;
   11976  1.1  mrg 
   11977  1.1  mrg   return NULL_RTX;
   11978  1.1  mrg }
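
/* Illustrative sketch (hypothetical registers): if the extended operand
   was set by a T bit store such as

	(set (reg:SI r100) (reg:SI T_REG))

   its value is known to be 0 or 1.  A subsequent sign or zero extension of
   its low QImode/HImode part therefore cannot change the value and can be
   replaced by a plain reg-reg copy of r100.  */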
   11979  1.1  mrg 
   11980  1.1  mrg /* Given the current insn, which is assumed to be a movrt_negc insn, try to
   11981  1.1  mrg    figure out whether it should be converted into a movt-xor sequence in
   11982  1.1  mrg    the movrt_negc splitter.
   11983  1.1  mrg    Returns true if insns have been modified and the splitter has succeeded.  */
   11984  1.1  mrg bool
   11985  1.1  mrg sh_split_movrt_negc_to_movt_xor (rtx_insn* curr_insn, rtx operands[])
   11986  1.1  mrg {
   11987  1.1  mrg   /* In cases such as
   11988  1.1  mrg 	tst	r4,r4
   11989  1.1  mrg 	mov	#-1,r1
   11990  1.1  mrg 	negc	r1,r1
   11991  1.1  mrg 	tst	r4,r4
   11992  1.1  mrg      we can replace the T bit clobbering negc with a movt-xor sequence and
   11993  1.1  mrg      eliminate the redundant comparison.
   11994  1.1  mrg      Because the xor insn depends on register allocation results, allow this
   11995  1.1  mrg      only before reload.  */
   11996  1.1  mrg   if (!can_create_pseudo_p ())
   11997  1.1  mrg     return false;
   11998  1.1  mrg 
   11999  1.1  mrg   set_of_reg t_before_negc = sh_find_set_of_reg
   12000  1.1  mrg     (get_t_reg_rtx (), curr_insn, prev_nonnote_nondebug_insn_bb);
   12001  1.1  mrg   set_of_reg t_after_negc = sh_find_set_of_reg
   12002  1.1  mrg     (get_t_reg_rtx (), curr_insn, next_nonnote_nondebug_insn_bb);
   12003  1.1  mrg 
   12004  1.1  mrg   if (t_before_negc.set_rtx != NULL_RTX && t_after_negc.set_rtx != NULL_RTX
   12005  1.1  mrg       && rtx_equal_p (t_before_negc.set_rtx, t_after_negc.set_rtx)
   12006  1.1  mrg       && !reg_used_between_p (get_t_reg_rtx (), curr_insn, t_after_negc.insn)
   12007  1.1  mrg       && !sh_insn_operands_modified_between_p (t_before_negc.insn,
   12008  1.1  mrg 					       t_before_negc.insn,
   12009  1.1  mrg 					       t_after_negc.insn)
   12010  1.1  mrg       && !modified_between_p (get_t_reg_rtx (), curr_insn, t_after_negc.insn)
   12011  1.1  mrg       && !sh_unspec_insn_p (t_after_negc.insn)
   12012  1.1  mrg       && !volatile_insn_p (PATTERN (t_after_negc.insn))
   12013  1.1  mrg       && !side_effects_p (PATTERN (t_after_negc.insn))
   12014  1.1  mrg       && !may_trap_or_fault_p (PATTERN (t_after_negc.insn)))
   12015  1.1  mrg     {
   12016  1.1  mrg       emit_insn (gen_movrt_xor (operands[0], get_t_reg_rtx ()));
   12017  1.1  mrg       set_insn_deleted (t_after_negc.insn);
   12018  1.1  mrg       return true;
   12019  1.1  mrg     }
   12020  1.1  mrg   else
   12021  1.1  mrg     return false;
   12022  1.1  mrg }
   12023  1.1  mrg 
   12024  1.1  mrg /* Given a reg and the current insn, see if the value of the reg originated
   12025  1.1  mrg    from a sign or zero extension and return the discovered information.  */
   12026  1.1  mrg sh_extending_set_of_reg
   12027  1.1  mrg sh_find_extending_set_of_reg (rtx reg, rtx_insn* curr_insn)
   12028  1.1  mrg {
   12029  1.1  mrg   if (reg == NULL)
   12030  1.1  mrg     return sh_extending_set_of_reg (curr_insn);
   12031  1.1  mrg 
   12032  1.1  mrg   if (SUBREG_P (reg))
   12033  1.1  mrg     reg = SUBREG_REG (reg);
   12034  1.1  mrg 
   12035  1.1  mrg   if (!REG_P (reg))
   12036  1.1  mrg     return sh_extending_set_of_reg (curr_insn);
   12037  1.1  mrg 
   12038  1.1  mrg   /* FIXME: Also search the predecessor basic blocks.  It seems that checking
   12039  1.1  mrg      only the adjacent predecessor blocks would cover most of the cases.
   12040  1.1  mrg      Also try to look through the first extension that we hit.  There are some
    12041  1.1  mrg      cases, where a zero_extend is followed by an (implicit) sign_extend, and it
   12042  1.1  mrg      fails to see the sign_extend.  */
   12043  1.1  mrg   sh_extending_set_of_reg result = sh_find_set_of_reg
   12044  1.1  mrg     (reg, curr_insn, prev_nonnote_nondebug_insn_bb, true);
   12045  1.1  mrg 
   12046  1.1  mrg   if (result.set_src != NULL)
   12047  1.1  mrg     {
   12048  1.1  mrg       if (GET_CODE (result.set_src) == SIGN_EXTEND
   12049  1.1  mrg 	  || GET_CODE (result.set_src) == ZERO_EXTEND)
   12050  1.1  mrg 	{
   12051  1.1  mrg 	  if (dump_file)
   12052  1.1  mrg 	    fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
   12053  1.1  mrg 				"explicitly sign/zero extended in insn %d\n",
   12054  1.1  mrg 				REGNO (reg), INSN_UID (result.insn));
   12055  1.1  mrg 	  result.from_mode = GET_MODE (XEXP (result.set_src, 0));
   12056  1.1  mrg 	  result.ext_code = GET_CODE (result.set_src);
   12057  1.1  mrg 	}
   12058  1.1  mrg       else if (MEM_P (result.set_src)
   12059  1.1  mrg 	       && (GET_MODE (result.set_src) == QImode
   12060  1.1  mrg 		   || GET_MODE (result.set_src) == HImode)
   12061  1.1  mrg 	       && !sh_unspec_insn_p (result.insn))
   12062  1.1  mrg 	{
    12063  1.1  mrg 	  /* On SH, QImode and HImode memory loads always sign extend.  However,
    12064  1.1  mrg 	     in some cases where it seems that the higher bits are not interesting,
    12065  1.1  mrg 	     the loads will not be expanded as sign extending insns, but as
    12066  1.1  mrg 	     QImode/HImode loads into QImode/HImode regs.  We report that
   12067  1.1  mrg 	     the reg has been sign extended by the mem load.  When it is used
   12068  1.1  mrg 	     as such, we must convert the mem load into a sign extending insn,
   12069  1.1  mrg 	     see also sh_extending_set_of_reg::use_as_extended_reg.  */
   12070  1.1  mrg 	  if (dump_file)
   12071  1.1  mrg 	    fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
   12072  1.1  mrg 				"implicitly sign extended in insn %d\n",
   12073  1.1  mrg 				REGNO (reg), INSN_UID (result.insn));
   12074  1.1  mrg 	  result.from_mode = GET_MODE (result.set_src);
   12075  1.1  mrg 	  result.ext_code = SIGN_EXTEND;
   12076  1.1  mrg 	}
   12077  1.1  mrg     }
   12078  1.1  mrg 
   12079  1.1  mrg   return result;
   12080  1.1  mrg }
   12081  1.1  mrg 
   12082  1.1  mrg /* Given a reg that is known to be sign or zero extended at some insn,
   12083  1.1  mrg    take the appropriate measures so that the extended value can be used as
   12084  1.1  mrg    a reg at the specified insn and return the resulting reg rtx.  */
   12085  1.1  mrg rtx
   12086  1.1  mrg sh_extending_set_of_reg::use_as_extended_reg (rtx_insn* use_at_insn) const
   12087  1.1  mrg {
   12088  1.1  mrg   gcc_assert (insn != NULL && set_src != NULL && set_rtx != NULL);
   12089  1.1  mrg   gcc_assert (ext_code == SIGN_EXTEND || ext_code == ZERO_EXTEND);
   12090  1.1  mrg   gcc_assert (from_mode == QImode || from_mode == HImode);
   12091  1.1  mrg 
   12092  1.1  mrg   if (MEM_P (set_src) && ext_code == SIGN_EXTEND)
   12093  1.1  mrg     {
   12094  1.1  mrg       if (dump_file)
   12095  1.1  mrg 	fprintf (dump_file,
   12096  1.1  mrg 		 "use_as_extended_reg: converting non-extending mem load in "
   12097  1.1  mrg 		 "insn %d into sign-extending load\n", INSN_UID (insn));
   12098  1.1  mrg 
   12099  1.1  mrg 	rtx r = gen_reg_rtx (SImode);
   12100  1.1  mrg 	rtx_insn* i0;
   12101  1.1  mrg 	if (from_mode == QImode)
   12102  1.1  mrg 	  i0 = sh_check_add_incdec_notes (
   12103  1.1  mrg 			emit_insn_after (gen_extendqisi2 (r, set_src), insn));
   12104  1.1  mrg 	else if (from_mode == HImode)
   12105  1.1  mrg 	  i0 = sh_check_add_incdec_notes (
   12106  1.1  mrg 			emit_insn_after (gen_extendhisi2 (r, set_src), insn));
   12107  1.1  mrg 	else
   12108  1.1  mrg 	  gcc_unreachable ();
   12109  1.1  mrg 
   12110  1.1  mrg 	emit_insn_after (
   12111  1.1  mrg 		gen_move_insn (XEXP (set_rtx, 0),
   12112  1.1  mrg 			       gen_lowpart (GET_MODE (set_src), r)), i0);
   12113  1.1  mrg 	set_insn_deleted (insn);
   12114  1.1  mrg 	return r;
   12115  1.1  mrg     }
   12116  1.1  mrg   else
   12117  1.1  mrg     {
   12118  1.1  mrg       rtx extension_dst = XEXP (set_rtx, 0);
   12119  1.1  mrg       if (GET_MODE (extension_dst) != SImode)
   12120  1.1  mrg 	extension_dst = simplify_gen_subreg (SImode, extension_dst,
   12121  1.1  mrg 					     GET_MODE (extension_dst), 0);
   12122  1.1  mrg       if (modified_between_p (extension_dst, insn, use_at_insn))
   12123  1.1  mrg 	{
   12124  1.1  mrg 	  if (dump_file)
   12125  1.1  mrg 	    fprintf (dump_file,
   12126  1.1  mrg 		     "use_as_extended_reg: dest reg %d of extending insn %d is "
   12127  1.1  mrg 		     "modified, inserting a reg-reg copy\n",
   12128  1.1  mrg 		     REGNO (extension_dst), INSN_UID (insn));
   12129  1.1  mrg 
   12130  1.1  mrg 	  rtx r = gen_reg_rtx (SImode);
   12131  1.1  mrg 	  emit_insn_after (gen_move_insn (r, extension_dst), insn);
   12132  1.1  mrg 	  return r;
   12133  1.1  mrg 	}
   12134  1.1  mrg       else
   12135  1.1  mrg 	{
   12136  1.1  mrg 	  sh_remove_reg_dead_or_unused_notes (insn, REGNO (extension_dst));
   12137  1.1  mrg 	  return extension_dst;
   12138  1.1  mrg 	}
   12139  1.1  mrg     }
   12140  1.1  mrg }
   12141  1.1  mrg 
   12142  1.1  mrg bool
   12143  1.1  mrg sh_extending_set_of_reg::can_use_as_unextended_reg (void) const
   12144  1.1  mrg {
   12145  1.1  mrg   if ((ext_code == SIGN_EXTEND || ext_code == ZERO_EXTEND)
   12146  1.1  mrg       && (from_mode == QImode || from_mode == HImode)
   12147  1.1  mrg       && set_src != NULL)
   12148  1.1  mrg     return arith_reg_operand (XEXP (set_src, 0), from_mode);
   12149  1.1  mrg   else
   12150  1.1  mrg     return false;
   12151  1.1  mrg }
   12152  1.1  mrg 
   12153  1.1  mrg rtx
   12154  1.1  mrg sh_extending_set_of_reg::use_as_unextended_reg (rtx_insn* use_at_insn) const
   12155  1.1  mrg {
   12156  1.1  mrg   gcc_assert (can_use_as_unextended_reg ());
   12157  1.1  mrg 
   12158  1.1  mrg   rtx r = XEXP (set_src, 0);
   12159  1.1  mrg   rtx r0 = simplify_gen_subreg (SImode, r, from_mode, 0);
   12160  1.1  mrg 
   12161  1.1  mrg   if (modified_between_p (r, insn, use_at_insn))
   12162  1.1  mrg     {
   12163  1.1  mrg       rtx r1 = gen_reg_rtx (SImode);
   12164  1.1  mrg       emit_insn_after (gen_move_insn (r1, r0), insn);
   12165  1.1  mrg       return r1;
   12166  1.1  mrg     }
   12167  1.1  mrg   else
   12168  1.1  mrg     {
   12169  1.1  mrg       sh_remove_reg_dead_or_unused_notes (insn, SUBREG_P (r)
   12170  1.1  mrg 						? REGNO (SUBREG_REG (r))
   12171  1.1  mrg 						: REGNO (r));
   12172  1.1  mrg       return r0;
   12173  1.1  mrg     }
   12174  1.1  mrg }
   12175  1.1  mrg 
   12176  1.1  mrg /* Given the current insn, which is assumed to be the *tst<mode>_t_subregs insn,
   12177  1.1  mrg    perform the necessary checks on the operands and split it accordingly.  */
   12178  1.1  mrg void
   12179  1.1  mrg sh_split_tst_subregs (rtx_insn* curr_insn, machine_mode subreg_mode,
   12180  1.1  mrg 		      int subreg_offset, rtx operands[])
   12181  1.1  mrg {
   12182  1.1  mrg   gcc_assert (subreg_mode == QImode || subreg_mode == HImode);
   12183  1.1  mrg 
   12184  1.1  mrg   sh_extending_set_of_reg eop0 = sh_find_extending_set_of_reg (operands[0],
   12185  1.1  mrg 							       curr_insn);
   12186  1.1  mrg   sh_extending_set_of_reg eop1 = sh_find_extending_set_of_reg (operands[1],
   12187  1.1  mrg 							       curr_insn);
   12188  1.1  mrg 
   12189  1.1  mrg   /* If one of the operands is known to be zero extended, that's already
   12190  1.1  mrg      sufficient to mask out the unwanted high bits.  */
   12191  1.1  mrg   if (eop0.ext_code == ZERO_EXTEND && eop0.from_mode == subreg_mode)
   12192  1.1  mrg     {
   12193  1.1  mrg       emit_insn (gen_tstsi_t (eop0.use_as_extended_reg (curr_insn),
   12194  1.1  mrg 			      operands[1]));
   12195  1.1  mrg       return;
   12196  1.1  mrg     }
   12197  1.1  mrg   if (eop1.ext_code == ZERO_EXTEND && eop1.from_mode == subreg_mode)
   12198  1.1  mrg     {
   12199  1.1  mrg       emit_insn (gen_tstsi_t (operands[0],
   12200  1.1  mrg 			      eop1.use_as_extended_reg (curr_insn)));
   12201  1.1  mrg       return;
   12202  1.1  mrg     }
   12203  1.1  mrg 
   12204  1.1  mrg   /* None of the operands seem to be zero extended.
   12205  1.1  mrg      If both are sign extended it's OK, too.  */
   12206  1.1  mrg   if (eop0.ext_code == SIGN_EXTEND && eop1.ext_code == SIGN_EXTEND
   12207  1.1  mrg       && eop0.from_mode == subreg_mode && eop1.from_mode == subreg_mode)
   12208  1.1  mrg     {
   12209  1.1  mrg       emit_insn (gen_tstsi_t (eop0.use_as_extended_reg (curr_insn),
   12210  1.1  mrg 			      eop1.use_as_extended_reg (curr_insn)));
   12211  1.1  mrg       return;
   12212  1.1  mrg     }
   12213  1.1  mrg 
   12214  1.1  mrg   /* Otherwise we have to insert a zero extension on one of the operands to
   12215  1.1  mrg      mask out the unwanted high bits.
   12216  1.1  mrg      Prefer the operand that has no known extension.  */
   12217  1.1  mrg   if (eop0.ext_code != UNKNOWN && eop1.ext_code == UNKNOWN)
   12218  1.1  mrg     std::swap (operands[0], operands[1]);
   12219  1.1  mrg 
   12220  1.1  mrg   rtx tmp0 = gen_reg_rtx (SImode);
   12221  1.1  mrg   rtx tmp1 = simplify_gen_subreg (subreg_mode, operands[0],
   12222  1.1  mrg 				  GET_MODE (operands[0]), subreg_offset);
   12223  1.1  mrg   emit_insn (subreg_mode == QImode
   12224  1.1  mrg 	     ? gen_zero_extendqisi2 (tmp0, tmp1)
   12225  1.1  mrg 	     : gen_zero_extendhisi2 (tmp0, tmp1));
   12226  1.1  mrg   emit_insn (gen_tstsi_t (tmp0, operands[1]));
   12227  1.1  mrg }
   12228  1.1  mrg 
   12229  1.1  mrg /* A helper class to increment/decrement a counter variable each time a
   12230  1.1  mrg    function is entered/left.  */
   12231  1.1  mrg class scope_counter
   12232  1.1  mrg {
   12233  1.1  mrg public:
   12234  1.1  mrg   scope_counter (int& counter) : m_counter (counter) { ++m_counter; }
   12235  1.1  mrg 
   12236  1.1  mrg   ~scope_counter (void)
   12237  1.1  mrg   {
   12238  1.1  mrg     --m_counter;
   12239  1.1  mrg     gcc_assert (m_counter >= 0);
   12240  1.1  mrg   }
   12241  1.1  mrg 
   12242  1.1  mrg   int count (void) const { return m_counter; }
   12243  1.1  mrg 
   12244  1.1  mrg private:
   12245  1.1  mrg   int& m_counter;
   12246  1.1  mrg };
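
/* Illustrative usage of scope_counter (the counter variable and function
   are hypothetical):

	static int my_reent_count = 0;

	bool my_recursive_check (rtx x)
	{
	  scope_counter recursion (my_reent_count);
	  if (recursion.count () > 1)
	    return false;
	  ...
	}

   The counter is decremented again when the scope_counter object goes out
   of scope, including on early returns.  */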
   12247  1.1  mrg 
   12248  1.1  mrg /* Given an rtx x, determine whether the expression can be used to create
    12249  1.1  mrg    an insn that calculates x and stores the result in the T bit.
    12250  1.1  mrg    This is used by the 'treg_set_expr' predicate to construct insn sequences
   12251  1.1  mrg    where T bit results are fed into other insns, such as addc, subc, negc
   12252  1.1  mrg    insns.
   12253  1.1  mrg 
   12254  1.1  mrg    FIXME: The patterns that expand 'treg_set_expr' operands tend to
   12255  1.1  mrg    distinguish between 'positive' and 'negative' forms.  For now this has to
   12256  1.1  mrg    be done in the preparation code.  We could also introduce
   12257  1.1  mrg    'pos_treg_set_expr' and 'neg_treg_set_expr' predicates for that and write
    12258  1.1  mrg    two different patterns for the 'positive' and 'negative' forms.  However,
    12259  1.1  mrg    the total number of lines of code seems to be about the same, and the
   12260  1.1  mrg    '{pos|neg}_treg_set_expr' predicates would be more expensive, because the
   12261  1.1  mrg    recog function would need to look inside the expression by temporarily
   12262  1.1  mrg    splitting it.  */
   12263  1.1  mrg static int sh_recog_treg_set_expr_reent_count = 0;
   12264  1.1  mrg 
   12265  1.1  mrg bool
   12266  1.1  mrg sh_recog_treg_set_expr (rtx op, machine_mode mode)
   12267  1.1  mrg {
   12268  1.1  mrg   scope_counter recursion (sh_recog_treg_set_expr_reent_count);
   12269  1.1  mrg 
   12270  1.1  mrg   /* Limit the recursion count to avoid nested expressions which we can't
   12271  1.1  mrg      resolve to a single treg set insn.  */
   12272  1.1  mrg   if (recursion.count () > 1)
   12273  1.1  mrg     return false;
   12274  1.1  mrg 
   12275  1.1  mrg   /* Early accept known possible operands before doing recog.  */
   12276  1.1  mrg   if (op == const0_rtx || op == const1_rtx || t_reg_operand (op, mode)
   12277  1.1  mrg       || negt_reg_operand (op, mode))
   12278  1.1  mrg     return true;
   12279  1.1  mrg 
   12280  1.1  mrg   /* Early reject impossible operands before doing recog.
   12281  1.1  mrg      There are some (set ((t) (subreg ...))) patterns, but we must be careful
   12282  1.1  mrg      not to allow any invalid reg-reg or mem-reg moves, or else other passes
   12283  1.1  mrg      such as lower-subreg will bail out.  Some insns such as SH4A movua are
   12284  1.1  mrg      done with UNSPEC, so must reject those, too, or else it would result
   12285  1.1  mrg      in an invalid reg -> treg move.  */
   12286  1.1  mrg   if (CONST_INT_P (op) || register_operand (op, mode)
   12287  1.1  mrg       || memory_operand (op, mode) || sh_unspec_insn_p (op))
   12288  1.1  mrg     return false;
   12289  1.1  mrg 
   12290  1.1  mrg   if (!can_create_pseudo_p ())
   12291  1.1  mrg     return false;
   12292  1.1  mrg 
   12293  1.1  mrg   /* expand_debug_locations may call this to compute rtx costs at
    12294  1.1  mrg      a very early stage.  In that case, don't make new insns here to
   12295  1.1  mrg      avoid codegen differences with -g. */
   12296  1.1  mrg   if (currently_expanding_to_rtl)
   12297  1.1  mrg     return false;
   12298  1.1  mrg 
   12299  1.1  mrg   /* We are going to invoke recog in a re-entrant way and thus
   12300  1.1  mrg      have to capture its current state and restore it afterwards.  */
   12301  1.1  mrg   recog_data_d prev_recog_data = recog_data;
   12302  1.1  mrg 
   12303  1.1  mrg   /* Note we can't use insn_raw here since that increases the uid
   12304  1.1  mrg      and could cause debug compare differences; this insn never leaves
   12305  1.1  mrg      this function so create a dummy one. */
   12306  1.1  mrg   rtx_insn* i = as_a <rtx_insn *> (rtx_alloc (INSN));
   12307  1.1  mrg 
   12308  1.1  mrg   INSN_UID (i) = 1;
   12309  1.1  mrg   PATTERN (i) = gen_rtx_SET (get_t_reg_rtx (), op);
   12310  1.1  mrg   INSN_CODE (i) = -1;
   12311  1.1  mrg   REG_NOTES (i) = NULL;
   12312  1.1  mrg   INSN_LOCATION (i) = curr_insn_location ();
   12313  1.1  mrg   BLOCK_FOR_INSN (i) = NULL;
   12314  1.1  mrg   SET_PREV_INSN (i) = NULL;
   12315  1.1  mrg   SET_NEXT_INSN (i) = NULL;
   12316  1.1  mrg 
   12317  1.1  mrg   /* If the comparison op doesn't have a result mode, set it to SImode.  */
   12318  1.1  mrg   machine_mode prev_op_mode = GET_MODE (op);
   12319  1.1  mrg   if (COMPARISON_P (op) && prev_op_mode == VOIDmode)
   12320  1.1  mrg     PUT_MODE (op, SImode);
   12321  1.1  mrg 
   12322  1.1  mrg   int result = recog (PATTERN (i), i, 0);
   12323  1.1  mrg 
   12324  1.1  mrg   /* It seems there is no insn like that.  Create a negated version and
   12325  1.1  mrg      try again.  If we hit a negated form, we'll allow that and append a
   12326  1.1  mrg      nott sequence when splitting out the insns.  Insns that do the split
   12327  1.1  mrg      can then remove the trailing nott if they know how to deal with it.  */
   12328  1.1  mrg   if (result < 0 && COMPARISON_P (op))
   12329  1.1  mrg     {
   12330  1.1  mrg       machine_mode cmp_mode = GET_MODE (XEXP (op, 0));
   12331  1.1  mrg       if (cmp_mode == VOIDmode)
   12332  1.1  mrg         cmp_mode = GET_MODE (XEXP (op, 1));
   12333  1.1  mrg 
   12334  1.1  mrg       rtx_code prev_code = GET_CODE (op);
   12335  1.1  mrg       PUT_CODE (op, reverse_condition (GET_CODE (op)));
   12336  1.1  mrg       result = recog (PATTERN (i), i, 0);
   12337  1.1  mrg       PUT_CODE (op, prev_code);
   12338  1.1  mrg     }
   12339  1.1  mrg 
   12340  1.1  mrg   PUT_MODE (op, prev_op_mode);
   12341  1.1  mrg   recog_data = prev_recog_data;
   12342  1.1  mrg   return result >= 0;
   12343  1.1  mrg }
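
/* Illustrative sketch of how the predicate above gets used (register
   numbers are hypothetical): when combine proposes something like

	(set (reg:SI r100)
	     (plus:SI (plus:SI (reg:SI r101) (reg:SI r102))
		      (eq:SI (reg:SI r103) (const_int 0))))

   the (eq ...) sub-expression is checked with sh_recog_treg_set_expr to
   see whether it can be computed into the T bit (here e.g. by a tst insn),
   in which case the whole expression can be matched as an addc style
   sequence.  */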
   12344  1.1  mrg 
   12345  1.1  mrg /* Returns true when recog of a 'treg_set_expr' is currently in progress.
   12346  1.1  mrg    This can be used as a condition for insn/split patterns to allow certain
    12347  1.1  mrg    T bit setting patterns only to be matched as subexpressions of other
   12348  1.1  mrg    patterns.  */
   12349  1.1  mrg bool
   12350  1.1  mrg sh_in_recog_treg_set_expr (void)
   12351  1.1  mrg {
   12352  1.1  mrg   return sh_recog_treg_set_expr_reent_count > 0;
   12353  1.1  mrg }
   12354  1.1  mrg 
   12355  1.1  mrg /* Given an rtx x, which is assumed to be some expression that has been
   12356  1.1  mrg    matched by the 'treg_set_expr' predicate before, split and emit the
   12357  1.1  mrg    insns that are necessary to calculate the expression and store the result
   12358  1.1  mrg    in the T bit.
    12359  1.1  mrg    The splitting is done recursively, similar to 'try_split' in emit-rtl.cc.
   12360  1.1  mrg    Unfortunately we can't use 'try_split' here directly, as it tries to invoke
   12361  1.1  mrg    'delete_insn' which then causes the DF parts to bail out, because we
   12362  1.1  mrg    currently are inside another gen_split* function and would invoke
   12363  1.1  mrg    'try_split' in a reentrant way.  */
   12364  1.1  mrg static std::pair<rtx_insn*, rtx_insn*>
   12365  1.1  mrg sh_try_split_insn_simple (rtx_insn* i, rtx_insn* curr_insn, int n = 0)
   12366  1.1  mrg {
   12367  1.1  mrg   if (dump_file)
   12368  1.1  mrg     {
   12369  1.1  mrg       fprintf (dump_file, "sh_try_split_insn_simple n = %d i = \n", n);
   12370  1.1  mrg       print_rtl_single (dump_file, i);
   12371  1.1  mrg       fprintf (dump_file, "\n");
   12372  1.1  mrg     }
   12373  1.1  mrg 
   12374  1.1  mrg   rtx_insn* seq = split_insns (PATTERN (i), curr_insn);
   12375  1.1  mrg 
   12376  1.1  mrg   if (seq == NULL)
   12377  1.1  mrg     return std::make_pair (i, i);
   12378  1.1  mrg 
   12379  1.1  mrg   /* Avoid infinite splitter loops if any insn of the result matches
   12380  1.1  mrg      the original pattern.  */
   12381  1.1  mrg   for (rtx_insn* s = seq; s != NULL; s = NEXT_INSN (s))
   12382  1.1  mrg     if (INSN_P (s) && rtx_equal_p (PATTERN (s), PATTERN (i)))
   12383  1.1  mrg       return std::make_pair (i, i);
   12384  1.1  mrg 
   12385  1.1  mrg   unshare_all_rtl_in_chain (seq);
   12386  1.1  mrg 
   12387  1.1  mrg   /* 'seq' is now a replacement for 'i'.  Assuming that 'i' is an insn in
   12388  1.1  mrg      a linked list, replace the single insn with the new insns.  */
   12389  1.1  mrg   rtx_insn* seqlast = seq;
   12390  1.1  mrg   while (NEXT_INSN (seqlast) != NULL)
   12391  1.1  mrg     seqlast = NEXT_INSN (seqlast);
   12392  1.1  mrg 
   12393  1.1  mrg   if (rtx_insn* iprev = PREV_INSN (i))
   12394  1.1  mrg     SET_NEXT_INSN (iprev) = seq;
   12395  1.1  mrg   if (rtx_insn* inext = NEXT_INSN (i))
   12396  1.1  mrg     SET_PREV_INSN (inext) = seqlast;
   12397  1.1  mrg 
   12398  1.1  mrg   SET_PREV_INSN (seq) = PREV_INSN (i);
   12399  1.1  mrg   SET_NEXT_INSN (seqlast) = NEXT_INSN (i);
   12400  1.1  mrg 
   12401  1.1  mrg   SET_PREV_INSN (i) = NULL;
   12402  1.1  mrg   SET_NEXT_INSN (i) = NULL;
   12403  1.1  mrg 
   12404  1.1  mrg   /* Recursively split all insns.  */
   12405  1.1  mrg   for (i = seq; ; i = NEXT_INSN (i))
   12406  1.1  mrg     {
   12407  1.1  mrg       std::pair<rtx_insn*, rtx_insn*> ii =
   12408  1.1  mrg 	  sh_try_split_insn_simple (i, curr_insn, n + 1);
   12409  1.1  mrg       if (i == seq)
   12410  1.1  mrg 	seq = ii.first;
   12411  1.1  mrg       if (i == seqlast)
   12412  1.1  mrg 	{
   12413  1.1  mrg 	  seqlast = ii.second;
   12414  1.1  mrg 	  break;
   12415  1.1  mrg 	}
   12416  1.1  mrg       i = ii.first;
   12417  1.1  mrg     }
   12418  1.1  mrg 
   12419  1.1  mrg   return std::make_pair (seq, seqlast);
   12420  1.1  mrg }
   12421  1.1  mrg 
   12422  1.1  mrg sh_treg_insns
   12423  1.1  mrg sh_split_treg_set_expr (rtx x, rtx_insn* curr_insn)
   12424  1.1  mrg {
   12425  1.1  mrg   if (t_reg_operand (x, VOIDmode))
   12426  1.1  mrg     return sh_treg_insns ();
   12427  1.1  mrg 
   12428  1.1  mrg   scope_counter in_treg_set_expr (sh_recog_treg_set_expr_reent_count);
   12429  1.1  mrg 
   12430  1.1  mrg   rtx_insn* i = make_insn_raw (gen_rtx_SET (get_t_reg_rtx (), x));
   12431  1.1  mrg   SET_PREV_INSN (i) = NULL;
   12432  1.1  mrg   SET_NEXT_INSN (i) = NULL;
   12433  1.1  mrg 
   12434  1.1  mrg   if (dump_file)
   12435  1.1  mrg     {
   12436  1.1  mrg       fprintf (dump_file, "split_treg_set_expr insn:\n");
   12437  1.1  mrg       print_rtl (dump_file, i);
   12438  1.1  mrg       fprintf (dump_file, "\n");
   12439  1.1  mrg     }
   12440  1.1  mrg 
   12441  1.1  mrg   /* If the insn is not found, we will try a negated form and append
   12442  1.1  mrg      a nott.  */
   12443  1.1  mrg   bool append_nott = false;
   12444  1.1  mrg 
   12445  1.1  mrg   /* We are going to invoke recog/split_insns in a re-entrant way and thus
   12446  1.1  mrg      have to capture its current state and restore it afterwards.  */
   12447  1.1  mrg   recog_data_d prev_recog_data = recog_data;
   12448  1.1  mrg 
   12449  1.1  mrg   if (negt_reg_operand (x, GET_MODE (x)))
   12450  1.1  mrg     {
   12451  1.1  mrg       /* This is a normal movt followed by a nott.  It will be converted
   12452  1.1  mrg 	 into a movrt after initial expansion.  */
   12453  1.1  mrg       XEXP (PATTERN (i), 1) = get_t_reg_rtx ();
   12454  1.1  mrg       append_nott = true;
   12455  1.1  mrg     }
   12456  1.1  mrg   else
   12457  1.1  mrg     {
   12458  1.1  mrg       /* If the comparison op doesn't have a mode set, set it to SImode.  */
   12459  1.1  mrg       if (COMPARISON_P (x) && GET_MODE (x) == VOIDmode)
   12460  1.1  mrg 	PUT_MODE (x, SImode);
   12461  1.1  mrg 
   12462  1.1  mrg       int insn_code = recog (PATTERN (i), i, 0);
   12463  1.1  mrg 
   12464  1.1  mrg       if (insn_code < 0 && COMPARISON_P (x))
   12465  1.1  mrg 	{
   12466  1.1  mrg 	  machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
   12467  1.1  mrg 	  if (cmp_mode == VOIDmode)
   12468  1.1  mrg 	    cmp_mode = GET_MODE (XEXP (x, 1));
   12469  1.1  mrg 
   12470  1.1  mrg 	  PUT_CODE (x, reverse_condition (GET_CODE (x)));
   12471  1.1  mrg 	  insn_code = recog (PATTERN (i), i, 0);
   12472  1.1  mrg 	  append_nott = true;
   12473  1.1  mrg 	}
   12474  1.1  mrg 
   12475  1.1  mrg       gcc_assert (insn_code >= 0);
   12476  1.1  mrg     }
   12477  1.1  mrg 
   12478  1.1  mrg   /* Try to recursively split the insn.  Some insns might refuse to split
   12479  1.1  mrg      any further while we are in the treg_set_expr splitting phase.  They
   12480  1.1  mrg      will be emitted as part of the outer insn and then split again.  */
   12481  1.1  mrg   std::pair<rtx_insn*, rtx_insn*> insnlist =
   12482  1.1  mrg 	sh_try_split_insn_simple (i, curr_insn);
   12483  1.1  mrg 
   12484  1.1  mrg   /* Restore recog state.  */
   12485  1.1  mrg   recog_data = prev_recog_data;
   12486  1.1  mrg 
   12487  1.1  mrg   rtx_insn* nott_insn = sh_is_nott_insn (insnlist.second)
   12488  1.1  mrg 			? insnlist.second
   12489  1.1  mrg 			: NULL;
   12490  1.1  mrg   if (dump_file)
   12491  1.1  mrg     {
   12492  1.1  mrg       fprintf (dump_file, "split_treg_set_expr insnlist:\n");
   12493  1.1  mrg       print_rtl (dump_file, insnlist.first);
   12494  1.1  mrg       fprintf (dump_file, "\n");
   12495  1.1  mrg 
   12496  1.1  mrg       if (nott_insn != NULL)
   12497  1.1  mrg 	fprintf (dump_file, "trailing nott insn %d\n", INSN_UID (nott_insn));
   12498  1.1  mrg     }
   12499  1.1  mrg 
   12500  1.1  mrg   emit_insn (insnlist.first);
   12501  1.1  mrg 
   12502  1.1  mrg   if (nott_insn != NULL && append_nott)
   12503  1.1  mrg     {
   12504  1.1  mrg       if (dump_file)
   12505  1.1  mrg 	fprintf (dump_file, "removing trailing nott\n");
   12506  1.1  mrg       remove_insn (nott_insn);
   12507  1.1  mrg       nott_insn = NULL;
   12508  1.1  mrg       append_nott = false;
   12509  1.1  mrg     }
   12510  1.1  mrg 
   12511  1.1  mrg   if (append_nott)
   12512  1.1  mrg     nott_insn = emit_insn (gen_nott (get_t_reg_rtx ()));
   12513  1.1  mrg 
   12514  1.1  mrg   rtx_insn* first_insn = get_insns ();
   12515  1.1  mrg 
   12516  1.1  mrg   if (dump_file)
   12517  1.1  mrg     {
   12518  1.1  mrg       fprintf (dump_file, "resulting insns:\n");
   12519  1.1  mrg       print_rtl (dump_file, first_insn);
   12520  1.1  mrg       fprintf (dump_file, "\n");
   12521  1.1  mrg     }
   12522  1.1  mrg 
   12523  1.1  mrg   return sh_treg_insns (first_insn, nott_insn);
   12524  1.1  mrg }
   12525  1.1  mrg 
   12526  1.1  mrg /*------------------------------------------------------------------------------
   12527  1.1  mrg   Mode switching support code.
   12528  1.1  mrg */
   12529  1.1  mrg 
   12530  1.1  mrg static void
   12531  1.1  mrg sh_emit_mode_set (int entity ATTRIBUTE_UNUSED, int mode,
   12532  1.1  mrg 		  int prev_mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
   12533  1.1  mrg {
   12534  1.1  mrg   if ((TARGET_SH4A_FP || TARGET_FPU_SH4_300)
   12535  1.1  mrg       && prev_mode != FP_MODE_NONE && prev_mode != mode)
   12536  1.1  mrg     {
   12537  1.1  mrg       emit_insn (gen_toggle_pr ());
   12538  1.1  mrg       if (TARGET_FMOVD)
   12539  1.1  mrg 	emit_insn (gen_toggle_sz ());
   12540  1.1  mrg     }
   12541  1.1  mrg   else if (mode != FP_MODE_NONE)
   12542  1.1  mrg     {
   12543  1.1  mrg       rtx tmp = gen_reg_rtx (SImode);
   12544  1.1  mrg       emit_insn (gen_sts_fpscr (tmp));
   12545  1.1  mrg       rtx i = NULL;
   12546  1.1  mrg 
   12547  1.1  mrg       const unsigned HOST_WIDE_INT fpbits =
   12548  1.1  mrg 	  TARGET_FMOVD ? (FPSCR_PR | FPSCR_SZ) : FPSCR_PR;
   12549  1.1  mrg 
   12550  1.1  mrg       if (prev_mode != FP_MODE_NONE && prev_mode != mode)
   12551  1.1  mrg 	i = gen_xorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
   12552  1.1  mrg       else if (mode == FP_MODE_SINGLE)
   12553  1.1  mrg 	i = gen_andsi3 (tmp, tmp, force_reg (SImode, GEN_INT (~fpbits)));
   12554  1.1  mrg       else if (mode == FP_MODE_DOUBLE)
   12555  1.1  mrg 	i = gen_iorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
   12556  1.1  mrg       else
   12557  1.1  mrg 	gcc_unreachable ();
   12558  1.1  mrg 
   12559  1.1  mrg       emit_insn (i);
   12560  1.1  mrg       emit_insn (gen_lds_fpscr (tmp));
   12561  1.1  mrg     }
   12562  1.1  mrg }
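
/* Illustrative sketch of the FPSCR handling above on a non-SH4A target
   with TARGET_FMOVD (register names and the way the constant is
   materialized are hypothetical): switching into FP_MODE_DOUBLE emits
   roughly

	sts	fpscr,r1
	mov.l	.Lfpbits,r2	! FPSCR_PR | FPSCR_SZ
	or	r2,r1
	lds	r1,fpscr

   while switching between the two FP modes uses an xor of the same bits
   and entering FP_MODE_SINGLE uses an and with their complement.  */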
   12563  1.1  mrg 
   12564  1.1  mrg static int
   12565  1.1  mrg sh_mode_needed (int entity ATTRIBUTE_UNUSED, rtx_insn *insn)
   12566  1.1  mrg {
   12567  1.1  mrg   return recog_memoized (insn) >= 0  ? get_attr_fp_mode (insn) : FP_MODE_NONE;
   12568  1.1  mrg }
   12569  1.1  mrg 
   12570  1.1  mrg static int
   12571  1.1  mrg sh_mode_after (int entity ATTRIBUTE_UNUSED, int mode, rtx_insn *insn)
   12572  1.1  mrg {
   12573  1.1  mrg   if (TARGET_HITACHI && recog_memoized (insn) >= 0 &&
   12574  1.1  mrg       get_attr_fp_set (insn) != FP_SET_NONE)
   12575  1.1  mrg     return (int) get_attr_fp_set (insn);
   12576  1.1  mrg   else
   12577  1.1  mrg     return mode;
   12578  1.1  mrg }
   12579  1.1  mrg 
   12580  1.1  mrg static int
   12581  1.1  mrg sh_mode_entry (int entity ATTRIBUTE_UNUSED)
   12582  1.1  mrg {
   12583  1.1  mrg   return NORMAL_MODE (entity);
   12584  1.1  mrg }
   12585  1.1  mrg 
   12586  1.1  mrg static int
   12587  1.1  mrg sh_mode_exit (int entity ATTRIBUTE_UNUSED)
   12588  1.1  mrg {
   12589  1.1  mrg   return sh_cfun_attr_renesas_p () ? FP_MODE_NONE : NORMAL_MODE (entity);
   12590  1.1  mrg }
   12591  1.1  mrg 
   12592  1.1  mrg static int
   12593  1.1  mrg sh_mode_priority (int entity ATTRIBUTE_UNUSED, int n)
   12594  1.1  mrg {
   12595  1.1  mrg   return ((TARGET_FPU_SINGLE != 0) ^ (n) ? FP_MODE_SINGLE : FP_MODE_DOUBLE);
   12596  1.1  mrg }
   12597  1.1  mrg 
   12598  1.1  mrg /*------------------------------------------------------------------------------
   12599  1.1  mrg   Misc
   12600  1.1  mrg */
   12601  1.1  mrg 
   12602  1.1  mrg /* Return true if we use LRA instead of reload pass.  */
   12603  1.1  mrg bool
   12604  1.1  mrg sh_lra_p (void)
   12605  1.1  mrg {
   12606  1.1  mrg   return sh_lra_flag;
   12607  1.1  mrg }
   12608  1.1  mrg 
   12609  1.1  mrg /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */
   12610  1.1  mrg 
   12611  1.1  mrg static bool
   12612  1.1  mrg sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
   12613  1.1  mrg 				   unsigned int align,
   12614  1.1  mrg 				   enum by_pieces_operation op,
   12615  1.1  mrg 				   bool speed_p)
   12616  1.1  mrg {
   12617  1.1  mrg   switch (op)
   12618  1.1  mrg     {
   12619  1.1  mrg       case MOVE_BY_PIECES:
   12620  1.1  mrg 	return by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1, op)
   12621  1.1  mrg 	  < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
   12622  1.1  mrg       case STORE_BY_PIECES:
   12623  1.1  mrg       case SET_BY_PIECES:
   12624  1.1  mrg 	return by_pieces_ninsns (size, align, STORE_MAX_PIECES + 1, op)
   12625  1.1  mrg 	  < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
   12626  1.1  mrg       default:
   12627  1.1  mrg 	return default_use_by_pieces_infrastructure_p (size, align,
   12628  1.1  mrg 						       op, speed_p);
   12629  1.1  mrg     }
   12630  1.1  mrg }
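
/* As a rough worked example of the heuristic above: a 16 byte block move
   with 32 bit aligned operands takes about four SImode moves, which is
   below the speed limit of 16 insns, so it is expanded inline by pieces.
   The same copy with unknown (byte) alignment would take 16 QImode moves
   and thus exceeds the limit of 2 that applies when optimizing for size or
   when the alignment is below 32 bits, so it is left to the generic block
   move expansion instead.  */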
   12631  1.1  mrg 
   12632  1.1  mrg bool
   12633  1.1  mrg sh_cannot_force_const_mem_p (machine_mode mode ATTRIBUTE_UNUSED,
   12634  1.1  mrg 			     rtx x ATTRIBUTE_UNUSED)
   12635  1.1  mrg {
   12636  1.1  mrg   return TARGET_FDPIC;
   12637  1.1  mrg }
   12638  1.1  mrg 
   12639  1.1  mrg /* Emit insns to load the function address from FUNCDESC (an FDPIC
   12640  1.1  mrg    function descriptor) into r1 and the GOT address into r12,
   12641  1.1  mrg    returning an rtx for r1.  */
   12642  1.1  mrg 
   12643  1.1  mrg rtx
   12644  1.1  mrg sh_load_function_descriptor (rtx funcdesc)
   12645  1.1  mrg {
   12646  1.1  mrg   rtx r1 = gen_rtx_REG (Pmode, R1_REG);
   12647  1.1  mrg   rtx pic_reg = gen_rtx_REG (Pmode, PIC_REG);
   12648             rtx fnaddr = gen_rtx_MEM (Pmode, funcdesc);
   12649             rtx gotaddr = gen_rtx_MEM (Pmode, plus_constant (Pmode, funcdesc, 4));
   12650           
   12651             emit_move_insn (r1, fnaddr);
   12652             /* The ABI requires the entry point address to be loaded first, so
   12653                prevent the load from being moved after that of the GOT
   12654                address.  */
   12655             emit_insn (gen_blockage ());
   12656             emit_move_insn (pic_reg, gotaddr);
   12657             return r1;
   12658           }
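
/* Illustrative sketch of the FDPIC function descriptor layout assumed by
   the function above: the descriptor consists of two words, the function
   entry point at offset 0 and the callee's GOT address at offset 4.  With
   the descriptor address in a hypothetical register r0, the emitted loads
   correspond roughly to

	mov.l	@r0,r1		! function entry point
	mov.l	@(4,r0),r12	! GOT address of the callee

   and the blockage insn keeps the entry point load from being scheduled
   after the GOT load, as required by the ABI.  */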
   12659           
   12660           /* Return an rtx holding the initial value of the FDPIC register (the
   12661              FDPIC pointer passed in from the caller).  */
   12662           
   12663           rtx
   12664           sh_get_fdpic_reg_initial_val (void)
   12665           {
   12666             return get_hard_reg_initial_val (Pmode, PIC_REG);
   12667           }
   12668           
   12669           #include "gt-sh.h"
   12670