/* Subroutines for insn-output.cc for Motorola 68000 family.
   Copyright (C) 1987-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

/* NOTE: GCC backend include order is significant: config.h/system.h must
   come first and target-def.h last (see the tail of this include list on
   the following lines); do not alphabetize.  */
#include "config.h"
#define INCLUDE_STRING
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include
"reload.h" 54 1.1 mrg #include "tm_p.h" 55 1.1 mrg #include "target.h" 56 1.1 mrg #include "debug.h" 57 1.1 mrg #include "cfgrtl.h" 58 1.1 mrg #include "cfganal.h" 59 1.1 mrg #include "lcm.h" 60 1.1 mrg #include "cfgbuild.h" 61 1.1 mrg #include "cfgcleanup.h" 62 1.1 mrg /* ??? Need to add a dependency between m68k.o and sched-int.h. */ 63 1.1 mrg #include "sched-int.h" 64 1.1 mrg #include "insn-codes.h" 65 1.1 mrg #include "opts.h" 66 1.1 mrg #include "optabs.h" 67 1.1 mrg #include "builtins.h" 68 1.1 mrg #include "rtl-iter.h" 69 1.1 mrg #include "toplev.h" 70 1.1 mrg 71 1.1 mrg /* This file should be included last. */ 72 1.1 mrg #include "target-def.h" 73 1.1 mrg 74 1.1 mrg enum reg_class regno_reg_class[] = 75 1.1 mrg { 76 1.1 mrg DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS, 77 1.1 mrg DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS, 78 1.1 mrg ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, 79 1.1 mrg ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, 80 1.1 mrg FP_REGS, FP_REGS, FP_REGS, FP_REGS, 81 1.1 mrg FP_REGS, FP_REGS, FP_REGS, FP_REGS, 82 1.1 mrg ADDR_REGS 83 1.1 mrg }; 84 1.1 mrg 85 1.1 mrg 86 1.1 mrg /* The minimum number of integer registers that we want to save with the 87 1.1 mrg movem instruction. Using two movel instructions instead of a single 88 1.1 mrg moveml is about 15% faster for the 68020 and 68030 at no expense in 89 1.1 mrg code size. */ 90 1.1 mrg #define MIN_MOVEM_REGS 3 91 1.1 mrg 92 1.1 mrg /* The minimum number of floating point registers that we want to save 93 1.1 mrg with the fmovem instruction. */ 94 1.1 mrg #define MIN_FMOVEM_REGS 1 95 1.1 mrg 96 1.1 mrg /* Structure describing stack frame layout. */ 97 1.1 mrg struct m68k_frame 98 1.1 mrg { 99 1.1 mrg /* Stack pointer to frame pointer offset. */ 100 1.1 mrg HOST_WIDE_INT offset; 101 1.1 mrg 102 1.1 mrg /* Offset of FPU registers. */ 103 1.1 mrg HOST_WIDE_INT foffset; 104 1.1 mrg 105 1.1 mrg /* Frame size in bytes (rounded up). 
*/ 106 1.1 mrg HOST_WIDE_INT size; 107 1.1 mrg 108 1.1 mrg /* Data and address register. */ 109 1.1 mrg int reg_no; 110 1.1 mrg unsigned int reg_mask; 111 1.1 mrg 112 1.1 mrg /* FPU registers. */ 113 1.1 mrg int fpu_no; 114 1.1 mrg unsigned int fpu_mask; 115 1.1 mrg 116 1.1 mrg /* Offsets relative to ARG_POINTER. */ 117 1.1 mrg HOST_WIDE_INT frame_pointer_offset; 118 1.1 mrg HOST_WIDE_INT stack_pointer_offset; 119 1.1 mrg 120 1.1 mrg /* Function which the above information refers to. */ 121 1.1 mrg int funcdef_no; 122 1.1 mrg }; 123 1.1 mrg 124 1.1 mrg /* Current frame information calculated by m68k_compute_frame_layout(). */ 125 1.1 mrg static struct m68k_frame current_frame; 126 1.1 mrg 127 1.1 mrg /* Structure describing an m68k address. 128 1.1 mrg 129 1.1 mrg If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET, 130 1.1 mrg with null fields evaluating to 0. Here: 131 1.1 mrg 132 1.1 mrg - BASE satisfies m68k_legitimate_base_reg_p 133 1.1 mrg - INDEX satisfies m68k_legitimate_index_reg_p 134 1.1 mrg - OFFSET satisfies m68k_legitimate_constant_address_p 135 1.1 mrg 136 1.1 mrg INDEX is either HImode or SImode. The other fields are SImode. 137 1.1 mrg 138 1.1 mrg If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC, 139 1.1 mrg the address is (BASE)+. 
*/ 140 1.1 mrg struct m68k_address { 141 1.1 mrg enum rtx_code code; 142 1.1 mrg rtx base; 143 1.1 mrg rtx index; 144 1.1 mrg rtx offset; 145 1.1 mrg int scale; 146 1.1 mrg }; 147 1.1 mrg 148 1.1 mrg static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int, 149 1.1 mrg unsigned int); 150 1.1 mrg static int m68k_sched_issue_rate (void); 151 1.1 mrg static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int); 152 1.1 mrg static void m68k_sched_md_init_global (FILE *, int, int); 153 1.1 mrg static void m68k_sched_md_finish_global (FILE *, int); 154 1.1 mrg static void m68k_sched_md_init (FILE *, int, int); 155 1.1 mrg static void m68k_sched_dfa_pre_advance_cycle (void); 156 1.1 mrg static void m68k_sched_dfa_post_advance_cycle (void); 157 1.1 mrg static int m68k_sched_first_cycle_multipass_dfa_lookahead (void); 158 1.1 mrg 159 1.1 mrg static bool m68k_can_eliminate (const int, const int); 160 1.1 mrg static void m68k_conditional_register_usage (void); 161 1.1 mrg static bool m68k_legitimate_address_p (machine_mode, rtx, bool); 162 1.1 mrg static void m68k_option_override (void); 163 1.1 mrg static void m68k_override_options_after_change (void); 164 1.2 mrg static void m68k_init_builtins (void); 165 1.1 mrg static rtx find_addr_reg (rtx); 166 1.1 mrg static const char *singlemove_string (rtx *); 167 1.1 mrg static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, 168 1.1 mrg HOST_WIDE_INT, tree); 169 1.1 mrg static rtx m68k_struct_value_rtx (tree, int); 170 1.1 mrg static tree m68k_handle_fndecl_attribute (tree *node, tree name, 171 1.1 mrg tree args, int flags, 172 1.1 mrg bool *no_add_attrs); 173 1.1 mrg static void m68k_compute_frame_layout (void); 174 1.1 mrg static bool m68k_save_reg (unsigned int regno, bool interrupt_handler); 175 1.1 mrg static bool m68k_ok_for_sibcall_p (tree, tree); 176 1.1 mrg static bool m68k_tls_symbol_p (rtx); 177 1.1 mrg static rtx m68k_legitimize_address (rtx, rtx, machine_mode); 178 1.1 mrg static bool 
m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool); 179 1.1 mrg #if M68K_HONOR_TARGET_STRICT_ALIGNMENT 180 1.1 mrg static bool m68k_return_in_memory (const_tree, const_tree); 181 1.1 mrg #endif 182 1.1 mrg static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; 183 1.1 mrg static void m68k_trampoline_init (rtx, tree, rtx); 184 1.1 mrg static poly_int64 m68k_return_pops_args (tree, tree, poly_int64); 185 1.1 mrg static rtx m68k_delegitimize_address (rtx); 186 1.1 mrg static void m68k_function_arg_advance (cumulative_args_t, 187 1.1 mrg const function_arg_info &); 188 1.1 mrg static rtx m68k_function_arg (cumulative_args_t, const function_arg_info &); 189 1.1 mrg static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x); 190 1.1 mrg static bool m68k_output_addr_const_extra (FILE *, rtx); 191 1.1 mrg static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED; 192 1.1 mrg static enum flt_eval_method 193 1.1 mrg m68k_excess_precision (enum excess_precision_type); 194 1.1 mrg static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode); 195 1.1 mrg static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode); 196 1.1 mrg static bool m68k_modes_tieable_p (machine_mode, machine_mode); 197 1.1 mrg static machine_mode m68k_promote_function_mode (const_tree, machine_mode, 198 1.1 mrg int *, const_tree, int); 199 1.1 mrg static void m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int); 200 1.1 mrg 201 1.1 mrg /* Initialize the GCC target structure. 
*/ 203 1.1 mrg 204 1.1 mrg #if INT_OP_GROUP == INT_OP_DOT_WORD 205 1.1 mrg #undef TARGET_ASM_ALIGNED_HI_OP 206 1.1 mrg #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" 207 1.1 mrg #endif 208 1.1 mrg 209 1.1 mrg #if INT_OP_GROUP == INT_OP_NO_DOT 210 1.1 mrg #undef TARGET_ASM_BYTE_OP 211 1.1 mrg #define TARGET_ASM_BYTE_OP "\tbyte\t" 212 1.1 mrg #undef TARGET_ASM_ALIGNED_HI_OP 213 1.1 mrg #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t" 214 1.1 mrg #undef TARGET_ASM_ALIGNED_SI_OP 215 1.1 mrg #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t" 216 1.1 mrg #endif 217 1.1 mrg 218 1.1 mrg #if INT_OP_GROUP == INT_OP_DC 219 1.1 mrg #undef TARGET_ASM_BYTE_OP 220 1.1 mrg #define TARGET_ASM_BYTE_OP "\tdc.b\t" 221 1.1 mrg #undef TARGET_ASM_ALIGNED_HI_OP 222 1.1 mrg #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t" 223 1.1 mrg #undef TARGET_ASM_ALIGNED_SI_OP 224 1.1 mrg #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t" 225 1.1 mrg #endif 226 1.1 mrg 227 1.1 mrg #undef TARGET_ASM_UNALIGNED_HI_OP 228 1.1 mrg #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP 229 1.1 mrg #undef TARGET_ASM_UNALIGNED_SI_OP 230 1.1 mrg #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP 231 1.1 mrg 232 1.1 mrg #undef TARGET_ASM_OUTPUT_MI_THUNK 233 1.1 mrg #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk 234 1.1 mrg #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK 235 1.1 mrg #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true 236 1.1 mrg 237 1.1 mrg #undef TARGET_ASM_FILE_START_APP_OFF 238 1.1 mrg #define TARGET_ASM_FILE_START_APP_OFF true 239 1.1 mrg 240 1.1 mrg #undef TARGET_LEGITIMIZE_ADDRESS 241 1.1 mrg #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address 242 1.1 mrg 243 1.1 mrg #undef TARGET_SCHED_ADJUST_COST 244 1.1 mrg #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost 245 1.1 mrg 246 1.1 mrg #undef TARGET_SCHED_ISSUE_RATE 247 1.1 mrg #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate 248 1.1 mrg 249 1.1 mrg #undef TARGET_SCHED_VARIABLE_ISSUE 250 1.1 mrg #define 
TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue 251 1.1 mrg 252 1.1 mrg #undef TARGET_SCHED_INIT_GLOBAL 253 1.1 mrg #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global 254 1.1 mrg 255 1.1 mrg #undef TARGET_SCHED_FINISH_GLOBAL 256 1.1 mrg #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global 257 1.1 mrg 258 1.1 mrg #undef TARGET_SCHED_INIT 259 1.1 mrg #define TARGET_SCHED_INIT m68k_sched_md_init 260 1.1 mrg 261 1.1 mrg #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE 262 1.1 mrg #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle 263 1.1 mrg 264 1.1 mrg #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE 265 1.1 mrg #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle 266 1.1 mrg 267 1.1 mrg #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD 268 1.1 mrg #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ 269 1.1 mrg m68k_sched_first_cycle_multipass_dfa_lookahead 270 1.1 mrg 271 1.1 mrg #undef TARGET_OPTION_OVERRIDE 272 1.1 mrg #define TARGET_OPTION_OVERRIDE m68k_option_override 273 1.2 mrg 274 1.2 mrg #undef TARGET_INIT_BUILTINS 275 1.2 mrg #define TARGET_INIT_BUILTINS m68k_init_builtins 276 1.1 mrg 277 1.1 mrg #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE 278 1.1 mrg #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change 279 1.1 mrg 280 1.1 mrg #undef TARGET_RTX_COSTS 281 1.1 mrg #define TARGET_RTX_COSTS m68k_rtx_costs 282 1.1 mrg 283 1.1 mrg #undef TARGET_ATTRIBUTE_TABLE 284 1.1 mrg #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table 285 1.1 mrg 286 1.1 mrg #undef TARGET_PROMOTE_PROTOTYPES 287 1.1 mrg #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true 288 1.1 mrg 289 1.1 mrg #undef TARGET_STRUCT_VALUE_RTX 290 1.1 mrg #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx 291 1.1 mrg 292 1.1 mrg #undef TARGET_CANNOT_FORCE_CONST_MEM 293 1.1 mrg #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem 294 1.1 mrg 295 1.1 mrg #undef TARGET_FUNCTION_OK_FOR_SIBCALL 
296 1.1 mrg #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p 297 1.1 mrg 298 1.1 mrg #if M68K_HONOR_TARGET_STRICT_ALIGNMENT 299 1.1 mrg #undef TARGET_RETURN_IN_MEMORY 300 1.1 mrg #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory 301 1.1 mrg #endif 302 1.1 mrg 303 1.1 mrg #ifdef HAVE_AS_TLS 304 1.1 mrg #undef TARGET_HAVE_TLS 305 1.1 mrg #define TARGET_HAVE_TLS (true) 306 1.1 mrg 307 1.1 mrg #undef TARGET_ASM_OUTPUT_DWARF_DTPREL 308 1.1 mrg #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel 309 1.1 mrg #endif 310 1.1 mrg 311 1.1 mrg #undef TARGET_LRA_P 312 1.1 mrg #define TARGET_LRA_P hook_bool_void_false 313 1.1 mrg 314 1.1 mrg #undef TARGET_LEGITIMATE_ADDRESS_P 315 1.1 mrg #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p 316 1.1 mrg 317 1.1 mrg #undef TARGET_CAN_ELIMINATE 318 1.1 mrg #define TARGET_CAN_ELIMINATE m68k_can_eliminate 319 1.1 mrg 320 1.1 mrg #undef TARGET_CONDITIONAL_REGISTER_USAGE 321 1.1 mrg #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage 322 1.1 mrg 323 1.1 mrg #undef TARGET_TRAMPOLINE_INIT 324 1.1 mrg #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init 325 1.1 mrg 326 1.1 mrg #undef TARGET_RETURN_POPS_ARGS 327 1.1 mrg #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args 328 1.1 mrg 329 1.1 mrg #undef TARGET_DELEGITIMIZE_ADDRESS 330 1.1 mrg #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address 331 1.1 mrg 332 1.1 mrg #undef TARGET_FUNCTION_ARG 333 1.1 mrg #define TARGET_FUNCTION_ARG m68k_function_arg 334 1.1 mrg 335 1.1 mrg #undef TARGET_FUNCTION_ARG_ADVANCE 336 1.1 mrg #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance 337 1.1 mrg 338 1.1 mrg #undef TARGET_LEGITIMATE_CONSTANT_P 339 1.1 mrg #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p 340 1.1 mrg 341 1.1 mrg #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA 342 1.1 mrg #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra 343 1.1 mrg 344 1.1 mrg #undef TARGET_C_EXCESS_PRECISION 345 
1.1 mrg #define TARGET_C_EXCESS_PRECISION m68k_excess_precision 346 1.1 mrg 347 1.1 mrg /* The value stored by TAS. */ 348 1.1 mrg #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 349 1.1 mrg #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128 350 1.1 mrg 351 1.1 mrg #undef TARGET_HARD_REGNO_NREGS 352 1.1 mrg #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs 353 1.1 mrg #undef TARGET_HARD_REGNO_MODE_OK 354 1.1 mrg #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok 355 1.1 mrg 356 1.1 mrg #undef TARGET_MODES_TIEABLE_P 357 1.1 mrg #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p 358 1.1 mrg 359 1.1 mrg #undef TARGET_PROMOTE_FUNCTION_MODE 360 1.1 mrg #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode 361 1.1 mrg 362 1.1 mrg #undef TARGET_HAVE_SPECULATION_SAFE_VALUE 363 1.1 mrg #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed 364 1.1 mrg 365 1.1 mrg #undef TARGET_ASM_FINAL_POSTSCAN_INSN 366 1.1 mrg #define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn 367 1.1 mrg 368 1.1 mrg static const struct attribute_spec m68k_attribute_table[] = 369 1.1 mrg { 370 1.1 mrg /* { name, min_len, max_len, decl_req, type_req, fn_type_req, 371 1.1 mrg affects_type_identity, handler, exclude } */ 372 1.1 mrg { "interrupt", 0, 0, true, false, false, false, 373 1.1 mrg m68k_handle_fndecl_attribute, NULL }, 374 1.1 mrg { "interrupt_handler", 0, 0, true, false, false, false, 375 1.1 mrg m68k_handle_fndecl_attribute, NULL }, 376 1.1 mrg { "interrupt_thread", 0, 0, true, false, false, false, 377 1.1 mrg m68k_handle_fndecl_attribute, NULL }, 378 1.1 mrg { NULL, 0, 0, false, false, false, false, NULL, NULL } 379 1.1 mrg }; 380 1.1 mrg 381 1.1 mrg struct gcc_target targetm = TARGET_INITIALIZER; 382 1.1 mrg 383 1.1 mrg /* Base flags for 68k ISAs. */ 385 1.1 mrg #define FL_FOR_isa_00 FL_ISA_68000 386 1.1 mrg #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010) 387 1.1 mrg /* FL_68881 controls the default setting of -m68881. 
gcc has traditionally 388 1.1 mrg generated 68881 code for 68020 and 68030 targets unless explicitly told 389 1.1 mrg not to. */ 390 1.1 mrg #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \ 391 1.1 mrg | FL_BITFIELD | FL_68881 | FL_CAS) 392 1.1 mrg #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040) 393 1.1 mrg #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020) 394 1.1 mrg 395 1.1 mrg /* Base flags for ColdFire ISAs. */ 396 1.1 mrg #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A) 397 1.1 mrg #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP) 398 1.1 mrg /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */ 399 1.1 mrg #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV) 400 1.1 mrg /* ISA_C is not upwardly compatible with ISA_B. */ 401 1.1 mrg #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP) 402 1.1 mrg 403 1.1 mrg enum m68k_isa 404 1.1 mrg { 405 1.1 mrg /* Traditional 68000 instruction sets. */ 406 1.1 mrg isa_00, 407 1.1 mrg isa_10, 408 1.1 mrg isa_20, 409 1.1 mrg isa_40, 410 1.1 mrg isa_cpu32, 411 1.1 mrg /* ColdFire instruction set variants. */ 412 1.1 mrg isa_a, 413 1.1 mrg isa_aplus, 414 1.1 mrg isa_b, 415 1.1 mrg isa_c, 416 1.1 mrg isa_max 417 1.1 mrg }; 418 1.1 mrg 419 1.1 mrg /* Information about one of the -march, -mcpu or -mtune arguments. */ 420 1.1 mrg struct m68k_target_selection 421 1.1 mrg { 422 1.1 mrg /* The argument being described. */ 423 1.1 mrg const char *name; 424 1.1 mrg 425 1.1 mrg /* For -mcpu, this is the device selected by the option. 426 1.1 mrg For -mtune and -march, it is a representative device 427 1.1 mrg for the microarchitecture or ISA respectively. */ 428 1.1 mrg enum target_device device; 429 1.1 mrg 430 1.1 mrg /* The M68K_DEVICE fields associated with DEVICE. See the comment 431 1.1 mrg in m68k-devices.def for details. FAMILY is only valid for -mcpu. 
*/ 432 1.1 mrg const char *family; 433 1.1 mrg enum uarch_type microarch; 434 1.1 mrg enum m68k_isa isa; 435 1.1 mrg unsigned long flags; 436 1.1 mrg }; 437 1.1 mrg 438 1.1 mrg /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */ 439 1.1 mrg static const struct m68k_target_selection all_devices[] = 440 1.1 mrg { 441 1.1 mrg #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \ 442 1.1 mrg { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA }, 443 1.1 mrg #include "m68k-devices.def" 444 1.1 mrg #undef M68K_DEVICE 445 1.1 mrg { NULL, unk_device, NULL, unk_arch, isa_max, 0 } 446 1.1 mrg }; 447 1.1 mrg 448 1.1 mrg /* A list of all ISAs, mapping each one to a representative device. 449 1.1 mrg Used for -march selection. */ 450 1.1 mrg static const struct m68k_target_selection all_isas[] = 451 1.1 mrg { 452 1.1 mrg #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \ 453 1.1 mrg { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS }, 454 1.1 mrg #include "m68k-isas.def" 455 1.1 mrg #undef M68K_ISA 456 1.1 mrg { NULL, unk_device, NULL, unk_arch, isa_max, 0 } 457 1.1 mrg }; 458 1.1 mrg 459 1.1 mrg /* A list of all microarchitectures, mapping each one to a representative 460 1.1 mrg device. Used for -mtune selection. */ 461 1.1 mrg static const struct m68k_target_selection all_microarchs[] = 462 1.1 mrg { 463 1.1 mrg #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \ 464 1.1 mrg { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS }, 465 1.1 mrg #include "m68k-microarchs.def" 466 1.1 mrg #undef M68K_MICROARCH 467 1.1 mrg { NULL, unk_device, NULL, unk_arch, isa_max, 0 } 468 1.1 mrg }; 469 1.1 mrg 470 1.1 mrg /* The entries associated with the -mcpu, -march and -mtune settings, 472 1.1 mrg or null for options that have not been used. 
*/ 473 1.1 mrg const struct m68k_target_selection *m68k_cpu_entry; 474 1.1 mrg const struct m68k_target_selection *m68k_arch_entry; 475 1.1 mrg const struct m68k_target_selection *m68k_tune_entry; 476 1.1 mrg 477 1.1 mrg /* Which CPU we are generating code for. */ 478 1.1 mrg enum target_device m68k_cpu; 479 1.1 mrg 480 1.1 mrg /* Which microarchitecture to tune for. */ 481 1.1 mrg enum uarch_type m68k_tune; 482 1.1 mrg 483 1.1 mrg /* Which FPU to use. */ 484 1.1 mrg enum fpu_type m68k_fpu; 485 1.1 mrg 486 1.1 mrg /* The set of FL_* flags that apply to the target processor. */ 487 1.1 mrg unsigned int m68k_cpu_flags; 488 1.1 mrg 489 1.1 mrg /* The set of FL_* flags that apply to the processor to be tuned for. */ 490 1.1 mrg unsigned int m68k_tune_flags; 491 1.1 mrg 492 1.1 mrg /* Asm templates for calling or jumping to an arbitrary symbolic address, 493 1.1 mrg or NULL if such calls or jumps are not supported. The address is held 494 1.1 mrg in operand 0. */ 495 1.1 mrg const char *m68k_symbolic_call; 496 1.1 mrg const char *m68k_symbolic_jump; 497 1.1 mrg 498 1.1 mrg /* Enum variable that corresponds to m68k_symbolic_call values. */ 499 1.1 mrg enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var; 500 1.1 mrg 501 1.1 mrg 502 1.1 mrg /* Implement TARGET_OPTION_OVERRIDE. 
*/ 504 1.1 mrg 505 1.1 mrg static void 506 1.1 mrg m68k_option_override (void) 507 1.1 mrg { 508 1.1 mrg const struct m68k_target_selection *entry; 509 1.1 mrg unsigned long target_mask; 510 1.1 mrg 511 1.1 mrg if (OPTION_SET_P (m68k_arch_option)) 512 1.1 mrg m68k_arch_entry = &all_isas[m68k_arch_option]; 513 1.1 mrg 514 1.1 mrg if (OPTION_SET_P (m68k_cpu_option)) 515 1.1 mrg m68k_cpu_entry = &all_devices[(int) m68k_cpu_option]; 516 1.1 mrg 517 1.1 mrg if (OPTION_SET_P (m68k_tune_option)) 518 1.1 mrg m68k_tune_entry = &all_microarchs[(int) m68k_tune_option]; 519 1.1 mrg 520 1.1 mrg /* User can choose: 521 1.1 mrg 522 1.1 mrg -mcpu= 523 1.1 mrg -march= 524 1.1 mrg -mtune= 525 1.1 mrg 526 1.1 mrg -march=ARCH should generate code that runs any processor 527 1.1 mrg implementing architecture ARCH. -mcpu=CPU should override -march 528 1.1 mrg and should generate code that runs on processor CPU, making free 529 1.1 mrg use of any instructions that CPU understands. -mtune=UARCH applies 530 1.1 mrg on top of -mcpu or -march and optimizes the code for UARCH. It does 531 1.1 mrg not change the target architecture. */ 532 1.1 mrg if (m68k_cpu_entry) 533 1.1 mrg { 534 1.1 mrg /* Complain if the -march setting is for a different microarchitecture, 535 1.1 mrg or includes flags that the -mcpu setting doesn't. */ 536 1.1 mrg if (m68k_arch_entry 537 1.1 mrg && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch 538 1.1 mrg || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0)) 539 1.1 mrg warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>", 540 1.1 mrg m68k_cpu_entry->name, m68k_arch_entry->name); 541 1.1 mrg 542 1.1 mrg entry = m68k_cpu_entry; 543 1.1 mrg } 544 1.1 mrg else 545 1.1 mrg entry = m68k_arch_entry; 546 1.1 mrg 547 1.1 mrg if (!entry) 548 1.1 mrg entry = all_devices + TARGET_CPU_DEFAULT; 549 1.1 mrg 550 1.1 mrg m68k_cpu_flags = entry->flags; 551 1.1 mrg 552 1.1 mrg /* Use the architecture setting to derive default values for 553 1.1 mrg certain flags. 
*/ 554 1.1 mrg target_mask = 0; 555 1.1 mrg 556 1.1 mrg /* ColdFire is lenient about alignment. */ 557 1.1 mrg if (!TARGET_COLDFIRE) 558 1.1 mrg target_mask |= MASK_STRICT_ALIGNMENT; 559 1.1 mrg 560 1.1 mrg if ((m68k_cpu_flags & FL_BITFIELD) != 0) 561 1.1 mrg target_mask |= MASK_BITFIELD; 562 1.1 mrg if ((m68k_cpu_flags & FL_CF_HWDIV) != 0) 563 1.1 mrg target_mask |= MASK_CF_HWDIV; 564 1.1 mrg if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0) 565 1.1 mrg target_mask |= MASK_HARD_FLOAT; 566 1.1 mrg target_flags |= target_mask & ~target_flags_explicit; 567 1.1 mrg 568 1.1 mrg /* Set the directly-usable versions of the -mcpu and -mtune settings. */ 569 1.1 mrg m68k_cpu = entry->device; 570 1.1 mrg if (m68k_tune_entry) 571 1.1 mrg { 572 1.1 mrg m68k_tune = m68k_tune_entry->microarch; 573 1.1 mrg m68k_tune_flags = m68k_tune_entry->flags; 574 1.1 mrg } 575 1.1 mrg #ifdef M68K_DEFAULT_TUNE 576 1.1 mrg else if (!m68k_cpu_entry && !m68k_arch_entry) 577 1.1 mrg { 578 1.1 mrg enum target_device dev; 579 1.1 mrg dev = all_microarchs[M68K_DEFAULT_TUNE].device; 580 1.1 mrg m68k_tune_flags = all_devices[dev].flags; 581 1.1 mrg } 582 1.1 mrg #endif 583 1.1 mrg else 584 1.1 mrg { 585 1.1 mrg m68k_tune = entry->microarch; 586 1.1 mrg m68k_tune_flags = entry->flags; 587 1.1 mrg } 588 1.1 mrg 589 1.1 mrg /* Set the type of FPU. */ 590 1.1 mrg m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE 591 1.1 mrg : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE 592 1.1 mrg : FPUTYPE_68881); 593 1.1 mrg 594 1.1 mrg /* Sanity check to ensure that msep-data and mid-sahred-library are not 595 1.1 mrg * both specified together. Doing so simply doesn't make sense. 596 1.1 mrg */ 597 1.1 mrg if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY) 598 1.1 mrg error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>"); 599 1.1 mrg 600 1.1 mrg /* If we're generating code for a separate A5 relative data segment, 601 1.1 mrg * we've got to enable -fPIC as well. 
This might be relaxable to 602 1.1 mrg * -fpic but it hasn't been tested properly. 603 1.1 mrg */ 604 1.1 mrg if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY) 605 1.1 mrg flag_pic = 2; 606 1.1 mrg 607 1.1 mrg /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an 608 1.1 mrg error if the target does not support them. */ 609 1.1 mrg if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2) 610 1.1 mrg error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu"); 611 1.1 mrg 612 1.1 mrg /* ??? A historic way of turning on pic, or is this intended to 613 1.1 mrg be an embedded thing that doesn't have the same name binding 614 1.1 mrg significance that it does on hosted ELF systems? */ 615 1.1 mrg if (TARGET_PCREL && flag_pic == 0) 616 1.1 mrg flag_pic = 1; 617 1.1 mrg 618 1.1 mrg if (!flag_pic) 619 1.1 mrg { 620 1.1 mrg m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR; 621 1.1 mrg 622 1.1 mrg m68k_symbolic_jump = "jra %a0"; 623 1.1 mrg } 624 1.1 mrg else if (TARGET_ID_SHARED_LIBRARY) 625 1.1 mrg /* All addresses must be loaded from the GOT. */ 626 1.1 mrg ; 627 1.1 mrg else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC) 628 1.1 mrg { 629 1.1 mrg if (TARGET_PCREL) 630 1.1 mrg m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C; 631 1.1 mrg else 632 1.1 mrg m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P; 633 1.1 mrg 634 1.1 mrg if (TARGET_ISAC) 635 1.1 mrg /* No unconditional long branch */; 636 1.1 mrg else if (TARGET_PCREL) 637 1.1 mrg m68k_symbolic_jump = "bra%.l %c0"; 638 1.1 mrg else 639 1.1 mrg m68k_symbolic_jump = "bra%.l %p0"; 640 1.1 mrg /* Turn off function cse if we are doing PIC. We always want 641 1.1 mrg function call to be done as `bsr foo@PLTPC'. */ 642 1.1 mrg /* ??? It's traditional to do this for -mpcrel too, but it isn't 643 1.1 mrg clear how intentional that is. 
*/ 644 1.1 mrg flag_no_function_cse = 1; 645 1.1 mrg } 646 1.1 mrg 647 1.1 mrg switch (m68k_symbolic_call_var) 648 1.1 mrg { 649 1.1 mrg case M68K_SYMBOLIC_CALL_JSR: 650 1.1 mrg m68k_symbolic_call = "jsr %a0"; 651 1.1 mrg break; 652 1.1 mrg 653 1.1 mrg case M68K_SYMBOLIC_CALL_BSR_C: 654 1.1 mrg m68k_symbolic_call = "bsr%.l %c0"; 655 1.1 mrg break; 656 1.1 mrg 657 1.1 mrg case M68K_SYMBOLIC_CALL_BSR_P: 658 1.1 mrg m68k_symbolic_call = "bsr%.l %p0"; 659 1.1 mrg break; 660 1.1 mrg 661 1.1 mrg case M68K_SYMBOLIC_CALL_NONE: 662 1.1 mrg gcc_assert (m68k_symbolic_call == NULL); 663 1.1 mrg break; 664 1.1 mrg 665 1.1 mrg default: 666 1.1 mrg gcc_unreachable (); 667 1.1 mrg } 668 1.1 mrg 669 1.1 mrg #ifndef ASM_OUTPUT_ALIGN_WITH_NOP 670 1.1 mrg parse_alignment_opts (); 671 1.1 mrg int label_alignment = align_labels.levels[0].get_value (); 672 1.1 mrg if (label_alignment > 2) 673 1.1 mrg { 674 1.1 mrg warning (0, "%<-falign-labels=%d%> is not supported", label_alignment); 675 1.1 mrg str_align_labels = "1"; 676 1.1 mrg } 677 1.1 mrg 678 1.1 mrg int loop_alignment = align_loops.levels[0].get_value (); 679 1.1 mrg if (loop_alignment > 2) 680 1.1 mrg { 681 1.1 mrg warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment); 682 1.1 mrg str_align_loops = "1"; 683 1.1 mrg } 684 1.1 mrg #endif 685 1.1 mrg 686 1.1 mrg if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0) 687 1.1 mrg && !TARGET_68020) 688 1.1 mrg { 689 1.1 mrg warning (0, "%<-fstack-limit-%> options are not supported on this cpu"); 690 1.1 mrg opt_fstack_limit_symbol_arg = NULL; 691 1.1 mrg opt_fstack_limit_register_no = -1; 692 1.1 mrg } 693 1.1 mrg 694 1.1 mrg SUBTARGET_OVERRIDE_OPTIONS; 695 1.1 mrg 696 1.1 mrg /* Setup scheduling options. 
*/ 697 1.1 mrg if (TUNE_CFV1) 698 1.1 mrg m68k_sched_cpu = CPU_CFV1; 699 1.1 mrg else if (TUNE_CFV2) 700 1.1 mrg m68k_sched_cpu = CPU_CFV2; 701 1.1 mrg else if (TUNE_CFV3) 702 1.1 mrg m68k_sched_cpu = CPU_CFV3; 703 1.1 mrg else if (TUNE_CFV4) 704 1.1 mrg m68k_sched_cpu = CPU_CFV4; 705 1.1 mrg else 706 1.1 mrg { 707 1.1 mrg m68k_sched_cpu = CPU_UNKNOWN; 708 1.1 mrg flag_schedule_insns = 0; 709 1.1 mrg flag_schedule_insns_after_reload = 0; 710 1.1 mrg flag_modulo_sched = 0; 711 1.1 mrg flag_live_range_shrinkage = 0; 712 1.1 mrg } 713 1.1 mrg 714 1.1 mrg if (m68k_sched_cpu != CPU_UNKNOWN) 715 1.1 mrg { 716 1.1 mrg if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0) 717 1.1 mrg m68k_sched_mac = MAC_CF_EMAC; 718 1.1 mrg else if ((m68k_cpu_flags & FL_CF_MAC) != 0) 719 1.1 mrg m68k_sched_mac = MAC_CF_MAC; 720 1.1 mrg else 721 1.2 mrg m68k_sched_mac = MAC_NO; 722 1.2 mrg } 723 1.2 mrg } 724 1.2 mrg 725 1.2 mrg /* Implement TARGET_INIT_BUILTINS. */ 726 1.2 mrg 727 1.2 mrg static void 728 1.2 mrg m68k_init_builtins (void) 729 1.2 mrg { 730 1.2 mrg #ifdef SUBTARGET_INIT_BUILTINS 731 1.1 mrg SUBTARGET_INIT_BUILTINS; 732 1.1 mrg #endif 733 1.1 mrg } 734 1.1 mrg 735 1.1 mrg /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */ 736 1.1 mrg 737 1.1 mrg static void 738 1.1 mrg m68k_override_options_after_change (void) 739 1.1 mrg { 740 1.1 mrg if (m68k_sched_cpu == CPU_UNKNOWN) 741 1.1 mrg { 742 1.1 mrg flag_schedule_insns = 0; 743 1.1 mrg flag_schedule_insns_after_reload = 0; 744 1.1 mrg flag_modulo_sched = 0; 745 1.1 mrg flag_live_range_shrinkage = 0; 746 1.1 mrg } 747 1.1 mrg } 748 1.1 mrg 749 1.1 mrg /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the 750 1.1 mrg given argument and NAME is the argument passed to -mcpu. Return NULL 751 1.1 mrg if -mcpu was not passed. 
*/ 752 1.1 mrg 753 1.1 mrg const char * 754 1.1 mrg m68k_cpp_cpu_ident (const char *prefix) 755 1.1 mrg { 756 1.1 mrg if (!m68k_cpu_entry) 757 1.1 mrg return NULL; 758 1.1 mrg return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL); 759 1.1 mrg } 760 1.1 mrg 761 1.1 mrg /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the 762 1.1 mrg given argument and NAME is the name of the representative device for 763 1.1 mrg the -mcpu argument's family. Return NULL if -mcpu was not passed. */ 764 1.1 mrg 765 1.1 mrg const char * 766 1.1 mrg m68k_cpp_cpu_family (const char *prefix) 767 1.1 mrg { 768 1.1 mrg if (!m68k_cpu_entry) 769 1.1 mrg return NULL; 770 1.1 mrg return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL); 771 1.1 mrg } 772 1.1 mrg 773 1.1 mrg /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or 775 1.1 mrg "interrupt_handler" attribute and interrupt_thread if FUNC has an 776 1.1 mrg "interrupt_thread" attribute. Otherwise, return 777 1.1 mrg m68k_fk_normal_function. */ 778 1.1 mrg 779 1.1 mrg enum m68k_function_kind 780 1.1 mrg m68k_get_function_kind (tree func) 781 1.1 mrg { 782 1.1 mrg tree a; 783 1.1 mrg 784 1.1 mrg gcc_assert (TREE_CODE (func) == FUNCTION_DECL); 785 1.1 mrg 786 1.1 mrg a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func)); 787 1.1 mrg if (a != NULL_TREE) 788 1.1 mrg return m68k_fk_interrupt_handler; 789 1.1 mrg 790 1.1 mrg a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func)); 791 1.1 mrg if (a != NULL_TREE) 792 1.1 mrg return m68k_fk_interrupt_handler; 793 1.1 mrg 794 1.1 mrg a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func)); 795 1.1 mrg if (a != NULL_TREE) 796 1.1 mrg return m68k_fk_interrupt_thread; 797 1.1 mrg 798 1.1 mrg return m68k_fk_normal_function; 799 1.1 mrg } 800 1.1 mrg 801 1.1 mrg /* Handle an attribute requiring a FUNCTION_DECL; arguments as in 802 1.1 mrg struct attribute_spec.handler. 
*/ 803 1.1 mrg static tree 804 1.1 mrg m68k_handle_fndecl_attribute (tree *node, tree name, 805 1.1 mrg tree args ATTRIBUTE_UNUSED, 806 1.1 mrg int flags ATTRIBUTE_UNUSED, 807 1.1 mrg bool *no_add_attrs) 808 1.1 mrg { 809 1.1 mrg if (TREE_CODE (*node) != FUNCTION_DECL) 810 1.1 mrg { 811 1.1 mrg warning (OPT_Wattributes, "%qE attribute only applies to functions", 812 1.1 mrg name); 813 1.1 mrg *no_add_attrs = true; 814 1.1 mrg } 815 1.1 mrg 816 1.1 mrg if (m68k_get_function_kind (*node) != m68k_fk_normal_function) 817 1.1 mrg { 818 1.1 mrg error ("multiple interrupt attributes not allowed"); 819 1.1 mrg *no_add_attrs = true; 820 1.1 mrg } 821 1.1 mrg 822 1.1 mrg if (!TARGET_FIDOA 823 1.1 mrg && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread")) 824 1.1 mrg { 825 1.1 mrg error ("%<interrupt_thread%> is available only on fido"); 826 1.1 mrg *no_add_attrs = true; 827 1.1 mrg } 828 1.1 mrg 829 1.1 mrg return NULL_TREE; 830 1.1 mrg } 831 1.1 mrg 832 1.1 mrg static void 833 1.1 mrg m68k_compute_frame_layout (void) 834 1.1 mrg { 835 1.1 mrg int regno, saved; 836 1.1 mrg unsigned int mask; 837 1.1 mrg enum m68k_function_kind func_kind = 838 1.1 mrg m68k_get_function_kind (current_function_decl); 839 1.1 mrg bool interrupt_handler = func_kind == m68k_fk_interrupt_handler; 840 1.1 mrg bool interrupt_thread = func_kind == m68k_fk_interrupt_thread; 841 1.1 mrg 842 1.1 mrg /* Only compute the frame once per function. 843 1.1 mrg Don't cache information until reload has been completed. */ 844 1.1 mrg if (current_frame.funcdef_no == current_function_funcdef_no 845 1.1 mrg && reload_completed) 846 1.1 mrg return; 847 1.1 mrg 848 1.1 mrg current_frame.size = (get_frame_size () + 3) & -4; 849 1.1 mrg 850 1.1 mrg mask = saved = 0; 851 1.1 mrg 852 1.1 mrg /* Interrupt thread does not need to save any register. 
*/ 853 1.1 mrg if (!interrupt_thread) 854 1.1 mrg for (regno = 0; regno < 16; regno++) 855 1.1 mrg if (m68k_save_reg (regno, interrupt_handler)) 856 1.1 mrg { 857 1.1 mrg mask |= 1 << (regno - D0_REG); 858 1.1 mrg saved++; 859 1.1 mrg } 860 1.1 mrg current_frame.offset = saved * 4; 861 1.1 mrg current_frame.reg_no = saved; 862 1.1 mrg current_frame.reg_mask = mask; 863 1.1 mrg 864 1.1 mrg current_frame.foffset = 0; 865 1.1 mrg mask = saved = 0; 866 1.1 mrg if (TARGET_HARD_FLOAT) 867 1.1 mrg { 868 1.1 mrg /* Interrupt thread does not need to save any register. */ 869 1.1 mrg if (!interrupt_thread) 870 1.1 mrg for (regno = 16; regno < 24; regno++) 871 1.1 mrg if (m68k_save_reg (regno, interrupt_handler)) 872 1.1 mrg { 873 1.1 mrg mask |= 1 << (regno - FP0_REG); 874 1.1 mrg saved++; 875 1.1 mrg } 876 1.1 mrg current_frame.foffset = saved * TARGET_FP_REG_SIZE; 877 1.1 mrg current_frame.offset += current_frame.foffset; 878 1.1 mrg } 879 1.1 mrg current_frame.fpu_no = saved; 880 1.1 mrg current_frame.fpu_mask = mask; 881 1.1 mrg 882 1.1 mrg /* Remember what function this frame refers to. */ 883 1.1 mrg current_frame.funcdef_no = current_function_funcdef_no; 884 1.1 mrg } 885 1.1 mrg 886 1.1 mrg /* Worker function for TARGET_CAN_ELIMINATE. */ 887 1.1 mrg 888 1.1 mrg bool 889 1.1 mrg m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) 890 1.1 mrg { 891 1.1 mrg return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true); 892 1.1 mrg } 893 1.1 mrg 894 1.1 mrg HOST_WIDE_INT 895 1.1 mrg m68k_initial_elimination_offset (int from, int to) 896 1.1 mrg { 897 1.1 mrg int argptr_offset; 898 1.1 mrg /* The arg pointer points 8 bytes before the start of the arguments, 899 1.1 mrg as defined by FIRST_PARM_OFFSET. This makes it coincident with the 900 1.1 mrg frame pointer in most frames. */ 901 1.1 mrg argptr_offset = frame_pointer_needed ? 
0 : UNITS_PER_WORD; 902 1.1 mrg if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) 903 1.1 mrg return argptr_offset; 904 1.1 mrg 905 1.1 mrg m68k_compute_frame_layout (); 906 1.1 mrg 907 1.1 mrg gcc_assert (to == STACK_POINTER_REGNUM); 908 1.1 mrg switch (from) 909 1.1 mrg { 910 1.1 mrg case ARG_POINTER_REGNUM: 911 1.1 mrg return current_frame.offset + current_frame.size - argptr_offset; 912 1.1 mrg case FRAME_POINTER_REGNUM: 913 1.1 mrg return current_frame.offset + current_frame.size; 914 1.1 mrg default: 915 1.1 mrg gcc_unreachable (); 916 1.1 mrg } 917 1.1 mrg } 918 1.1 mrg 919 1.1 mrg /* Refer to the array `regs_ever_live' to determine which registers 920 1.1 mrg to save; `regs_ever_live[I]' is nonzero if register number I 921 1.1 mrg is ever used in the function. This function is responsible for 922 1.1 mrg knowing which registers should not be saved even if used. 923 1.1 mrg Return true if we need to save REGNO. */ 924 1.1 mrg 925 1.1 mrg static bool 926 1.1 mrg m68k_save_reg (unsigned int regno, bool interrupt_handler) 927 1.1 mrg { 928 1.1 mrg if (flag_pic && regno == PIC_REG) 929 1.1 mrg { 930 1.1 mrg if (crtl->saves_all_registers) 931 1.1 mrg return true; 932 1.1 mrg if (crtl->uses_pic_offset_table) 933 1.1 mrg return true; 934 1.1 mrg /* Reload may introduce constant pool references into a function 935 1.1 mrg that thitherto didn't need a PIC register. Note that the test 936 1.1 mrg above will not catch that case because we will only set 937 1.1 mrg crtl->uses_pic_offset_table when emitting 938 1.1 mrg the address reloads. 
*/ 939 1.1 mrg if (crtl->uses_const_pool) 940 1.1 mrg return true; 941 1.1 mrg } 942 1.1 mrg 943 1.1 mrg if (crtl->calls_eh_return) 944 1.1 mrg { 945 1.1 mrg unsigned int i; 946 1.1 mrg for (i = 0; ; i++) 947 1.1 mrg { 948 1.1 mrg unsigned int test = EH_RETURN_DATA_REGNO (i); 949 1.1 mrg if (test == INVALID_REGNUM) 950 1.1 mrg break; 951 1.1 mrg if (test == regno) 952 1.1 mrg return true; 953 1.1 mrg } 954 1.1 mrg } 955 1.1 mrg 956 1.1 mrg /* Fixed regs we never touch. */ 957 1.1 mrg if (fixed_regs[regno]) 958 1.1 mrg return false; 959 1.1 mrg 960 1.1 mrg /* The frame pointer (if it is such) is handled specially. */ 961 1.1 mrg if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) 962 1.1 mrg return false; 963 1.1 mrg 964 1.1 mrg /* Interrupt handlers must also save call_used_regs 965 1.1 mrg if they are live or when calling nested functions. */ 966 1.1 mrg if (interrupt_handler) 967 1.1 mrg { 968 1.1 mrg if (df_regs_ever_live_p (regno)) 969 1.1 mrg return true; 970 1.1 mrg 971 1.1 mrg if (!crtl->is_leaf && call_used_or_fixed_reg_p (regno)) 972 1.1 mrg return true; 973 1.1 mrg } 974 1.1 mrg 975 1.1 mrg /* Never need to save registers that aren't touched. */ 976 1.1 mrg if (!df_regs_ever_live_p (regno)) 977 1.1 mrg return false; 978 1.1 mrg 979 1.1 mrg /* Otherwise save everything that isn't call-clobbered. */ 980 1.1 mrg return !call_used_or_fixed_reg_p (regno); 981 1.1 mrg } 982 1.1 mrg 983 1.1 mrg /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents 984 1.1 mrg the lowest memory address. COUNT is the number of registers to be 985 1.1 mrg moved, with register REGNO + I being moved if bit I of MASK is set. 986 1.1 mrg STORE_P specifies the direction of the move and ADJUST_STACK_P says 987 1.1 mrg whether or not this is pre-decrement (if STORE_P) or post-increment 988 1.1 mrg (if !STORE_P) operation. 
*/

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One SET per moved register, plus one extra SET for the stack-pointer
     adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Pre-decrement (store) or post-increment (load) BASE by the
	 total size of the register block.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the destination, operands[1] the source;
	   STORE_P selects which of the pair is the memory slot.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every vector slot must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}

/* Make INSN a frame-related instruction.  For a PARALLEL, every inner
   element must be marked as well so dwarf2cfi sees each register save.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.
*/

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* The limit is not directly usable; load it into d0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w (or link.l on the 68020) allocates frame and saves the
	   old frame pointer in one instruction.  The extra -4 accounts
	   for the saved frame pointer itself.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too large for link.w displacement: link with zero
	     size, then adjust the stack pointer separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881: fmovem with pre-decrement addressing.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP
	     register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer when PIC code needs it and the data sections
     are not separately addressed.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}

/* Return true if a simple (return) instruction is sufficient for this
   instruction (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  /* No saved registers means nothing to restore.  */
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* The restore offset does not fit a 16-bit displacement; arrange an
     alternative addressing strategy.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  Point %sp at the save area via %a1.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Use the (d8,%fp,%a1.l) "big" addressing mode below.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the final stack
     adjustment.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}

/* Return true if PARALLEL contains register REGNO.
*/ 1386 1.1 mrg static bool 1387 1.1 mrg m68k_reg_present_p (const_rtx parallel, unsigned int regno) 1388 1.1 mrg { 1389 1.1 mrg int i; 1390 1.1 mrg 1391 1.1 mrg if (REG_P (parallel) && REGNO (parallel) == regno) 1392 1.1 mrg return true; 1393 1.1 mrg 1394 1.1 mrg if (GET_CODE (parallel) != PARALLEL) 1395 1.1 mrg return false; 1396 1.1 mrg 1397 1.1 mrg for (i = 0; i < XVECLEN (parallel, 0); ++i) 1398 1.1 mrg { 1399 1.1 mrg const_rtx x; 1400 1.1 mrg 1401 1.1 mrg x = XEXP (XVECEXP (parallel, 0, i), 0); 1402 1.1 mrg if (REG_P (x) && REGNO (x) == regno) 1403 1.1 mrg return true; 1404 1.1 mrg } 1405 1.1 mrg 1406 1.1 mrg return false; 1407 1.1 mrg } 1408 1.1 mrg 1409 1.1 mrg /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */ 1410 1.1 mrg 1411 1.1 mrg static bool 1412 1.1 mrg m68k_ok_for_sibcall_p (tree decl, tree exp) 1413 1.1 mrg { 1414 1.1 mrg enum m68k_function_kind kind; 1415 1.1 mrg 1416 1.1 mrg /* We cannot use sibcalls for nested functions because we use the 1417 1.1 mrg static chain register for indirect calls. */ 1418 1.1 mrg if (CALL_EXPR_STATIC_CHAIN (exp)) 1419 1.1 mrg return false; 1420 1.1 mrg 1421 1.1 mrg if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl)))) 1422 1.1 mrg { 1423 1.1 mrg /* Check that the return value locations are the same. For 1424 1.1 mrg example that we aren't returning a value from the sibling in 1425 1.1 mrg a D0 register but then need to transfer it to a A0 register. */ 1426 1.1 mrg rtx cfun_value; 1427 1.1 mrg rtx call_value; 1428 1.1 mrg 1429 1.1 mrg cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)), 1430 1.1 mrg cfun->decl); 1431 1.1 mrg call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl); 1432 1.1 mrg 1433 1.1 mrg /* Check that the values are equal or that the result the callee 1434 1.1 mrg function returns is superset of what the current function returns. 
*/ 1435 1.1 mrg if (!(rtx_equal_p (cfun_value, call_value) 1436 1.1 mrg || (REG_P (cfun_value) 1437 1.1 mrg && m68k_reg_present_p (call_value, REGNO (cfun_value))))) 1438 1.1 mrg return false; 1439 1.1 mrg } 1440 1.1 mrg 1441 1.1 mrg kind = m68k_get_function_kind (current_function_decl); 1442 1.1 mrg if (kind == m68k_fk_normal_function) 1443 1.1 mrg /* We can always sibcall from a normal function, because it's 1444 1.1 mrg undefined if it is calling an interrupt function. */ 1445 1.1 mrg return true; 1446 1.1 mrg 1447 1.1 mrg /* Otherwise we can only sibcall if the function kind is known to be 1448 1.1 mrg the same. */ 1449 1.1 mrg if (decl && m68k_get_function_kind (decl) == kind) 1450 1.1 mrg return true; 1451 1.1 mrg 1452 1.1 mrg return false; 1453 1.1 mrg } 1454 1.1 mrg 1455 1.1 mrg /* On the m68k all args are always pushed. */ 1456 1.1 mrg 1457 1.1 mrg static rtx 1458 1.1 mrg m68k_function_arg (cumulative_args_t, const function_arg_info &) 1459 1.1 mrg { 1460 1.1 mrg return NULL_RTX; 1461 1.1 mrg } 1462 1.1 mrg 1463 1.1 mrg static void 1464 1.1 mrg m68k_function_arg_advance (cumulative_args_t cum_v, 1465 1.1 mrg const function_arg_info &arg) 1466 1.1 mrg { 1467 1.1 mrg CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); 1468 1.1 mrg 1469 1.1 mrg *cum += (arg.promoted_size_in_bytes () + 3) & ~3; 1470 1.1 mrg } 1471 1.1 mrg 1472 1.1 mrg /* Convert X to a legitimate function call memory reference and return the 1473 1.1 mrg result. */ 1474 1.1 mrg 1475 1.1 mrg rtx 1476 1.1 mrg m68k_legitimize_call_address (rtx x) 1477 1.1 mrg { 1478 1.1 mrg gcc_assert (MEM_P (x)); 1479 1.1 mrg if (call_operand (XEXP (x, 0), VOIDmode)) 1480 1.1 mrg return x; 1481 1.1 mrg return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0))); 1482 1.1 mrg } 1483 1.1 mrg 1484 1.1 mrg /* Likewise for sibling calls. 
*/

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  /* Sibcalls must use the static chain register: it is the only
     call-clobbered address register still free at this point.  */
  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X has been changed; COPIED ensures we copy the
	 (possibly shared) rtx at most once before mutating it.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications so each side can go in a register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* ColdFire FPU loads/stores cannot use indexed addressing;
		 compute the sum into a register instead.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* Handle REG+X or sign_extend(HImode REG)+X by forcing X into a
	 register so the result is a base+index address.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  /* See the ColdFire FPU note above.  */
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      /* Mirror case: X+REG or X+sign_extend(HImode REG).  */
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  /* See the ColdFire FPU note above.  */
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}

/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   these values.
  */

/* Operands of the most recently output compare instruction, if any;
   while set, an identical later comparison can be elided outright.  */
static rtx flags_compare_op0, flags_compare_op1;
/* Operands whose values the currently live condition codes reflect
   (destination and, when still valid, source of the last flag-setting
   instruction).  */
static rtx flags_operand1, flags_operand2;
/* In what way the last instruction left the flags valid; mirrors the
   "flags_valid" insn attribute.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;

/* Return a code other than UNKNOWN if we can elide a CODE comparison of
   OP0 with OP1.  */

rtx_code
m68k_find_flags_value (rtx op0, rtx op1, rtx_code code)
{
  /* If we remembered a full compare, only an identical (possibly
     operand-swapped) comparison can be elided.  */
  if (flags_compare_op0 != NULL_RTX)
    {
      if (rtx_equal_p (op0, flags_compare_op0)
	  && rtx_equal_p (op1, flags_compare_op1))
	return code;
      if (rtx_equal_p (op0, flags_compare_op1)
	  && rtx_equal_p (op1, flags_compare_op0))
	return swap_condition (code);
      return UNKNOWN;
    }

  /* Otherwise we can only use flags set by an ordinary instruction,
     which amounts to a comparison against zero.  */
  machine_mode mode = GET_MODE (op0);
  if (op1 != CONST0_RTX (mode))
    return UNKNOWN;
  /* Comparisons against 0 with these two should have been optimized out.  */
  gcc_assert (code != LTU && code != GEU);
  /* FLAGS_VALID_NOOV means the overflow flag is not meaningful, so
     GT/LE (which depend on V) cannot be derived.  */
  if (flags_valid == FLAGS_VALID_NOOV && (code == GT || code == LE))
    return UNKNOWN;
  if (rtx_equal_p (flags_operand1, op0) || rtx_equal_p (flags_operand2, op0))
    /* For integer modes, GE/LT against zero reduce to a test of the N
       flag: PLUS/MINUS stand for the "pl"/"mi" branch conditions
       (see output_dbcc_and_branch).  */
    return (FLOAT_MODE_P (mode) ? code
	    : code == GE ? PLUS : code == LT ? MINUS : code);
  /* See if we are testing whether the high part of a DImode value is
     positive or negative and we have the full value as a remembered
     operand.  */
  if (code != GE && code != LT)
    return UNKNOWN;
  if (mode == SImode
      && flags_operand1 != NULL_RTX && GET_MODE (flags_operand1) == DImode
      && REG_P (flags_operand1) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand1), DImode) == 2
      && REGNO (flags_operand1) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  if (mode == SImode
      && flags_operand2 != NULL_RTX && GET_MODE (flags_operand2) == DImode
      && REG_P (flags_operand2) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand2), DImode) == 2
      && REGNO (flags_operand2) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  return UNKNOWN;
}

/* Called through CC_STATUS_INIT, which is invoked by final whenever a
   label is encountered.  Forget everything we knew about the flags.  */

void
m68k_init_cc ()
{
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  flags_operand1 = flags_operand2 = NULL_RTX;
  flags_valid = FLAGS_VALID_NO;
}

/* Update flags for a move operation with OPERANDS.  Called for move
   operations where attr_flags_valid returns "set".  */

static void
handle_flags_for_move (rtx *operands)
{
  /* Any remembered comparison is invalidated by this insn.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  if (!ADDRESS_REG_P (operands[0]))
    {
      flags_valid = FLAGS_VALID_MOVE;
      flags_operand1 = side_effects_p (operands[0]) ? NULL_RTX : operands[0];
      if (side_effects_p (operands[1])
	  /* ??? For mem->mem moves, this can discard the source as a
	     valid compare operand.  If you assume aligned moves, this
	     is unnecessary, but in theory, we could have an unaligned
	     move overwriting parts of its source.  */
	  || modified_in_p (operands[1], current_output_insn))
	flags_operand2 = NULL_RTX;
      else
	flags_operand2 = operands[1];
      return;
    }
  /* Destination is an address register: the flags themselves survive,
     but any remembered operand clobbered by this insn must be
     forgotten.  */
  if (flags_operand1 != NULL_RTX
      && modified_in_p (flags_operand1, current_output_insn))
    flags_operand1 = NULL_RTX;
  if (flags_operand2 != NULL_RTX
      && modified_in_p (flags_operand2, current_output_insn))
    flags_operand2 = NULL_RTX;
}

/* Process INSN to remember flag operands if possible.  */

static void
m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int)
{
  enum attr_flags_valid v = get_attr_flags_valid (insn);
  if (v == FLAGS_VALID_SET)
    return;
  /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
     now.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;

  if (v == FLAGS_VALID_NO)
    {
      /* Flags are clobbered in an unknown way: forget everything.  */
      flags_operand1 = flags_operand2 = NULL_RTX;
      return;
    }
  else if (v == FLAGS_VALID_UNCHANGED)
    {
      /* Flags survive, but operands overwritten by INSN no longer
	 describe them.  */
      if (flags_operand1 != NULL_RTX && modified_in_p (flags_operand1, insn))
	flags_operand1 = NULL_RTX;
      if (flags_operand2 != NULL_RTX && modified_in_p (flags_operand2, insn))
	flags_operand2 = NULL_RTX;
      return;
    }

  flags_valid = v;
  rtx set = single_set (insn);
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  if (side_effects_p (dest))
    dest = NULL_RTX;

  switch (v)
    {
    case FLAGS_VALID_YES:
    case FLAGS_VALID_NOOV:
      flags_operand1 = dest;
      flags_operand2 = NULL_RTX;
      break;
    case FLAGS_VALID_MOVE:
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      if (dest != NULL_RTX
	  && !FP_REG_P (dest)
	  && (FP_REG_P (src)
	      || GET_CODE (src) == FIX
	      || FLOAT_MODE_P (GET_MODE (dest))))
	flags_operand1 = flags_operand2 = NULL_RTX;
      else
	{
	  flags_operand1 = dest;
	  if (GET_MODE (src) != VOIDmode && !side_effects_p (src)
	      && !modified_in_p (src, insn))
	    flags_operand2 = src;
	  else
	    flags_operand2 = NULL_RTX;
	}
      break;
    default:
      gcc_unreachable ();
    }
  return;
}

/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).
   OPERANDS are as in the two peepholes.  CODE is the code
   returned by m68k_output_branch_<mode>.  */

void
output_dbcc_and_branch (rtx *operands, rtx_code code)
{
  switch (code)
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn
("dbcc %0,%l1\n\tjcc %l2", operands); 1787 1.1 mrg break; 1788 1.1 mrg 1789 1.1 mrg case LE: 1790 1.1 mrg output_asm_insn ("dble %0,%l1\n\tjle %l2", operands); 1791 1.1 mrg break; 1792 1.1 mrg 1793 1.1 mrg case LEU: 1794 1.1 mrg output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands); 1795 1.1 mrg break; 1796 1.1 mrg 1797 1.1 mrg case PLUS: 1798 1.1 mrg output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands); 1799 1.1 mrg break; 1800 1.1 mrg 1801 1.1 mrg case MINUS: 1802 1.1 mrg output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands); 1803 1.1 mrg break; 1804 1.1 mrg 1805 1.1 mrg default: 1806 1.1 mrg gcc_unreachable (); 1807 1.1 mrg } 1808 1.1 mrg 1809 1.1 mrg /* If the decrement is to be done in SImode, then we have 1810 1.1 mrg to compensate for the fact that dbcc decrements in HImode. */ 1811 1.1 mrg switch (GET_MODE (operands[0])) 1812 1.1 mrg { 1813 1.1 mrg case E_SImode: 1814 1.1 mrg output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands); 1815 1.1 mrg break; 1816 1.1 mrg 1817 1.1 mrg case E_HImode: 1818 1.1 mrg break; 1819 1.1 mrg 1820 1.1 mrg default: 1821 1.1 mrg gcc_unreachable (); 1822 1.1 mrg } 1823 1.1 mrg } 1824 1.1 mrg 1825 1.1 mrg const char * 1826 1.1 mrg output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest) 1827 1.1 mrg { 1828 1.1 mrg rtx loperands[7]; 1829 1.1 mrg enum rtx_code op_code = GET_CODE (op); 1830 1.1 mrg 1831 1.1 mrg /* This does not produce a useful cc. */ 1832 1.1 mrg CC_STATUS_INIT; 1833 1.1 mrg 1834 1.1 mrg /* The m68k cmp.l instruction requires operand1 to be a reg as used 1835 1.1 mrg below. Swap the operands and change the op if these requirements 1836 1.1 mrg are not fulfilled. 
*/ 1837 1.1 mrg if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG) 1838 1.1 mrg { 1839 1.1 mrg rtx tmp = operand1; 1840 1.1 mrg 1841 1.1 mrg operand1 = operand2; 1842 1.1 mrg operand2 = tmp; 1843 1.1 mrg op_code = swap_condition (op_code); 1844 1.1 mrg } 1845 1.1 mrg loperands[0] = operand1; 1846 1.1 mrg if (GET_CODE (operand1) == REG) 1847 1.1 mrg loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1); 1848 1.1 mrg else 1849 1.1 mrg loperands[1] = adjust_address (operand1, SImode, 4); 1850 1.1 mrg if (operand2 != const0_rtx) 1851 1.1 mrg { 1852 1.1 mrg loperands[2] = operand2; 1853 1.1 mrg if (GET_CODE (operand2) == REG) 1854 1.1 mrg loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1); 1855 1.1 mrg else 1856 1.1 mrg loperands[3] = adjust_address (operand2, SImode, 4); 1857 1.1 mrg } 1858 1.1 mrg loperands[4] = gen_label_rtx (); 1859 1.1 mrg if (operand2 != const0_rtx) 1860 1.1 mrg output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands); 1861 1.1 mrg else 1862 1.1 mrg { 1863 1.1 mrg if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0])) 1864 1.1 mrg output_asm_insn ("tst%.l %0", loperands); 1865 1.1 mrg else 1866 1.1 mrg output_asm_insn ("cmp%.w #0,%0", loperands); 1867 1.1 mrg 1868 1.1 mrg output_asm_insn ("jne %l4", loperands); 1869 1.1 mrg 1870 1.1 mrg if (TARGET_68020 || TARGET_COLDFIRE || ! 
ADDRESS_REG_P (loperands[1])) 1871 1.1 mrg output_asm_insn ("tst%.l %1", loperands); 1872 1.1 mrg else 1873 1.1 mrg output_asm_insn ("cmp%.w #0,%1", loperands); 1874 1.1 mrg } 1875 1.1 mrg 1876 1.1 mrg loperands[5] = dest; 1877 1.1 mrg 1878 1.1 mrg switch (op_code) 1879 1.1 mrg { 1880 1.1 mrg case EQ: 1881 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1882 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1883 1.1 mrg output_asm_insn ("seq %5", loperands); 1884 1.1 mrg break; 1885 1.1 mrg 1886 1.1 mrg case NE: 1887 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1888 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1889 1.1 mrg output_asm_insn ("sne %5", loperands); 1890 1.1 mrg break; 1891 1.1 mrg 1892 1.1 mrg case GT: 1893 1.1 mrg loperands[6] = gen_label_rtx (); 1894 1.1 mrg output_asm_insn ("shi %5\n\tjra %l6", loperands); 1895 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1896 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1897 1.1 mrg output_asm_insn ("sgt %5", loperands); 1898 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1899 1.1 mrg CODE_LABEL_NUMBER (loperands[6])); 1900 1.1 mrg break; 1901 1.1 mrg 1902 1.1 mrg case GTU: 1903 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1904 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1905 1.1 mrg output_asm_insn ("shi %5", loperands); 1906 1.1 mrg break; 1907 1.1 mrg 1908 1.1 mrg case LT: 1909 1.1 mrg loperands[6] = gen_label_rtx (); 1910 1.1 mrg output_asm_insn ("scs %5\n\tjra %l6", loperands); 1911 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1912 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1913 1.1 mrg output_asm_insn ("slt %5", loperands); 1914 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1915 1.1 mrg CODE_LABEL_NUMBER (loperands[6])); 1916 1.1 mrg break; 1917 1.1 mrg 1918 1.1 mrg case LTU: 1919 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1920 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1921 1.1 mrg 
output_asm_insn ("scs %5", loperands); 1922 1.1 mrg break; 1923 1.1 mrg 1924 1.1 mrg case GE: 1925 1.1 mrg loperands[6] = gen_label_rtx (); 1926 1.1 mrg output_asm_insn ("scc %5\n\tjra %l6", loperands); 1927 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1928 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1929 1.1 mrg output_asm_insn ("sge %5", loperands); 1930 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1931 1.1 mrg CODE_LABEL_NUMBER (loperands[6])); 1932 1.1 mrg break; 1933 1.1 mrg 1934 1.1 mrg case GEU: 1935 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1936 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1937 1.1 mrg output_asm_insn ("scc %5", loperands); 1938 1.1 mrg break; 1939 1.1 mrg 1940 1.1 mrg case LE: 1941 1.1 mrg loperands[6] = gen_label_rtx (); 1942 1.1 mrg output_asm_insn ("sls %5\n\tjra %l6", loperands); 1943 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1944 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1945 1.1 mrg output_asm_insn ("sle %5", loperands); 1946 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1947 1.1 mrg CODE_LABEL_NUMBER (loperands[6])); 1948 1.1 mrg break; 1949 1.1 mrg 1950 1.1 mrg case LEU: 1951 1.1 mrg (*targetm.asm_out.internal_label) (asm_out_file, "L", 1952 1.1 mrg CODE_LABEL_NUMBER (loperands[4])); 1953 1.1 mrg output_asm_insn ("sls %5", loperands); 1954 1.1 mrg break; 1955 1.1 mrg 1956 1.1 mrg default: 1957 1.1 mrg gcc_unreachable (); 1958 1.1 mrg } 1959 1.1 mrg return ""; 1960 1.1 mrg } 1961 1.1 mrg 1962 1.1 mrg rtx_code 1963 1.1 mrg m68k_output_btst (rtx countop, rtx dataop, rtx_code code, int signpos) 1964 1.1 mrg { 1965 1.1 mrg rtx ops[2]; 1966 1.1 mrg ops[0] = countop; 1967 1.1 mrg ops[1] = dataop; 1968 1.1 mrg 1969 1.1 mrg if (GET_CODE (countop) == CONST_INT) 1970 1.1 mrg { 1971 1.1 mrg int count = INTVAL (countop); 1972 1.1 mrg /* If COUNT is bigger than size of storage unit in use, 1973 1.1 mrg advance to the containing unit of same size. 
*/ 1974 1.1 mrg if (count > signpos) 1975 1.1 mrg { 1976 1.1 mrg int offset = (count & ~signpos) / 8; 1977 1.1 mrg count = count & signpos; 1978 1.1 mrg ops[1] = dataop = adjust_address (dataop, QImode, offset); 1979 1.1 mrg } 1980 1.1 mrg 1981 1.1 mrg if (code == EQ || code == NE) 1982 1.1 mrg { 1983 1.1 mrg if (count == 31) 1984 1.1 mrg { 1985 1.1 mrg output_asm_insn ("tst%.l %1", ops); 1986 1.1 mrg return code == EQ ? PLUS : MINUS; 1987 1.1 mrg } 1988 1.1 mrg if (count == 15) 1989 1.1 mrg { 1990 1.1 mrg output_asm_insn ("tst%.w %1", ops); 1991 1.1 mrg return code == EQ ? PLUS : MINUS; 1992 1.1 mrg } 1993 1.1 mrg if (count == 7) 1994 1.1 mrg { 1995 1.1 mrg output_asm_insn ("tst%.b %1", ops); 1996 1.1 mrg return code == EQ ? PLUS : MINUS; 1997 1.1 mrg } 1998 1.1 mrg } 1999 1.1 mrg /* Try to use `movew to ccr' followed by the appropriate branch insn. 2000 1.1 mrg On some m68k variants unfortunately that's slower than btst. 2001 1.1 mrg On 68000 and higher, that should also work for all HImode operands. */ 2002 1.1 mrg if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size) 2003 1.1 mrg { 2004 1.1 mrg if (count == 3 && DATA_REG_P (ops[1]) && (code == EQ || code == NE)) 2005 1.1 mrg { 2006 1.1 mrg output_asm_insn ("move%.w %1,%%ccr", ops); 2007 1.1 mrg return code == EQ ? PLUS : MINUS; 2008 1.1 mrg } 2009 1.1 mrg if (count == 2 && DATA_REG_P (ops[1]) && (code == EQ || code == NE)) 2010 1.1 mrg { 2011 1.1 mrg output_asm_insn ("move%.w %1,%%ccr", ops); 2012 1.1 mrg return code == EQ ? NE : EQ; 2013 1.1 mrg } 2014 1.1 mrg /* count == 1 followed by bvc/bvs and 2015 1.1 mrg count == 0 followed by bcc/bcs are also possible, but need 2016 1.1 mrg m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */ 2017 1.1 mrg } 2018 1.1 mrg } 2019 1.1 mrg output_asm_insn ("btst %0,%1", ops); 2020 1.1 mrg return code; 2021 1.1 mrg } 2022 1.1 mrg 2023 1.1 mrg /* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2 2024 1.1 mrg operands. 
CODE is the code of the comparison, and we return the code to 2025 1.1 mrg be actually used in the jump. */ 2026 1.1 mrg 2027 1.1 mrg rtx_code 2028 1.1 mrg m68k_output_bftst (rtx zxop0, rtx zxop1, rtx zxop2, rtx_code code) 2029 1.1 mrg { 2030 1.1 mrg if (zxop1 == const1_rtx && GET_CODE (zxop2) == CONST_INT) 2031 1.1 mrg { 2032 1.1 mrg int width = GET_CODE (zxop0) == REG ? 31 : 7; 2033 1.1 mrg /* Pass 1000 as SIGNPOS argument so that btst will 2034 1.1 mrg not think we are testing the sign bit for an `and' 2035 1.1 mrg and assume that nonzero implies a negative result. */ 2036 1.1 mrg return m68k_output_btst (GEN_INT (width - INTVAL (zxop2)), zxop0, code, 1000); 2037 1.1 mrg } 2038 1.1 mrg rtx ops[3] = { zxop0, zxop1, zxop2 }; 2039 1.1 mrg output_asm_insn ("bftst %0{%b2:%b1}", ops); 2040 1.1 mrg return code; 2041 1.1 mrg } 2042 1.1 mrg 2043 1.1 mrg /* Return true if X is a legitimate base register. STRICT_P says 2045 1.1 mrg whether we need strict checking. */ 2046 1.1 mrg 2047 1.1 mrg bool 2048 1.1 mrg m68k_legitimate_base_reg_p (rtx x, bool strict_p) 2049 1.1 mrg { 2050 1.1 mrg /* Allow SUBREG everywhere we allow REG. This results in better code. */ 2051 1.1 mrg if (!strict_p && GET_CODE (x) == SUBREG) 2052 1.1 mrg x = SUBREG_REG (x); 2053 1.1 mrg 2054 1.1 mrg return (REG_P (x) 2055 1.1 mrg && (strict_p 2056 1.1 mrg ? REGNO_OK_FOR_BASE_P (REGNO (x)) 2057 1.1 mrg : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x)))); 2058 1.1 mrg } 2059 1.1 mrg 2060 1.1 mrg /* Return true if X is a legitimate index register. STRICT_P says 2061 1.1 mrg whether we need strict checking. */ 2062 1.1 mrg 2063 1.1 mrg bool 2064 1.1 mrg m68k_legitimate_index_reg_p (rtx x, bool strict_p) 2065 1.1 mrg { 2066 1.1 mrg if (!strict_p && GET_CODE (x) == SUBREG) 2067 1.1 mrg x = SUBREG_REG (x); 2068 1.1 mrg 2069 1.1 mrg return (REG_P (x) 2070 1.1 mrg && (strict_p 2071 1.1 mrg ? 
REGNO_OK_FOR_INDEX_P (REGNO (x)) 2072 1.1 mrg : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))); 2073 1.1 mrg } 2074 1.1 mrg 2075 1.1 mrg /* Return true if X is a legitimate index expression for a (d8,An,Xn) or 2076 1.1 mrg (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of 2077 1.1 mrg ADDRESS if so. STRICT_P says whether we need strict checking. */ 2078 1.1 mrg 2079 1.1 mrg static bool 2080 1.1 mrg m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address) 2081 1.1 mrg { 2082 1.1 mrg int scale; 2083 1.1 mrg 2084 1.1 mrg /* Check for a scale factor. */ 2085 1.1 mrg scale = 1; 2086 1.1 mrg if ((TARGET_68020 || TARGET_COLDFIRE) 2087 1.1 mrg && GET_CODE (x) == MULT 2088 1.1 mrg && GET_CODE (XEXP (x, 1)) == CONST_INT 2089 1.1 mrg && (INTVAL (XEXP (x, 1)) == 2 2090 1.1 mrg || INTVAL (XEXP (x, 1)) == 4 2091 1.1 mrg || (INTVAL (XEXP (x, 1)) == 8 2092 1.1 mrg && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE)))) 2093 1.1 mrg { 2094 1.1 mrg scale = INTVAL (XEXP (x, 1)); 2095 1.1 mrg x = XEXP (x, 0); 2096 1.1 mrg } 2097 1.1 mrg 2098 1.1 mrg /* Check for a word extension. */ 2099 1.1 mrg if (!TARGET_COLDFIRE 2100 1.1 mrg && GET_CODE (x) == SIGN_EXTEND 2101 1.1 mrg && GET_MODE (XEXP (x, 0)) == HImode) 2102 1.1 mrg x = XEXP (x, 0); 2103 1.1 mrg 2104 1.1 mrg if (m68k_legitimate_index_reg_p (x, strict_p)) 2105 1.1 mrg { 2106 1.1 mrg address->scale = scale; 2107 1.1 mrg address->index = x; 2108 1.1 mrg return true; 2109 1.1 mrg } 2110 1.1 mrg 2111 1.1 mrg return false; 2112 1.1 mrg } 2113 1.1 mrg 2114 1.1 mrg /* Return true if X is an illegitimate symbolic constant. 
*/ 2115 1.1 mrg 2116 1.1 mrg bool 2117 1.1 mrg m68k_illegitimate_symbolic_constant_p (rtx x) 2118 1.1 mrg { 2119 1.1 mrg rtx base, offset; 2120 1.1 mrg 2121 1.1 mrg if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) 2122 1.1 mrg { 2123 1.1 mrg split_const (x, &base, &offset); 2124 1.1 mrg if (GET_CODE (base) == SYMBOL_REF 2125 1.1 mrg && !offset_within_block_p (base, INTVAL (offset))) 2126 1.1 mrg return true; 2127 1.1 mrg } 2128 1.1 mrg return m68k_tls_reference_p (x, false); 2129 1.1 mrg } 2130 1.1 mrg 2131 1.1 mrg /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ 2132 1.1 mrg 2133 1.1 mrg static bool 2134 1.1 mrg m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x) 2135 1.1 mrg { 2136 1.1 mrg return m68k_illegitimate_symbolic_constant_p (x); 2137 1.1 mrg } 2138 1.1 mrg 2139 1.1 mrg /* Return true if X is a legitimate constant address that can reach 2140 1.1 mrg bytes in the range [X, X + REACH). STRICT_P says whether we need 2141 1.1 mrg strict checking. */ 2142 1.1 mrg 2143 1.1 mrg static bool 2144 1.1 mrg m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p) 2145 1.1 mrg { 2146 1.1 mrg rtx base, offset; 2147 1.1 mrg 2148 1.1 mrg if (!CONSTANT_ADDRESS_P (x)) 2149 1.1 mrg return false; 2150 1.1 mrg 2151 1.1 mrg if (flag_pic 2152 1.1 mrg && !(strict_p && TARGET_PCREL) 2153 1.1 mrg && symbolic_operand (x, VOIDmode)) 2154 1.1 mrg return false; 2155 1.1 mrg 2156 1.1 mrg if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1) 2157 1.1 mrg { 2158 1.1 mrg split_const (x, &base, &offset); 2159 1.1 mrg if (GET_CODE (base) == SYMBOL_REF 2160 1.1 mrg && !offset_within_block_p (base, INTVAL (offset) + reach - 1)) 2161 1.1 mrg return false; 2162 1.1 mrg } 2163 1.1 mrg 2164 1.1 mrg return !m68k_tls_reference_p (x, false); 2165 1.1 mrg } 2166 1.1 mrg 2167 1.1 mrg /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced 2168 1.1 mrg labels will become jump tables. 
 */

static bool
m68k_jump_table_ref_p (rtx x)
{
  if (GET_CODE (x) != LABEL_REF)
    return false;

  rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
  /* An unplaced label (not yet linked into the insn chain) is assumed
     to become a jump table; see the ??? comment in
     m68k_decompose_address.  */
  if (!NEXT_INSN (insn) && !PREV_INSN (insn))
    return true;

  insn = next_nonnote_insn (insn);
  return insn && JUMP_TABLE_DATA_P (insn);
}

/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past X the access must be able to touch.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}

/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  */

bool
m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
{
  struct m68k_address address;

  return m68k_decompose_address (mode, x, strict_p, &address);
}

/* Return true if X is a memory, describing its address in ADDRESS if so.
   Apply strict checking if called during or after reload.  */

static bool
m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
{
  return (MEM_P (x)
	  && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
				     reload_in_progress || reload_completed,
				     address));
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

bool
m68k_legitimate_constant_p (machine_mode mode, rtx x)
{
  /* XFmode constants are rejected outright.  */
  return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
}

/* Return true if X matches the 'Q' constraint.  It must be a memory
   with a base address and no constant offset or index.
*/ 2366 1.1 mrg 2367 1.1 mrg bool 2368 1.1 mrg m68k_matches_q_p (rtx x) 2369 1.1 mrg { 2370 1.1 mrg struct m68k_address address; 2371 1.1 mrg 2372 1.1 mrg return (m68k_legitimate_mem_p (x, &address) 2373 1.1 mrg && address.code == UNKNOWN 2374 1.1 mrg && address.base 2375 1.1 mrg && !address.offset 2376 1.1 mrg && !address.index); 2377 1.1 mrg } 2378 1.1 mrg 2379 1.1 mrg /* Return true if X matches the 'U' constraint. It must be a base address 2380 1.1 mrg with a constant offset and no index. */ 2381 1.1 mrg 2382 1.1 mrg bool 2383 1.1 mrg m68k_matches_u_p (rtx x) 2384 1.1 mrg { 2385 1.1 mrg struct m68k_address address; 2386 1.1 mrg 2387 1.1 mrg return (m68k_legitimate_mem_p (x, &address) 2388 1.1 mrg && address.code == UNKNOWN 2389 1.1 mrg && address.base 2390 1.1 mrg && address.offset 2391 1.1 mrg && !address.index); 2392 1.1 mrg } 2393 1.1 mrg 2394 1.1 mrg /* Return GOT pointer. */ 2395 1.1 mrg 2396 1.1 mrg static rtx 2397 1.1 mrg m68k_get_gp (void) 2398 1.1 mrg { 2399 1.1 mrg if (pic_offset_table_rtx == NULL_RTX) 2400 1.1 mrg pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG); 2401 1.1 mrg 2402 1.1 mrg crtl->uses_pic_offset_table = 1; 2403 1.1 mrg 2404 1.1 mrg return pic_offset_table_rtx; 2405 1.1 mrg } 2406 1.1 mrg 2407 1.1 mrg /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC 2408 1.1 mrg wrappers. */ 2409 1.1 mrg enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO, 2410 1.1 mrg RELOC_TLSIE, RELOC_TLSLE }; 2411 1.1 mrg 2412 1.1 mrg #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT) 2413 1.1 mrg 2414 1.1 mrg /* Wrap symbol X into unspec representing relocation RELOC. 2415 1.1 mrg BASE_REG - register that should be added to the result. 2416 1.1 mrg TEMP_REG - if non-null, temporary register. 
 */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs references through the PIC register, -mxtls all
     other (TLS) bases.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      /* Load the 32-bit relocation and add the base register with
	 explicitly emitted insns; the result lives in TEMP_REG.  */
      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* Otherwise a 16-bit relocation is folded into a
	 (BASE_REG + const) address expression.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}

/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.
*/ 2462 1.1 mrg 2463 1.1 mrg static rtx 2464 1.1 mrg m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p, 2465 1.1 mrg enum m68k_reloc *reloc_ptr) 2466 1.1 mrg { 2467 1.1 mrg if (GET_CODE (orig) == CONST) 2468 1.1 mrg { 2469 1.1 mrg rtx x; 2470 1.1 mrg enum m68k_reloc dummy; 2471 1.1 mrg 2472 1.1 mrg x = XEXP (orig, 0); 2473 1.1 mrg 2474 1.1 mrg if (reloc_ptr == NULL) 2475 1.1 mrg reloc_ptr = &dummy; 2476 1.1 mrg 2477 1.1 mrg /* Handle an addend. */ 2478 1.1 mrg if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS) 2479 1.1 mrg && CONST_INT_P (XEXP (x, 1))) 2480 1.1 mrg x = XEXP (x, 0); 2481 1.1 mrg 2482 1.1 mrg if (GET_CODE (x) == UNSPEC) 2483 1.1 mrg { 2484 1.1 mrg switch (XINT (x, 1)) 2485 1.1 mrg { 2486 1.1 mrg case UNSPEC_RELOC16: 2487 1.1 mrg orig = XVECEXP (x, 0, 0); 2488 1.1 mrg *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1)); 2489 1.1 mrg break; 2490 1.1 mrg 2491 1.1 mrg case UNSPEC_RELOC32: 2492 1.1 mrg if (unwrap_reloc32_p) 2493 1.1 mrg { 2494 1.1 mrg orig = XVECEXP (x, 0, 0); 2495 1.1 mrg *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1)); 2496 1.1 mrg } 2497 1.1 mrg break; 2498 1.1 mrg 2499 1.1 mrg default: 2500 1.1 mrg break; 2501 1.1 mrg } 2502 1.1 mrg } 2503 1.1 mrg } 2504 1.1 mrg 2505 1.1 mrg return orig; 2506 1.1 mrg } 2507 1.1 mrg 2508 1.1 mrg /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p, 2509 1.1 mrg UNSPEC_RELOC32 wrappers. */ 2510 1.1 mrg 2511 1.1 mrg rtx 2512 1.1 mrg m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p) 2513 1.1 mrg { 2514 1.1 mrg return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL); 2515 1.1 mrg } 2516 1.1 mrg 2517 1.1 mrg /* Adjust decorated address operand before outputing assembler for it. 
 */

static void
m68k_adjust_decorated_operand (rtx op)
{
  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
    {
      rtx x = *iter;
      /* m68k_unwrap_symbol changing X tells us X is a decorated
	 (RELOC16/RELOC32) reference.  */
      if (m68k_unwrap_symbol (x, true) != x)
	{
	  rtx plus;

	  gcc_assert (GET_CODE (x) == CONST);
	  plus = XEXP (x, 0);

	  if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	    {
	      rtx unspec;
	      rtx addend;

	      unspec = XEXP (plus, 0);
	      gcc_assert (GET_CODE (unspec) == UNSPEC);
	      addend = XEXP (plus, 1);
	      gcc_assert (CONST_INT_P (addend));

	      /* We now have all the pieces, rearrange them.  */

	      /* Move symbol to plus.  */
	      XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	      /* Move plus inside unspec.  */
	      XVECEXP (unspec, 0, 0) = plus;

	      /* Move unspec to top level of const.  */
	      XEXP (x, 0) = unspec;
	    }
	  /* The decorated reference has been handled as a whole; do not
	     walk into its subexpressions.  */
	  iter.skip_subrtxes ();
	}
    }
}

/* Move X to a register and add REG_EQUAL note pointing to ORIG.
   If REG is non-null, use it; generate new pseudo otherwise.  */

static rtx
m68k_move_to_reg (rtx x, rtx orig, rtx reg)
{
  rtx_insn *insn;

  if (reg == NULL_RTX)
    {
      gcc_assert (can_create_pseudo_p ());
      reg = gen_reg_rtx (Pmode);
    }

  insn = emit_move_insn (reg, x);
  /* Put a REG_EQUAL note on this insn, so that it can be optimized
     by loop.  */
  set_unique_reg_note (insn, REG_EQUAL, orig);

  return reg;
}

/* Does the same as m68k_wrap_symbol, but returns a memory reference to
   GOT slot.  */

static rtx
m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
{
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);

  /* The GOT slot is read-only from the program's point of view.  */
  x = gen_rtx_MEM (Pmode, x);
  MEM_READONLY_P (x) = 1;

  return x;
}

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.
The function prologue 2623 1.1 mrg is responsible for initializing a5 to the starting address 2624 1.1 mrg of the GOT. 2625 1.1 mrg 2626 1.1 mrg The assembler is also responsible for translating a symbol name 2627 1.1 mrg into a constant displacement from the start of the GOT. 2628 1.1 mrg 2629 1.1 mrg A quick example may make things a little clearer: 2630 1.1 mrg 2631 1.1 mrg When not generating PIC code to store the value 12345 into _foo 2632 1.1 mrg we would generate the following code: 2633 1.1 mrg 2634 1.1 mrg movel #12345, _foo 2635 1.1 mrg 2636 1.1 mrg When generating PIC two transformations are made. First, the compiler 2637 1.1 mrg loads the address of foo into a register. So the first transformation makes: 2638 1.1 mrg 2639 1.1 mrg lea _foo, a0 2640 1.1 mrg movel #12345, a0@ 2641 1.1 mrg 2642 1.1 mrg The code in movsi will intercept the lea instruction and call this 2643 1.1 mrg routine which will transform the instructions into: 2644 1.1 mrg 2645 1.1 mrg movel a5@(_foo:w), a0 2646 1.1 mrg movel #12345, a0@ 2647 1.1 mrg 2648 1.1 mrg 2649 1.1 mrg That (in a nutshell) is how *all* symbol and label references are 2650 1.1 mrg handled. */ 2651 1.1 mrg 2652 1.1 mrg rtx 2653 1.1 mrg legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED, 2654 1.1 mrg rtx reg) 2655 1.1 mrg { 2656 1.1 mrg rtx pic_ref = orig; 2657 1.1 mrg 2658 1.1 mrg /* First handle a simple SYMBOL_REF or LABEL_REF */ 2659 1.1 mrg if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF) 2660 1.1 mrg { 2661 1.1 mrg gcc_assert (reg); 2662 1.1 mrg 2663 1.1 mrg pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg); 2664 1.1 mrg pic_ref = m68k_move_to_reg (pic_ref, orig, reg); 2665 1.1 mrg } 2666 1.1 mrg else if (GET_CODE (orig) == CONST) 2667 1.1 mrg { 2668 1.1 mrg rtx base; 2669 1.1 mrg 2670 1.1 mrg /* Make sure this has not already been legitimized. 
*/ 2671 1.1 mrg if (m68k_unwrap_symbol (orig, true) != orig) 2672 1.1 mrg return orig; 2673 1.1 mrg 2674 1.1 mrg gcc_assert (reg); 2675 1.1 mrg 2676 1.1 mrg /* legitimize both operands of the PLUS */ 2677 1.1 mrg gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS); 2678 1.1 mrg 2679 1.1 mrg base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); 2680 1.1 mrg orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, 2681 1.1 mrg base == reg ? 0 : reg); 2682 1.1 mrg 2683 1.1 mrg if (GET_CODE (orig) == CONST_INT) 2684 1.1 mrg pic_ref = plus_constant (Pmode, base, INTVAL (orig)); 2685 1.1 mrg else 2686 1.1 mrg pic_ref = gen_rtx_PLUS (Pmode, base, orig); 2687 1.1 mrg } 2688 1.1 mrg 2689 1.1 mrg return pic_ref; 2690 1.1 mrg } 2691 1.1 mrg 2692 1.1 mrg /* The __tls_get_addr symbol. */ 2693 1.1 mrg static GTY(()) rtx m68k_tls_get_addr; 2694 1.1 mrg 2695 1.1 mrg /* Return SYMBOL_REF for __tls_get_addr. */ 2696 1.1 mrg 2697 1.1 mrg static rtx 2698 1.1 mrg m68k_get_tls_get_addr (void) 2699 1.1 mrg { 2700 1.1 mrg if (m68k_tls_get_addr == NULL_RTX) 2701 1.1 mrg m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr"); 2702 1.1 mrg 2703 1.1 mrg return m68k_tls_get_addr; 2704 1.1 mrg } 2705 1.1 mrg 2706 1.1 mrg /* Return libcall result in A0 instead of usual D0. */ 2707 1.1 mrg static bool m68k_libcall_value_in_a0_p = false; 2708 1.1 mrg 2709 1.1 mrg /* Emit instruction sequence that calls __tls_get_addr. X is 2710 1.1 mrg the TLS symbol we are referencing and RELOC is the symbol type to use 2711 1.1 mrg (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence 2712 1.1 mrg emitted. A pseudo register with result of __tls_get_addr call is 2713 1.1 mrg returned. */ 2714 1.1 mrg 2715 1.1 mrg static rtx 2716 1.1 mrg m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc) 2717 1.1 mrg { 2718 1.1 mrg rtx a0; 2719 1.1 mrg rtx_insn *insns; 2720 1.1 mrg rtx dest; 2721 1.1 mrg 2722 1.1 mrg /* Emit the call sequence. 
*/ 2723 1.1 mrg start_sequence (); 2724 1.1 mrg 2725 1.1 mrg /* FIXME: Unfortunately, emit_library_call_value does not 2726 1.1 mrg consider (plus (%a5) (const (unspec))) to be a good enough 2727 1.1 mrg operand for push, so it forces it into a register. The bad 2728 1.1 mrg thing about this is that combiner, due to copy propagation and other 2729 1.1 mrg optimizations, sometimes cannot later fix this. As a consequence, 2730 1.1 mrg additional register may be allocated resulting in a spill. 2731 1.1 mrg For reference, see args processing loops in 2732 1.1 mrg calls.cc:emit_library_call_value_1. 2733 1.1 mrg For testcase, see gcc.target/m68k/tls-{gd, ld}.c */ 2734 1.1 mrg x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX); 2735 1.1 mrg 2736 1.1 mrg /* __tls_get_addr() is not a libcall, but emitting a libcall_value 2737 1.1 mrg is the simpliest way of generating a call. The difference between 2738 1.1 mrg __tls_get_addr() and libcall is that the result is returned in D0 2739 1.1 mrg instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p 2740 1.1 mrg which temporarily switches returning the result to A0. */ 2741 1.1 mrg 2742 1.1 mrg m68k_libcall_value_in_a0_p = true; 2743 1.1 mrg a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE, 2744 1.1 mrg Pmode, x, Pmode); 2745 1.1 mrg m68k_libcall_value_in_a0_p = false; 2746 1.1 mrg 2747 1.1 mrg insns = get_insns (); 2748 1.1 mrg end_sequence (); 2749 1.1 mrg 2750 1.1 mrg gcc_assert (can_create_pseudo_p ()); 2751 1.1 mrg dest = gen_reg_rtx (Pmode); 2752 1.1 mrg emit_libcall_block (insns, dest, a0, eqv); 2753 1.1 mrg 2754 1.1 mrg return dest; 2755 1.1 mrg } 2756 1.1 mrg 2757 1.1 mrg /* The __tls_get_addr symbol. */ 2758 1.1 mrg static GTY(()) rtx m68k_read_tp; 2759 1.1 mrg 2760 1.1 mrg /* Return SYMBOL_REF for __m68k_read_tp. 
*/ 2761 1.1 mrg 2762 1.1 mrg static rtx 2763 1.1 mrg m68k_get_m68k_read_tp (void) 2764 1.1 mrg { 2765 1.1 mrg if (m68k_read_tp == NULL_RTX) 2766 1.1 mrg m68k_read_tp = init_one_libfunc ("__m68k_read_tp"); 2767 1.1 mrg 2768 1.1 mrg return m68k_read_tp; 2769 1.1 mrg } 2770 1.1 mrg 2771 1.1 mrg /* Emit instruction sequence that calls __m68k_read_tp. 2772 1.1 mrg A pseudo register with result of __m68k_read_tp call is returned. */ 2773 1.1 mrg 2774 1.1 mrg static rtx 2775 1.1 mrg m68k_call_m68k_read_tp (void) 2776 1.1 mrg { 2777 1.1 mrg rtx a0; 2778 1.1 mrg rtx eqv; 2779 1.1 mrg rtx_insn *insns; 2780 1.1 mrg rtx dest; 2781 1.1 mrg 2782 1.1 mrg start_sequence (); 2783 1.1 mrg 2784 1.1 mrg /* __m68k_read_tp() is not a libcall, but emitting a libcall_value 2785 1.1 mrg is the simpliest way of generating a call. The difference between 2786 1.1 mrg __m68k_read_tp() and libcall is that the result is returned in D0 2787 1.1 mrg instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p 2788 1.1 mrg which temporarily switches returning the result to A0. */ 2789 1.1 mrg 2790 1.1 mrg /* Emit the call sequence. */ 2791 1.1 mrg m68k_libcall_value_in_a0_p = true; 2792 1.1 mrg a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE, 2793 1.1 mrg Pmode); 2794 1.1 mrg m68k_libcall_value_in_a0_p = false; 2795 1.1 mrg insns = get_insns (); 2796 1.1 mrg end_sequence (); 2797 1.1 mrg 2798 1.1 mrg /* Attach a unique REG_EQUIV, to allow the RTL optimizers to 2799 1.1 mrg share the m68k_read_tp result with other IE/LE model accesses. */ 2800 1.1 mrg eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32); 2801 1.1 mrg 2802 1.1 mrg gcc_assert (can_create_pseudo_p ()); 2803 1.1 mrg dest = gen_reg_rtx (Pmode); 2804 1.1 mrg emit_libcall_block (insns, dest, a0, eqv); 2805 1.1 mrg 2806 1.1 mrg return dest; 2807 1.1 mrg } 2808 1.1 mrg 2809 1.1 mrg /* Return a legitimized address for accessing TLS SYMBOL_REF X. 
2810 1.1 mrg For explanations on instructions sequences see TLS/NPTL ABI for m68k and 2811 1.1 mrg ColdFire. */ 2812 1.1 mrg 2813 1.1 mrg rtx 2814 1.1 mrg m68k_legitimize_tls_address (rtx orig) 2815 1.1 mrg { 2816 1.1 mrg switch (SYMBOL_REF_TLS_MODEL (orig)) 2817 1.1 mrg { 2818 1.1 mrg case TLS_MODEL_GLOBAL_DYNAMIC: 2819 1.1 mrg orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD); 2820 1.1 mrg break; 2821 1.1 mrg 2822 1.1 mrg case TLS_MODEL_LOCAL_DYNAMIC: 2823 1.1 mrg { 2824 1.1 mrg rtx eqv; 2825 1.1 mrg rtx a0; 2826 1.1 mrg rtx x; 2827 1.1 mrg 2828 1.1 mrg /* Attach a unique REG_EQUIV, to allow the RTL optimizers to 2829 1.1 mrg share the LDM result with other LD model accesses. */ 2830 1.1 mrg eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), 2831 1.1 mrg UNSPEC_RELOC32); 2832 1.1 mrg 2833 1.1 mrg a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM); 2834 1.1 mrg 2835 1.1 mrg x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX); 2836 1.1 mrg 2837 1.1 mrg if (can_create_pseudo_p ()) 2838 1.1 mrg x = m68k_move_to_reg (x, orig, NULL_RTX); 2839 1.1 mrg 2840 1.1 mrg orig = x; 2841 1.1 mrg break; 2842 1.1 mrg } 2843 1.1 mrg 2844 1.1 mrg case TLS_MODEL_INITIAL_EXEC: 2845 1.1 mrg { 2846 1.1 mrg rtx a0; 2847 1.1 mrg rtx x; 2848 1.1 mrg 2849 1.1 mrg a0 = m68k_call_m68k_read_tp (); 2850 1.1 mrg 2851 1.1 mrg x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX); 2852 1.1 mrg x = gen_rtx_PLUS (Pmode, x, a0); 2853 1.1 mrg 2854 1.1 mrg if (can_create_pseudo_p ()) 2855 1.1 mrg x = m68k_move_to_reg (x, orig, NULL_RTX); 2856 1.1 mrg 2857 1.1 mrg orig = x; 2858 1.1 mrg break; 2859 1.1 mrg } 2860 1.1 mrg 2861 1.1 mrg case TLS_MODEL_LOCAL_EXEC: 2862 1.1 mrg { 2863 1.1 mrg rtx a0; 2864 1.1 mrg rtx x; 2865 1.1 mrg 2866 1.1 mrg a0 = m68k_call_m68k_read_tp (); 2867 1.1 mrg 2868 1.1 mrg x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX); 2869 1.1 mrg 2870 1.1 mrg if (can_create_pseudo_p ()) 2871 1.1 mrg x = m68k_move_to_reg (x, orig, NULL_RTX); 2872 1.1 mrg 
2873 1.1 mrg orig = x; 2874 1.1 mrg break; 2875 1.1 mrg } 2876 1.1 mrg 2877 1.1 mrg default: 2878 1.1 mrg gcc_unreachable (); 2879 1.1 mrg } 2880 1.1 mrg 2881 1.1 mrg return orig; 2882 1.1 mrg } 2883 1.1 mrg 2884 1.1 mrg /* Return true if X is a TLS symbol. */ 2885 1.1 mrg 2886 1.1 mrg static bool 2887 1.1 mrg m68k_tls_symbol_p (rtx x) 2888 1.1 mrg { 2889 1.1 mrg if (!TARGET_HAVE_TLS) 2890 1.1 mrg return false; 2891 1.1 mrg 2892 1.1 mrg if (GET_CODE (x) != SYMBOL_REF) 2893 1.1 mrg return false; 2894 1.1 mrg 2895 1.1 mrg return SYMBOL_REF_TLS_MODEL (x) != 0; 2896 1.1 mrg } 2897 1.1 mrg 2898 1.1 mrg /* If !LEGITIMATE_P, return true if X is a TLS symbol reference, 2899 1.1 mrg though illegitimate one. 2900 1.1 mrg If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */ 2901 1.1 mrg 2902 1.1 mrg bool 2903 1.1 mrg m68k_tls_reference_p (rtx x, bool legitimate_p) 2904 1.1 mrg { 2905 1.1 mrg if (!TARGET_HAVE_TLS) 2906 1.1 mrg return false; 2907 1.1 mrg 2908 1.1 mrg if (!legitimate_p) 2909 1.1 mrg { 2910 1.1 mrg subrtx_var_iterator::array_type array; 2911 1.1 mrg FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) 2912 1.1 mrg { 2913 1.1 mrg rtx x = *iter; 2914 1.1 mrg 2915 1.1 mrg /* Note: this is not the same as m68k_tls_symbol_p. */ 2916 1.1 mrg if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0) 2917 1.1 mrg return true; 2918 1.1 mrg 2919 1.1 mrg /* Don't recurse into legitimate TLS references. */ 2920 1.1 mrg if (m68k_tls_reference_p (x, true)) 2921 1.1 mrg iter.skip_subrtxes (); 2922 1.1 mrg } 2923 1.1 mrg return false; 2924 1.1 mrg } 2925 1.1 mrg else 2926 1.1 mrg { 2927 1.1 mrg enum m68k_reloc reloc = RELOC_GOT; 2928 1.1 mrg 2929 1.1 mrg return (m68k_unwrap_symbol_1 (x, true, &reloc) != x 2930 1.1 mrg && TLS_RELOC_P (reloc)); 2931 1.1 mrg } 2932 1.1 mrg } 2933 1.1 mrg 2934 1.1 mrg 2935 1.1 mrg 2937 1.1 mrg #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255) 2938 1.1 mrg 2939 1.1 mrg /* Return the type of move that should be used for integer I. 
*/ 2940 1.1 mrg 2941 1.1 mrg M68K_CONST_METHOD 2942 1.1 mrg m68k_const_method (HOST_WIDE_INT i) 2943 1.1 mrg { 2944 1.1 mrg unsigned u; 2945 1.1 mrg 2946 1.1 mrg if (USE_MOVQ (i)) 2947 1.1 mrg return MOVQ; 2948 1.1 mrg 2949 1.1 mrg /* The ColdFire doesn't have byte or word operations. */ 2950 1.1 mrg /* FIXME: This may not be useful for the m68060 either. */ 2951 1.1 mrg if (!TARGET_COLDFIRE) 2952 1.1 mrg { 2953 1.1 mrg /* if -256 < N < 256 but N is not in range for a moveq 2954 1.1 mrg N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */ 2955 1.1 mrg if (USE_MOVQ (i ^ 0xff)) 2956 1.1 mrg return NOTB; 2957 1.1 mrg /* Likewise, try with not.w */ 2958 1.1 mrg if (USE_MOVQ (i ^ 0xffff)) 2959 1.1 mrg return NOTW; 2960 1.1 mrg /* This is the only value where neg.w is useful */ 2961 1.1 mrg if (i == -65408) 2962 1.1 mrg return NEGW; 2963 1.1 mrg } 2964 1.1 mrg 2965 1.1 mrg /* Try also with swap. */ 2966 1.1 mrg u = i; 2967 1.1 mrg if (USE_MOVQ ((u >> 16) | (u << 16))) 2968 1.1 mrg return SWAP; 2969 1.1 mrg 2970 1.1 mrg if (TARGET_ISAB) 2971 1.1 mrg { 2972 1.1 mrg /* Try using MVZ/MVS with an immediate value to load constants. */ 2973 1.1 mrg if (i >= 0 && i <= 65535) 2974 1.1 mrg return MVZ; 2975 1.1 mrg if (i >= -32768 && i <= 32767) 2976 1.1 mrg return MVS; 2977 1.1 mrg } 2978 1.1 mrg 2979 1.1 mrg /* Otherwise, use move.l */ 2980 1.1 mrg return MOVL; 2981 1.1 mrg } 2982 1.1 mrg 2983 1.1 mrg /* Return the cost of moving constant I into a data register. */ 2984 1.1 mrg 2985 1.1 mrg static int 2986 1.1 mrg const_int_cost (HOST_WIDE_INT i) 2987 1.1 mrg { 2988 1.1 mrg switch (m68k_const_method (i)) 2989 1.1 mrg { 2990 1.1 mrg case MOVQ: 2991 1.1 mrg /* Constants between -128 and 127 are cheap due to moveq. */ 2992 1.1 mrg return 0; 2993 1.1 mrg case MVZ: 2994 1.1 mrg case MVS: 2995 1.1 mrg case NOTB: 2996 1.1 mrg case NOTW: 2997 1.1 mrg case NEGW: 2998 1.1 mrg case SWAP: 2999 1.1 mrg /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. 
*/ 3000 1.1 mrg return 1; 3001 1.1 mrg case MOVL: 3002 1.1 mrg return 2; 3003 1.1 mrg default: 3004 1.1 mrg gcc_unreachable (); 3005 1.1 mrg } 3006 1.1 mrg } 3007 1.1 mrg 3008 1.1 mrg static bool 3009 1.1 mrg m68k_rtx_costs (rtx x, machine_mode mode, int outer_code, 3010 1.1 mrg int opno ATTRIBUTE_UNUSED, 3011 1.1 mrg int *total, bool speed ATTRIBUTE_UNUSED) 3012 1.1 mrg { 3013 1.1 mrg int code = GET_CODE (x); 3014 1.1 mrg 3015 1.1 mrg switch (code) 3016 1.1 mrg { 3017 1.1 mrg case CONST_INT: 3018 1.1 mrg /* Constant zero is super cheap due to clr instruction. */ 3019 1.1 mrg if (x == const0_rtx) 3020 1.1 mrg *total = 0; 3021 1.1 mrg else 3022 1.1 mrg *total = const_int_cost (INTVAL (x)); 3023 1.1 mrg return true; 3024 1.1 mrg 3025 1.1 mrg case CONST: 3026 1.1 mrg case LABEL_REF: 3027 1.1 mrg case SYMBOL_REF: 3028 1.1 mrg *total = 3; 3029 1.1 mrg return true; 3030 1.1 mrg 3031 1.1 mrg case CONST_DOUBLE: 3032 1.1 mrg /* Make 0.0 cheaper than other floating constants to 3033 1.1 mrg encourage creating tstsf and tstdf insns. */ 3034 1.1 mrg if ((GET_RTX_CLASS (outer_code) == RTX_COMPARE 3035 1.1 mrg || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE) 3036 1.1 mrg && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode))) 3037 1.1 mrg *total = 4; 3038 1.1 mrg else 3039 1.1 mrg *total = 5; 3040 1.1 mrg return true; 3041 1.1 mrg 3042 1.1 mrg /* These are vaguely right for a 68020. */ 3043 1.1 mrg /* The costs for long multiply have been adjusted to work properly 3044 1.1 mrg in synth_mult on the 68020, relative to an average of the time 3045 1.1 mrg for add and the time for shift, taking away a little more because 3046 1.1 mrg sometimes move insns are needed. */ 3047 1.1 mrg /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS 3048 1.1 mrg terms. */ 3049 1.1 mrg #define MULL_COST \ 3050 1.1 mrg (TUNE_68060 ? 2 \ 3051 1.1 mrg : TUNE_68040 ? 5 \ 3052 1.1 mrg : (TUNE_CFV2 && TUNE_EMAC) ? 3 \ 3053 1.1 mrg : (TUNE_CFV2 && TUNE_MAC) ? 4 \ 3054 1.1 mrg : TUNE_CFV2 ? 
8 \ 3055 1.1 mrg : TARGET_COLDFIRE ? 3 : 13) 3056 1.1 mrg 3057 1.1 mrg #define MULW_COST \ 3058 1.1 mrg (TUNE_68060 ? 2 \ 3059 1.1 mrg : TUNE_68040 ? 3 \ 3060 1.1 mrg : TUNE_68000_10 ? 5 \ 3061 1.1 mrg : (TUNE_CFV2 && TUNE_EMAC) ? 3 \ 3062 1.1 mrg : (TUNE_CFV2 && TUNE_MAC) ? 2 \ 3063 1.1 mrg : TUNE_CFV2 ? 8 \ 3064 1.1 mrg : TARGET_COLDFIRE ? 2 : 8) 3065 1.1 mrg 3066 1.1 mrg #define DIVW_COST \ 3067 1.1 mrg (TARGET_CF_HWDIV ? 11 \ 3068 1.1 mrg : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27) 3069 1.1 mrg 3070 1.1 mrg case PLUS: 3071 1.1 mrg /* An lea costs about three times as much as a simple add. */ 3072 1.1 mrg if (mode == SImode 3073 1.1 mrg && GET_CODE (XEXP (x, 1)) == REG 3074 1.1 mrg && GET_CODE (XEXP (x, 0)) == MULT 3075 1.1 mrg && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG 3076 1.1 mrg && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT 3077 1.1 mrg && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2 3078 1.1 mrg || INTVAL (XEXP (XEXP (x, 0), 1)) == 4 3079 1.1 mrg || INTVAL (XEXP (XEXP (x, 0), 1)) == 8)) 3080 1.1 mrg { 3081 1.1 mrg /* lea an@(dx:l:i),am */ 3082 1.1 mrg *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3); 3083 1.1 mrg return true; 3084 1.1 mrg } 3085 1.1 mrg return false; 3086 1.1 mrg 3087 1.1 mrg case ASHIFT: 3088 1.1 mrg case ASHIFTRT: 3089 1.1 mrg case LSHIFTRT: 3090 1.1 mrg if (TUNE_68060) 3091 1.1 mrg { 3092 1.1 mrg *total = COSTS_N_INSNS(1); 3093 1.1 mrg return true; 3094 1.1 mrg } 3095 1.1 mrg if (TUNE_68000_10) 3096 1.1 mrg { 3097 1.1 mrg if (GET_CODE (XEXP (x, 1)) == CONST_INT) 3098 1.1 mrg { 3099 1.1 mrg if (INTVAL (XEXP (x, 1)) < 16) 3100 1.1 mrg *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2; 3101 1.1 mrg else 3102 1.1 mrg /* We're using clrw + swap for these cases. */ 3103 1.1 mrg *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2; 3104 1.1 mrg } 3105 1.1 mrg else 3106 1.1 mrg *total = COSTS_N_INSNS (10); /* Worst case. */ 3107 1.1 mrg return true; 3108 1.1 mrg } 3109 1.1 mrg /* A shift by a big integer takes an extra instruction. 
*/ 3110 1.1 mrg if (GET_CODE (XEXP (x, 1)) == CONST_INT 3111 1.1 mrg && (INTVAL (XEXP (x, 1)) == 16)) 3112 1.1 mrg { 3113 1.1 mrg *total = COSTS_N_INSNS (2); /* clrw;swap */ 3114 1.1 mrg return true; 3115 1.1 mrg } 3116 1.1 mrg if (GET_CODE (XEXP (x, 1)) == CONST_INT 3117 1.1 mrg && !(INTVAL (XEXP (x, 1)) > 0 3118 1.1 mrg && INTVAL (XEXP (x, 1)) <= 8)) 3119 1.1 mrg { 3120 1.1 mrg *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */ 3121 1.1 mrg return true; 3122 1.1 mrg } 3123 1.1 mrg return false; 3124 1.1 mrg 3125 1.1 mrg case MULT: 3126 1.1 mrg if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND 3127 1.1 mrg || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) 3128 1.1 mrg && mode == SImode) 3129 1.1 mrg *total = COSTS_N_INSNS (MULW_COST); 3130 1.1 mrg else if (mode == QImode || mode == HImode) 3131 1.1 mrg *total = COSTS_N_INSNS (MULW_COST); 3132 1.1 mrg else 3133 1.1 mrg *total = COSTS_N_INSNS (MULL_COST); 3134 1.1 mrg return true; 3135 1.1 mrg 3136 1.1 mrg case DIV: 3137 1.1 mrg case UDIV: 3138 1.1 mrg case MOD: 3139 1.1 mrg case UMOD: 3140 1.1 mrg if (mode == QImode || mode == HImode) 3141 1.1 mrg *total = COSTS_N_INSNS (DIVW_COST); /* div.w */ 3142 1.1 mrg else if (TARGET_CF_HWDIV) 3143 1.1 mrg *total = COSTS_N_INSNS (18); 3144 1.1 mrg else 3145 1.1 mrg *total = COSTS_N_INSNS (43); /* div.l */ 3146 1.1 mrg return true; 3147 1.1 mrg 3148 1.1 mrg case ZERO_EXTRACT: 3149 1.1 mrg if (GET_RTX_CLASS (outer_code) == RTX_COMPARE 3150 1.1 mrg || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE) 3151 1.1 mrg *total = 0; 3152 1.1 mrg return false; 3153 1.1 mrg 3154 1.1 mrg default: 3155 1.1 mrg return false; 3156 1.1 mrg } 3157 1.1 mrg } 3158 1.1 mrg 3159 1.1 mrg /* Return an instruction to move CONST_INT OPERANDS[1] into data register 3160 1.1 mrg OPERANDS[0]. 
*/ 3161 1.1 mrg 3162 1.1 mrg static const char * 3163 1.1 mrg output_move_const_into_data_reg (rtx *operands) 3164 1.1 mrg { 3165 1.1 mrg HOST_WIDE_INT i; 3166 1.1 mrg 3167 1.1 mrg i = INTVAL (operands[1]); 3168 1.1 mrg switch (m68k_const_method (i)) 3169 1.1 mrg { 3170 1.1 mrg case MVZ: 3171 1.1 mrg return "mvzw %1,%0"; 3172 1.1 mrg case MVS: 3173 1.1 mrg return "mvsw %1,%0"; 3174 1.1 mrg case MOVQ: 3175 1.1 mrg return "moveq %1,%0"; 3176 1.1 mrg case NOTB: 3177 1.1 mrg CC_STATUS_INIT; 3178 1.1 mrg operands[1] = GEN_INT (i ^ 0xff); 3179 1.1 mrg return "moveq %1,%0\n\tnot%.b %0"; 3180 1.1 mrg case NOTW: 3181 1.1 mrg CC_STATUS_INIT; 3182 1.1 mrg operands[1] = GEN_INT (i ^ 0xffff); 3183 1.1 mrg return "moveq %1,%0\n\tnot%.w %0"; 3184 1.1 mrg case NEGW: 3185 1.1 mrg CC_STATUS_INIT; 3186 1.1 mrg return "moveq #-128,%0\n\tneg%.w %0"; 3187 1.1 mrg case SWAP: 3188 1.1 mrg { 3189 1.1 mrg unsigned u = i; 3190 1.1 mrg 3191 1.1 mrg operands[1] = GEN_INT ((u << 16) | (u >> 16)); 3192 1.1 mrg return "moveq %1,%0\n\tswap %0"; 3193 1.1 mrg } 3194 1.1 mrg case MOVL: 3195 1.1 mrg return "move%.l %1,%0"; 3196 1.1 mrg default: 3197 1.1 mrg gcc_unreachable (); 3198 1.1 mrg } 3199 1.1 mrg } 3200 1.1 mrg 3201 1.1 mrg /* Return true if I can be handled by ISA B's mov3q instruction. */ 3202 1.1 mrg 3203 1.1 mrg bool 3204 1.1 mrg valid_mov3q_const (HOST_WIDE_INT i) 3205 1.1 mrg { 3206 1.1 mrg return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7)); 3207 1.1 mrg } 3208 1.1 mrg 3209 1.1 mrg /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0]. 3210 1.1 mrg I is the value of OPERANDS[1]. */ 3211 1.1 mrg 3212 1.1 mrg static const char * 3213 1.1 mrg output_move_simode_const (rtx *operands) 3214 1.1 mrg { 3215 1.1 mrg rtx dest; 3216 1.1 mrg HOST_WIDE_INT src; 3217 1.1 mrg 3218 1.1 mrg dest = operands[0]; 3219 1.1 mrg src = INTVAL (operands[1]); 3220 1.1 mrg if (src == 0 3221 1.1 mrg && (DATA_REG_P (dest) || MEM_P (dest)) 3222 1.1 mrg /* clr insns on 68000 read before writing. 
*/ 3223 1.1 mrg && ((TARGET_68010 || TARGET_COLDFIRE) 3224 1.1 mrg || !(MEM_P (dest) && MEM_VOLATILE_P (dest)))) 3225 1.1 mrg return "clr%.l %0"; 3226 1.1 mrg else if (GET_MODE (dest) == SImode && valid_mov3q_const (src)) 3227 1.1 mrg return "mov3q%.l %1,%0"; 3228 1.1 mrg else if (src == 0 && ADDRESS_REG_P (dest)) 3229 1.1 mrg return "sub%.l %0,%0"; 3230 1.1 mrg else if (DATA_REG_P (dest)) 3231 1.1 mrg return output_move_const_into_data_reg (operands); 3232 1.1 mrg else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff)) 3233 1.1 mrg { 3234 1.1 mrg if (valid_mov3q_const (src)) 3235 1.1 mrg return "mov3q%.l %1,%0"; 3236 1.1 mrg return "move%.w %1,%0"; 3237 1.1 mrg } 3238 1.1 mrg else if (MEM_P (dest) 3239 1.1 mrg && GET_CODE (XEXP (dest, 0)) == PRE_DEC 3240 1.1 mrg && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM 3241 1.1 mrg && IN_RANGE (src, -0x8000, 0x7fff)) 3242 1.1 mrg { 3243 1.1 mrg if (valid_mov3q_const (src)) 3244 1.1 mrg return "mov3q%.l %1,%-"; 3245 1.1 mrg return "pea %a1"; 3246 1.1 mrg } 3247 1.1 mrg return "move%.l %1,%0"; 3248 1.1 mrg } 3249 1.1 mrg 3250 1.1 mrg const char * 3251 1.1 mrg output_move_simode (rtx *operands) 3252 1.1 mrg { 3253 1.1 mrg handle_flags_for_move (operands); 3254 1.1 mrg 3255 1.1 mrg if (GET_CODE (operands[1]) == CONST_INT) 3256 1.1 mrg return output_move_simode_const (operands); 3257 1.1 mrg else if ((GET_CODE (operands[1]) == SYMBOL_REF 3258 1.1 mrg || GET_CODE (operands[1]) == CONST) 3259 1.1 mrg && push_operand (operands[0], SImode)) 3260 1.1 mrg return "pea %a1"; 3261 1.1 mrg else if ((GET_CODE (operands[1]) == SYMBOL_REF 3262 1.1 mrg || GET_CODE (operands[1]) == CONST) 3263 1.1 mrg && ADDRESS_REG_P (operands[0])) 3264 1.1 mrg return "lea %a1,%0"; 3265 1.1 mrg return "move%.l %1,%0"; 3266 1.1 mrg } 3267 1.1 mrg 3268 1.1 mrg const char * 3269 1.1 mrg output_move_himode (rtx *operands) 3270 1.1 mrg { 3271 1.1 mrg if (GET_CODE (operands[1]) == CONST_INT) 3272 1.1 mrg { 3273 1.1 mrg if (operands[1] == 
const0_rtx 3274 1.1 mrg && (DATA_REG_P (operands[0]) 3275 1.1 mrg || GET_CODE (operands[0]) == MEM) 3276 1.1 mrg /* clr insns on 68000 read before writing. */ 3277 1.1 mrg && ((TARGET_68010 || TARGET_COLDFIRE) 3278 1.1 mrg || !(GET_CODE (operands[0]) == MEM 3279 1.1 mrg && MEM_VOLATILE_P (operands[0])))) 3280 1.1 mrg return "clr%.w %0"; 3281 1.1 mrg else if (operands[1] == const0_rtx 3282 1.1 mrg && ADDRESS_REG_P (operands[0])) 3283 1.1 mrg return "sub%.l %0,%0"; 3284 1.1 mrg else if (DATA_REG_P (operands[0]) 3285 1.1 mrg && INTVAL (operands[1]) < 128 3286 1.1 mrg && INTVAL (operands[1]) >= -128) 3287 1.1 mrg return "moveq %1,%0"; 3288 1.1 mrg else if (INTVAL (operands[1]) < 0x8000 3289 1.1 mrg && INTVAL (operands[1]) >= -0x8000) 3290 1.1 mrg return "move%.w %1,%0"; 3291 1.1 mrg } 3292 1.1 mrg else if (CONSTANT_P (operands[1])) 3293 1.1 mrg gcc_unreachable (); 3294 1.1 mrg return "move%.w %1,%0"; 3295 1.1 mrg } 3296 1.1 mrg 3297 1.1 mrg const char * 3298 1.1 mrg output_move_qimode (rtx *operands) 3299 1.1 mrg { 3300 1.1 mrg handle_flags_for_move (operands); 3301 1.1 mrg 3302 1.1 mrg /* 68k family always modifies the stack pointer by at least 2, even for 3303 1.1 mrg byte pushes. The 5200 (ColdFire) does not do this. */ 3304 1.1 mrg 3305 1.1 mrg /* This case is generated by pushqi1 pattern now. */ 3306 1.1 mrg gcc_assert (!(GET_CODE (operands[0]) == MEM 3307 1.1 mrg && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC 3308 1.1 mrg && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx 3309 1.1 mrg && ! ADDRESS_REG_P (operands[1]) 3310 1.1 mrg && ! TARGET_COLDFIRE)); 3311 1.1 mrg 3312 1.1 mrg /* clr and st insns on 68000 read before writing. 
*/ 3313 1.1 mrg if (!ADDRESS_REG_P (operands[0]) 3314 1.1 mrg && ((TARGET_68010 || TARGET_COLDFIRE) 3315 1.1 mrg || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) 3316 1.1 mrg { 3317 1.1 mrg if (operands[1] == const0_rtx) 3318 1.1 mrg return "clr%.b %0"; 3319 1.1 mrg if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0])) 3320 1.1 mrg && GET_CODE (operands[1]) == CONST_INT 3321 1.1 mrg && (INTVAL (operands[1]) & 255) == 255) 3322 1.1 mrg { 3323 1.1 mrg CC_STATUS_INIT; 3324 1.1 mrg return "st %0"; 3325 1.1 mrg } 3326 1.1 mrg } 3327 1.1 mrg if (GET_CODE (operands[1]) == CONST_INT 3328 1.1 mrg && DATA_REG_P (operands[0]) 3329 1.1 mrg && INTVAL (operands[1]) < 128 3330 1.1 mrg && INTVAL (operands[1]) >= -128) 3331 1.1 mrg return "moveq %1,%0"; 3332 1.1 mrg if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0])) 3333 1.1 mrg return "sub%.l %0,%0"; 3334 1.1 mrg if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1])) 3335 1.1 mrg gcc_unreachable (); 3336 1.1 mrg /* 68k family (including the 5200 ColdFire) does not support byte moves to 3337 1.1 mrg from address registers. */ 3338 1.1 mrg if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1])) 3339 1.1 mrg { 3340 1.1 mrg if (ADDRESS_REG_P (operands[1])) 3341 1.1 mrg CC_STATUS_INIT; 3342 1.1 mrg return "move%.w %1,%0"; 3343 1.1 mrg } 3344 1.1 mrg return "move%.b %1,%0"; 3345 1.1 mrg } 3346 1.1 mrg 3347 1.1 mrg const char * 3348 1.1 mrg output_move_stricthi (rtx *operands) 3349 1.1 mrg { 3350 1.1 mrg if (operands[1] == const0_rtx 3351 1.1 mrg /* clr insns on 68000 read before writing. 
*/ 3352 1.1 mrg && ((TARGET_68010 || TARGET_COLDFIRE) 3353 1.1 mrg || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) 3354 1.1 mrg return "clr%.w %0"; 3355 1.1 mrg return "move%.w %1,%0"; 3356 1.1 mrg } 3357 1.1 mrg 3358 1.1 mrg const char * 3359 1.1 mrg output_move_strictqi (rtx *operands) 3360 1.1 mrg { 3361 1.1 mrg if (operands[1] == const0_rtx 3362 1.1 mrg /* clr insns on 68000 read before writing. */ 3363 1.1 mrg && ((TARGET_68010 || TARGET_COLDFIRE) 3364 1.1 mrg || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) 3365 1.1 mrg return "clr%.b %0"; 3366 1.1 mrg return "move%.b %1,%0"; 3367 1.1 mrg } 3368 1.1 mrg 3369 1.1 mrg /* Return the best assembler insn template 3370 1.1 mrg for moving operands[1] into operands[0] as a fullword. */ 3371 1.1 mrg 3372 1.1 mrg static const char * 3373 1.1 mrg singlemove_string (rtx *operands) 3374 1.1 mrg { 3375 1.1 mrg if (GET_CODE (operands[1]) == CONST_INT) 3376 1.1 mrg return output_move_simode_const (operands); 3377 1.1 mrg return "move%.l %1,%0"; 3378 1.1 mrg } 3379 1.1 mrg 3380 1.1 mrg 3381 1.1 mrg /* Output assembler or rtl code to perform a doubleword move insn 3382 1.1 mrg with operands OPERANDS. 3383 1.1 mrg Pointers to 3 helper functions should be specified: 3384 1.1 mrg HANDLE_REG_ADJUST to adjust a register by a small value, 3385 1.1 mrg HANDLE_COMPADR to compute an address and 3386 1.1 mrg HANDLE_MOVSI to move 4 bytes. 
 */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Addressing classification of each operand: register, offsettable
     memory, other memory, pre-decrement push, post-increment pop,
     constant, or "random" (disallowed).  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  /* Registers we temporarily adjust to reach the non-first words of an
     unoffsettable memory operand; 0 when not needed.  */
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  /* 8 for DImode/DFmode, 12 for XFmode (three words).  */
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): this branch tests operands[1] for XFmode but
	 operands[0] for DFmode; since operands[0] was just replaced by
	 its (SImode) address register above, the DFmode arm looks
	 unreachable and the move falls through to the DImode MEM, which
	 has the same size — confirm intended.  The mirrored branch
	 below tests operands[1] in both places.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      /* Three-word (XFmode) move: also set up MIDDLEHALF.  */
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable MEM: the address register ADDREG0 is stepped
	     to reach the later words, so all parts share the address.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split the 12-byte FP constant into three SImode words.  */
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, then arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, if the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}

/* Output assembler code to adjust REG by N.
*/ 3717 1.1 mrg static void 3718 1.1 mrg output_reg_adjust (rtx reg, int n) 3719 1.1 mrg { 3720 1.1 mrg const char *s; 3721 1.1 mrg 3722 1.1 mrg gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12); 3723 1.1 mrg 3724 1.1 mrg switch (n) 3725 1.1 mrg { 3726 1.1 mrg case 12: 3727 1.1 mrg s = "add%.l #12,%0"; 3728 1.1 mrg break; 3729 1.1 mrg 3730 1.1 mrg case 8: 3731 1.1 mrg s = "addq%.l #8,%0"; 3732 1.1 mrg break; 3733 1.1 mrg 3734 1.1 mrg case 4: 3735 1.1 mrg s = "addq%.l #4,%0"; 3736 1.1 mrg break; 3737 1.1 mrg 3738 1.1 mrg case -12: 3739 1.1 mrg s = "sub%.l #12,%0"; 3740 1.1 mrg break; 3741 1.1 mrg 3742 1.1 mrg case -8: 3743 1.1 mrg s = "subq%.l #8,%0"; 3744 1.1 mrg break; 3745 1.1 mrg 3746 1.1 mrg case -4: 3747 1.1 mrg s = "subq%.l #4,%0"; 3748 1.1 mrg break; 3749 1.1 mrg 3750 1.1 mrg default: 3751 1.1 mrg gcc_unreachable (); 3752 1.1 mrg s = NULL; 3753 1.1 mrg } 3754 1.1 mrg 3755 1.1 mrg output_asm_insn (s, ®); 3756 1.1 mrg } 3757 1.1 mrg 3758 1.1 mrg /* Emit rtl code to adjust REG by N. */ 3759 1.1 mrg static void 3760 1.1 mrg emit_reg_adjust (rtx reg1, int n) 3761 1.1 mrg { 3762 1.1 mrg rtx reg2; 3763 1.1 mrg 3764 1.1 mrg gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12); 3765 1.1 mrg 3766 1.1 mrg reg1 = copy_rtx (reg1); 3767 1.1 mrg reg2 = copy_rtx (reg1); 3768 1.1 mrg 3769 1.1 mrg if (n < 0) 3770 1.1 mrg emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n))); 3771 1.1 mrg else if (n > 0) 3772 1.1 mrg emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n))); 3773 1.1 mrg else 3774 1.1 mrg gcc_unreachable (); 3775 1.1 mrg } 3776 1.1 mrg 3777 1.1 mrg /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */ 3778 1.1 mrg static void 3779 1.1 mrg output_compadr (rtx operands[2]) 3780 1.1 mrg { 3781 1.1 mrg output_asm_insn ("lea %a1,%0", operands); 3782 1.1 mrg } 3783 1.1 mrg 3784 1.1 mrg /* Output the best assembler insn for moving operands[1] into operands[0] 3785 1.1 mrg as a fullword. 
*/ 3786 1.1 mrg static void 3787 1.1 mrg output_movsi (rtx operands[2]) 3788 1.1 mrg { 3789 1.1 mrg output_asm_insn (singlemove_string (operands), operands); 3790 1.1 mrg } 3791 1.1 mrg 3792 1.1 mrg /* Copy OP and change its mode to MODE. */ 3793 1.1 mrg static rtx 3794 1.1 mrg copy_operand (rtx op, machine_mode mode) 3795 1.1 mrg { 3796 1.1 mrg /* ??? This looks really ugly. There must be a better way 3797 1.1 mrg to change a mode on the operand. */ 3798 1.1 mrg if (GET_MODE (op) != VOIDmode) 3799 1.1 mrg { 3800 1.1 mrg if (REG_P (op)) 3801 1.1 mrg op = gen_rtx_REG (mode, REGNO (op)); 3802 1.1 mrg else 3803 1.1 mrg { 3804 1.1 mrg op = copy_rtx (op); 3805 1.1 mrg PUT_MODE (op, mode); 3806 1.1 mrg } 3807 1.1 mrg } 3808 1.1 mrg 3809 1.1 mrg return op; 3810 1.1 mrg } 3811 1.1 mrg 3812 1.1 mrg /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */ 3813 1.1 mrg static void 3814 1.1 mrg emit_movsi (rtx operands[2]) 3815 1.1 mrg { 3816 1.1 mrg operands[0] = copy_operand (operands[0], SImode); 3817 1.1 mrg operands[1] = copy_operand (operands[1], SImode); 3818 1.1 mrg 3819 1.1 mrg emit_insn (gen_movsi (operands[0], operands[1])); 3820 1.1 mrg } 3821 1.1 mrg 3822 1.1 mrg /* Output assembler code to perform a doubleword move insn 3823 1.1 mrg with operands OPERANDS. */ 3824 1.1 mrg const char * 3825 1.1 mrg output_move_double (rtx *operands) 3826 1.1 mrg { 3827 1.1 mrg handle_move_double (operands, 3828 1.1 mrg output_reg_adjust, output_compadr, output_movsi); 3829 1.1 mrg 3830 1.1 mrg return ""; 3831 1.1 mrg } 3832 1.1 mrg 3833 1.1 mrg /* Output rtl code to perform a doubleword move insn 3834 1.1 mrg with operands OPERANDS. */ 3835 1.1 mrg void 3836 1.1 mrg m68k_emit_move_double (rtx operands[2]) 3837 1.1 mrg { 3838 1.1 mrg handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi); 3839 1.1 mrg } 3840 1.1 mrg 3841 1.1 mrg /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a 3842 1.1 mrg new rtx with the correct mode. 
*/ 3843 1.1 mrg 3844 1.1 mrg static rtx 3845 1.1 mrg force_mode (machine_mode mode, rtx orig) 3846 1.1 mrg { 3847 1.1 mrg if (mode == GET_MODE (orig)) 3848 1.1 mrg return orig; 3849 1.1 mrg 3850 1.1 mrg if (REGNO (orig) >= FIRST_PSEUDO_REGISTER) 3851 1.1 mrg abort (); 3852 1.1 mrg 3853 1.1 mrg return gen_rtx_REG (mode, REGNO (orig)); 3854 1.1 mrg } 3855 1.1 mrg 3856 1.1 mrg static int 3857 1.1 mrg fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED) 3858 1.1 mrg { 3859 1.1 mrg return reg_renumber && FP_REG_P (op); 3860 1.1 mrg } 3861 1.1 mrg 3862 1.1 mrg /* Emit insns to move operands[1] into operands[0]. 3863 1.1 mrg 3864 1.1 mrg Return 1 if we have written out everything that needs to be done to 3865 1.1 mrg do the move. Otherwise, return 0 and the caller will emit the move 3866 1.1 mrg normally. 3867 1.1 mrg 3868 1.1 mrg Note SCRATCH_REG may not be in the proper mode depending on how it 3869 1.1 mrg will be used. This routine is responsible for creating a new copy 3870 1.1 mrg of SCRATCH_REG in the proper mode. */ 3871 1.1 mrg 3872 1.1 mrg int 3873 1.1 mrg emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg) 3874 1.1 mrg { 3875 1.1 mrg rtx operand0 = operands[0]; 3876 1.1 mrg rtx operand1 = operands[1]; 3877 1.1 mrg rtx tem; 3878 1.1 mrg 3879 1.1 mrg if (scratch_reg 3880 1.1 mrg && reload_in_progress && GET_CODE (operand0) == REG 3881 1.1 mrg && REGNO (operand0) >= FIRST_PSEUDO_REGISTER) 3882 1.1 mrg operand0 = reg_equiv_mem (REGNO (operand0)); 3883 1.1 mrg else if (scratch_reg 3884 1.1 mrg && reload_in_progress && GET_CODE (operand0) == SUBREG 3885 1.1 mrg && GET_CODE (SUBREG_REG (operand0)) == REG 3886 1.1 mrg && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER) 3887 1.1 mrg { 3888 1.1 mrg /* We must not alter SUBREG_BYTE (operand0) since that would confuse 3889 1.1 mrg the code which tracks sets/uses for delete_output_reload. 
*/ 3890 1.1 mrg rtx temp = gen_rtx_SUBREG (GET_MODE (operand0), 3891 1.1 mrg reg_equiv_mem (REGNO (SUBREG_REG (operand0))), 3892 1.1 mrg SUBREG_BYTE (operand0)); 3893 1.1 mrg operand0 = alter_subreg (&temp, true); 3894 1.1 mrg } 3895 1.1 mrg 3896 1.1 mrg if (scratch_reg 3897 1.1 mrg && reload_in_progress && GET_CODE (operand1) == REG 3898 1.1 mrg && REGNO (operand1) >= FIRST_PSEUDO_REGISTER) 3899 1.1 mrg operand1 = reg_equiv_mem (REGNO (operand1)); 3900 1.1 mrg else if (scratch_reg 3901 1.1 mrg && reload_in_progress && GET_CODE (operand1) == SUBREG 3902 1.1 mrg && GET_CODE (SUBREG_REG (operand1)) == REG 3903 1.1 mrg && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER) 3904 1.1 mrg { 3905 1.1 mrg /* We must not alter SUBREG_BYTE (operand0) since that would confuse 3906 1.1 mrg the code which tracks sets/uses for delete_output_reload. */ 3907 1.1 mrg rtx temp = gen_rtx_SUBREG (GET_MODE (operand1), 3908 1.1 mrg reg_equiv_mem (REGNO (SUBREG_REG (operand1))), 3909 1.1 mrg SUBREG_BYTE (operand1)); 3910 1.1 mrg operand1 = alter_subreg (&temp, true); 3911 1.1 mrg } 3912 1.1 mrg 3913 1.1 mrg if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM 3914 1.1 mrg && ((tem = find_replacement (&XEXP (operand0, 0))) 3915 1.1 mrg != XEXP (operand0, 0))) 3916 1.1 mrg operand0 = gen_rtx_MEM (GET_MODE (operand0), tem); 3917 1.1 mrg if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM 3918 1.1 mrg && ((tem = find_replacement (&XEXP (operand1, 0))) 3919 1.1 mrg != XEXP (operand1, 0))) 3920 1.1 mrg operand1 = gen_rtx_MEM (GET_MODE (operand1), tem); 3921 1.1 mrg 3922 1.1 mrg /* Handle secondary reloads for loads/stores of FP registers where 3923 1.1 mrg the address is symbolic by using the scratch register */ 3924 1.1 mrg if (fp_reg_operand (operand0, mode) 3925 1.1 mrg && ((GET_CODE (operand1) == MEM 3926 1.1 mrg && ! 
memory_address_p (DFmode, XEXP (operand1, 0))) 3927 1.1 mrg || ((GET_CODE (operand1) == SUBREG 3928 1.1 mrg && GET_CODE (XEXP (operand1, 0)) == MEM 3929 1.1 mrg && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0))))) 3930 1.1 mrg && scratch_reg) 3931 1.1 mrg { 3932 1.1 mrg if (GET_CODE (operand1) == SUBREG) 3933 1.1 mrg operand1 = XEXP (operand1, 0); 3934 1.1 mrg 3935 1.1 mrg /* SCRATCH_REG will hold an address. We want 3936 1.1 mrg it in SImode regardless of what mode it was originally given 3937 1.1 mrg to us. */ 3938 1.1 mrg scratch_reg = force_mode (SImode, scratch_reg); 3939 1.1 mrg 3940 1.1 mrg /* D might not fit in 14 bits either; for such cases load D into 3941 1.1 mrg scratch reg. */ 3942 1.1 mrg if (!memory_address_p (Pmode, XEXP (operand1, 0))) 3943 1.1 mrg { 3944 1.1 mrg emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1)); 3945 1.1 mrg emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)), 3946 1.1 mrg Pmode, 3947 1.1 mrg XEXP (XEXP (operand1, 0), 0), 3948 1.1 mrg scratch_reg)); 3949 1.1 mrg } 3950 1.1 mrg else 3951 1.1 mrg emit_move_insn (scratch_reg, XEXP (operand1, 0)); 3952 1.1 mrg emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg))); 3953 1.1 mrg return 1; 3954 1.1 mrg } 3955 1.1 mrg else if (fp_reg_operand (operand1, mode) 3956 1.1 mrg && ((GET_CODE (operand0) == MEM 3957 1.1 mrg && ! memory_address_p (DFmode, XEXP (operand0, 0))) 3958 1.1 mrg || ((GET_CODE (operand0) == SUBREG) 3959 1.1 mrg && GET_CODE (XEXP (operand0, 0)) == MEM 3960 1.1 mrg && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0)))) 3961 1.1 mrg && scratch_reg) 3962 1.1 mrg { 3963 1.1 mrg if (GET_CODE (operand0) == SUBREG) 3964 1.1 mrg operand0 = XEXP (operand0, 0); 3965 1.1 mrg 3966 1.1 mrg /* SCRATCH_REG will hold an address and maybe the actual data. We want 3967 1.1 mrg it in SIMODE regardless of what mode it was originally given 3968 1.1 mrg to us. 
*/ 3969 1.1 mrg scratch_reg = force_mode (SImode, scratch_reg); 3970 1.1 mrg 3971 1.1 mrg /* D might not fit in 14 bits either; for such cases load D into 3972 1.1 mrg scratch reg. */ 3973 1.1 mrg if (!memory_address_p (Pmode, XEXP (operand0, 0))) 3974 1.1 mrg { 3975 1.1 mrg emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1)); 3976 1.1 mrg emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 3977 1.1 mrg 0)), 3978 1.1 mrg Pmode, 3979 1.1 mrg XEXP (XEXP (operand0, 0), 3980 1.1 mrg 0), 3981 1.1 mrg scratch_reg)); 3982 1.1 mrg } 3983 1.1 mrg else 3984 1.1 mrg emit_move_insn (scratch_reg, XEXP (operand0, 0)); 3985 1.1 mrg emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1)); 3986 1.1 mrg return 1; 3987 1.1 mrg } 3988 1.1 mrg /* Handle secondary reloads for loads of FP registers from constant 3989 1.1 mrg expressions by forcing the constant into memory. 3990 1.1 mrg 3991 1.1 mrg use scratch_reg to hold the address of the memory location. 3992 1.1 mrg 3993 1.1 mrg The proper fix is to change PREFERRED_RELOAD_CLASS to return 3994 1.1 mrg NO_REGS when presented with a const_int and an register class 3995 1.1 mrg containing only FP registers. Doing so unfortunately creates 3996 1.1 mrg more problems than it solves. Fix this for 2.5. */ 3997 1.1 mrg else if (fp_reg_operand (operand0, mode) 3998 1.1 mrg && CONSTANT_P (operand1) 3999 1.1 mrg && scratch_reg) 4000 1.1 mrg { 4001 1.1 mrg rtx xoperands[2]; 4002 1.1 mrg 4003 1.1 mrg /* SCRATCH_REG will hold an address and maybe the actual data. We want 4004 1.1 mrg it in SIMODE regardless of what mode it was originally given 4005 1.1 mrg to us. */ 4006 1.1 mrg scratch_reg = force_mode (SImode, scratch_reg); 4007 1.1 mrg 4008 1.1 mrg /* Force the constant into memory and put the address of the 4009 1.1 mrg memory location into scratch_reg. 
*/ 4010 1.1 mrg xoperands[0] = scratch_reg; 4011 1.1 mrg xoperands[1] = XEXP (force_const_mem (mode, operand1), 0); 4012 1.1 mrg emit_insn (gen_rtx_SET (scratch_reg, xoperands[1])); 4013 1.1 mrg 4014 1.1 mrg /* Now load the destination register. */ 4015 1.1 mrg emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg))); 4016 1.1 mrg return 1; 4017 1.1 mrg } 4018 1.1 mrg 4019 1.1 mrg /* Now have insn-emit do whatever it normally does. */ 4020 1.1 mrg return 0; 4021 1.1 mrg } 4022 1.1 mrg 4023 1.1 mrg /* Split one or more DImode RTL references into pairs of SImode 4024 1.1 mrg references. The RTL can be REG, offsettable MEM, integer constant, or 4025 1.1 mrg CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to 4026 1.1 mrg split and "num" is its length. lo_half and hi_half are output arrays 4027 1.1 mrg that parallel "operands". */ 4028 1.1 mrg 4029 1.1 mrg void 4030 1.1 mrg split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) 4031 1.1 mrg { 4032 1.1 mrg while (num--) 4033 1.1 mrg { 4034 1.1 mrg rtx op = operands[num]; 4035 1.1 mrg 4036 1.1 mrg /* simplify_subreg refuses to split volatile memory addresses, 4037 1.1 mrg but we still have to handle it. */ 4038 1.1 mrg if (GET_CODE (op) == MEM) 4039 1.1 mrg { 4040 1.1 mrg lo_half[num] = adjust_address (op, SImode, 4); 4041 1.1 mrg hi_half[num] = adjust_address (op, SImode, 0); 4042 1.1 mrg } 4043 1.1 mrg else 4044 1.1 mrg { 4045 1.1 mrg lo_half[num] = simplify_gen_subreg (SImode, op, 4046 1.1 mrg GET_MODE (op) == VOIDmode 4047 1.1 mrg ? DImode : GET_MODE (op), 4); 4048 1.1 mrg hi_half[num] = simplify_gen_subreg (SImode, op, 4049 1.1 mrg GET_MODE (op) == VOIDmode 4050 1.1 mrg ? DImode : GET_MODE (op), 0); 4051 1.1 mrg } 4052 1.1 mrg } 4053 1.1 mrg } 4054 1.1 mrg 4055 1.1 mrg /* Split X into a base and a constant offset, storing them in *BASE 4056 1.1 mrg and *OFFSET respectively. 
 */

static void
m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
{
  *offset = 0;
  /* Only a single outer (plus BASE (const_int N)) is stripped; any
     other form is returned whole with offset 0.  */
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *offset += INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }
  *base = x;
}

/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the base-register adjustment when automodifying.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Accesses must be consecutive: same base, sequential offsets.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}

/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.
 */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement ordering.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* Post-increment or plain addressing ordering.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The multi-register move invalidates any tracked cc0-style flags.  */
  CC_STATUS_INIT;

  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}

/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}

/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form; lea needs an address register as the base,
	 so make operands[1] the address register if possible.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* Address-register destination with a 16-bit displacement.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}

/* Emit a comparison between OP0 and OP1.
Return true iff the comparison 4343 1.1 mrg was reversed. SC1 is an SImode scratch reg, and SC2 a DImode scratch reg, 4344 1.1 mrg as needed. CODE is the code of the comparison, we return it unchanged or 4345 1.1 mrg swapped, as necessary. */ 4346 1.1 mrg rtx_code 4347 1.1 mrg m68k_output_compare_di (rtx op0, rtx op1, rtx sc1, rtx sc2, rtx_insn *insn, 4348 1.1 mrg rtx_code code) 4349 1.1 mrg { 4350 1.1 mrg rtx ops[4]; 4351 1.1 mrg ops[0] = op0; 4352 1.1 mrg ops[1] = op1; 4353 1.1 mrg ops[2] = sc1; 4354 1.1 mrg ops[3] = sc2; 4355 1.1 mrg if (op1 == const0_rtx) 4356 1.1 mrg { 4357 1.1 mrg if (!REG_P (op0) || ADDRESS_REG_P (op0)) 4358 1.1 mrg { 4359 1.1 mrg rtx xoperands[2]; 4360 1.1 mrg 4361 1.1 mrg xoperands[0] = sc2; 4362 1.1 mrg xoperands[1] = op0; 4363 1.1 mrg output_move_double (xoperands); 4364 1.1 mrg output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", xoperands); 4365 1.1 mrg return swap_condition (code); 4366 1.1 mrg } 4367 1.1 mrg if (find_reg_note (insn, REG_DEAD, op0)) 4368 1.1 mrg { 4369 1.1 mrg output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", ops); 4370 1.1 mrg return swap_condition (code); 4371 1.1 mrg } 4372 1.1 mrg else 4373 1.1 mrg { 4374 1.1 mrg /* 'sub' clears %1, and also clears the X cc bit. 4375 1.1 mrg 'tst' sets the Z cc bit according to the low part of the DImode 4376 1.1 mrg operand. 4377 1.1 mrg 'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high 4378 1.1 mrg part. 
*/ 4379 1.1 mrg output_asm_insn ("sub%.l %2,%2\n\ttst%.l %R0\n\tsubx%.l %2,%0", ops); 4380 1.1 mrg return code; 4381 1.1 mrg } 4382 1.1 mrg } 4383 1.1 mrg 4384 1.1 mrg if (rtx_equal_p (sc2, op0)) 4385 1.1 mrg { 4386 1.1 mrg output_asm_insn ("sub%.l %R1,%R3\n\tsubx%.l %1,%3", ops); 4387 1.1 mrg return code; 4388 1.1 mrg } 4389 1.1 mrg else 4390 1.1 mrg { 4391 1.1 mrg output_asm_insn ("sub%.l %R0,%R3\n\tsubx%.l %0,%3", ops); 4392 1.1 mrg return swap_condition (code); 4393 1.1 mrg } 4394 1.1 mrg } 4395 1.1 mrg 4396 1.1 mrg static void 4397 1.1 mrg remember_compare_flags (rtx op0, rtx op1) 4398 1.1 mrg { 4399 1.1 mrg if (side_effects_p (op0) || side_effects_p (op1)) 4400 1.1 mrg CC_STATUS_INIT; 4401 1.1 mrg else 4402 1.1 mrg { 4403 1.1 mrg flags_compare_op0 = op0; 4404 1.1 mrg flags_compare_op1 = op1; 4405 1.1 mrg flags_operand1 = flags_operand2 = NULL_RTX; 4406 1.1 mrg flags_valid = FLAGS_VALID_SET; 4407 1.1 mrg } 4408 1.1 mrg } 4409 1.1 mrg 4410 1.1 mrg /* Emit a comparison between OP0 and OP1. CODE is the code of the 4411 1.1 mrg comparison. It is returned, potentially modified if necessary. 
*/
rtx_code
m68k_output_compare_si (rtx op0, rtx op1, rtx_code code)
{
  /* If a prior instruction already left the flags in the right state,
     emit nothing.  */
  rtx_code cached = m68k_find_flags_value (op0, op1, code);
  if (cached != UNKNOWN)
    return cached;

  remember_compare_flags (op0, op1);

  rtx ops[2] = { op0, op1 };
  if (op1 == const0_rtx
      && (TARGET_68020 || TARGET_COLDFIRE || !ADDRESS_REG_P (op0)))
    output_asm_insn ("tst%.l %0", ops);
  else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
    output_asm_insn ("cmpm%.l %1,%0", ops);
  else if (REG_P (op1)
	   || (!REG_P (op0) && GET_CODE (op0) != MEM))
    {
      /* Only the reversed operand order is representable; swap and
	 report the swapped condition.  */
      output_asm_insn ("cmp%.l %d0,%d1", ops);
      std::swap (flags_compare_op0, flags_compare_op1);
      return swap_condition (code);
    }
  else if (!TARGET_COLDFIRE
	   && ADDRESS_REG_P (op0)
	   && GET_CODE (op1) == CONST_INT
	   && INTVAL (op1) < 0x8000
	   && INTVAL (op1) >= -0x8000)
    output_asm_insn ("cmp%.w %1,%0", ops);
  else
    output_asm_insn ("cmp%.l %d1,%d0", ops);
  return code;
}

/* Emit a comparison between OP0 and OP1.  CODE is the code of the
   comparison.  It is returned, potentially modified if necessary.
*/
rtx_code
m68k_output_compare_hi (rtx op0, rtx op1, rtx_code code)
{
  /* Reuse flags left by an earlier instruction when possible.  */
  rtx_code cached = m68k_find_flags_value (op0, op1, code);
  if (cached != UNKNOWN)
    return cached;

  remember_compare_flags (op0, op1);

  rtx ops[2] = { op0, op1 };
  if (op1 == const0_rtx)
    output_asm_insn ("tst%.w %d0", ops);
  else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
    output_asm_insn ("cmpm%.w %1,%0", ops);
  else if ((REG_P (op1) && !ADDRESS_REG_P (op1))
	   || (!REG_P (op0) && GET_CODE (op0) != MEM))
    {
      /* Swapped operand order; return the swapped condition.  */
      output_asm_insn ("cmp%.w %d0,%d1", ops);
      std::swap (flags_compare_op0, flags_compare_op1);
      return swap_condition (code);
    }
  else
    output_asm_insn ("cmp%.w %d1,%d0", ops);
  return code;
}

/* Emit a comparison between OP0 and OP1.  CODE is the code of the
   comparison.  It is returned, potentially modified if necessary.
*/ 4478 1.1 mrg rtx_code 4479 1.1 mrg m68k_output_compare_qi (rtx op0, rtx op1, rtx_code code) 4480 1.1 mrg { 4481 1.1 mrg rtx_code tmp = m68k_find_flags_value (op0, op1, code); 4482 1.1 mrg if (tmp != UNKNOWN) 4483 1.1 mrg return tmp; 4484 1.1 mrg 4485 1.1 mrg remember_compare_flags (op0, op1); 4486 1.1 mrg 4487 1.1 mrg rtx ops[2]; 4488 1.1 mrg ops[0] = op0; 4489 1.1 mrg ops[1] = op1; 4490 1.1 mrg if (op1 == const0_rtx) 4491 1.1 mrg output_asm_insn ("tst%.b %d0", ops); 4492 1.1 mrg else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM) 4493 1.1 mrg output_asm_insn ("cmpm%.b %1,%0", ops); 4494 1.1 mrg else if (REG_P (op1) || (!REG_P (op0) && GET_CODE (op0) != MEM)) 4495 1.1 mrg { 4496 1.1 mrg output_asm_insn ("cmp%.b %d0,%d1", ops); 4497 1.1 mrg std::swap (flags_compare_op0, flags_compare_op1); 4498 1.1 mrg return swap_condition (code); 4499 1.1 mrg } 4500 1.1 mrg else 4501 1.1 mrg output_asm_insn ("cmp%.b %d1,%d0", ops); 4502 1.1 mrg return code; 4503 1.1 mrg } 4504 1.1 mrg 4505 1.1 mrg /* Emit a comparison between OP0 and OP1. CODE is the code of the 4506 1.1 mrg comparison. It is returned, potentially modified if necessary. */ 4507 1.1 mrg rtx_code 4508 1.1 mrg m68k_output_compare_fp (rtx op0, rtx op1, rtx_code code) 4509 1.1 mrg { 4510 1.1 mrg rtx_code tmp = m68k_find_flags_value (op0, op1, code); 4511 1.1 mrg if (tmp != UNKNOWN) 4512 1.1 mrg return tmp; 4513 1.1 mrg 4514 1.1 mrg rtx ops[2]; 4515 1.1 mrg ops[0] = op0; 4516 1.1 mrg ops[1] = op1; 4517 1.1 mrg 4518 1.1 mrg remember_compare_flags (op0, op1); 4519 1.1 mrg 4520 1.1 mrg machine_mode mode = GET_MODE (op0); 4521 1.1 mrg std::string prec = mode == SFmode ? "s" : mode == DFmode ? 
"d" : "x"; 4522 1.1 mrg 4523 1.1 mrg if (op1 == CONST0_RTX (GET_MODE (op0))) 4524 1.1 mrg { 4525 1.1 mrg if (FP_REG_P (op0)) 4526 1.1 mrg { 4527 1.1 mrg if (TARGET_COLDFIRE_FPU) 4528 1.1 mrg output_asm_insn ("ftst%.d %0", ops); 4529 1.1 mrg else 4530 1.1 mrg output_asm_insn ("ftst%.x %0", ops); 4531 1.1 mrg } 4532 1.1 mrg else 4533 1.1 mrg output_asm_insn (("ftst%." + prec + " %0").c_str (), ops); 4534 1.1 mrg return code; 4535 1.1 mrg } 4536 1.1 mrg 4537 1.1 mrg switch (which_alternative) 4538 1.1 mrg { 4539 1.1 mrg case 0: 4540 1.1 mrg if (TARGET_COLDFIRE_FPU) 4541 1.1 mrg output_asm_insn ("fcmp%.d %1,%0", ops); 4542 1.1 mrg else 4543 1.1 mrg output_asm_insn ("fcmp%.x %1,%0", ops); 4544 1.1 mrg break; 4545 1.1 mrg case 1: 4546 1.1 mrg output_asm_insn (("fcmp%." + prec + " %f1,%0").c_str (), ops); 4547 1.1 mrg break; 4548 1.1 mrg case 2: 4549 1.1 mrg output_asm_insn (("fcmp%." + prec + " %0,%f1").c_str (), ops); 4550 1.1 mrg std::swap (flags_compare_op0, flags_compare_op1); 4551 1.1 mrg return swap_condition (code); 4552 1.1 mrg case 3: 4553 1.1 mrg /* This is the ftst case, handled earlier. */ 4554 1.1 mrg gcc_unreachable (); 4555 1.1 mrg } 4556 1.1 mrg return code; 4557 1.1 mrg } 4558 1.1 mrg 4559 1.1 mrg /* Return an output template for a branch with CODE. 
*/ 4560 1.1 mrg const char * 4561 1.1 mrg m68k_output_branch_integer (rtx_code code) 4562 1.1 mrg { 4563 1.1 mrg switch (code) 4564 1.1 mrg { 4565 1.1 mrg case EQ: 4566 1.1 mrg return "jeq %l3"; 4567 1.1 mrg case NE: 4568 1.1 mrg return "jne %l3"; 4569 1.1 mrg case GT: 4570 1.1 mrg return "jgt %l3"; 4571 1.1 mrg case GTU: 4572 1.1 mrg return "jhi %l3"; 4573 1.1 mrg case LT: 4574 1.1 mrg return "jlt %l3"; 4575 1.1 mrg case LTU: 4576 1.1 mrg return "jcs %l3"; 4577 1.1 mrg case GE: 4578 1.1 mrg return "jge %l3"; 4579 1.1 mrg case GEU: 4580 1.1 mrg return "jcc %l3"; 4581 1.1 mrg case LE: 4582 1.1 mrg return "jle %l3"; 4583 1.1 mrg case LEU: 4584 1.1 mrg return "jls %l3"; 4585 1.1 mrg case PLUS: 4586 1.1 mrg return "jpl %l3"; 4587 1.1 mrg case MINUS: 4588 1.1 mrg return "jmi %l3"; 4589 1.1 mrg default: 4590 1.1 mrg gcc_unreachable (); 4591 1.1 mrg } 4592 1.1 mrg } 4593 1.1 mrg 4594 1.1 mrg /* Return an output template for a reversed branch with CODE. */ 4595 1.1 mrg const char * 4596 1.1 mrg m68k_output_branch_integer_rev (rtx_code code) 4597 1.1 mrg { 4598 1.1 mrg switch (code) 4599 1.1 mrg { 4600 1.1 mrg case EQ: 4601 1.1 mrg return "jne %l3"; 4602 1.1 mrg case NE: 4603 1.1 mrg return "jeq %l3"; 4604 1.1 mrg case GT: 4605 1.1 mrg return "jle %l3"; 4606 1.1 mrg case GTU: 4607 1.1 mrg return "jls %l3"; 4608 1.1 mrg case LT: 4609 1.1 mrg return "jge %l3"; 4610 1.1 mrg case LTU: 4611 1.1 mrg return "jcc %l3"; 4612 1.1 mrg case GE: 4613 1.1 mrg return "jlt %l3"; 4614 1.1 mrg case GEU: 4615 1.1 mrg return "jcs %l3"; 4616 1.1 mrg case LE: 4617 1.1 mrg return "jgt %l3"; 4618 1.1 mrg case LEU: 4619 1.1 mrg return "jhi %l3"; 4620 1.1 mrg case PLUS: 4621 1.1 mrg return "jmi %l3"; 4622 1.1 mrg case MINUS: 4623 1.1 mrg return "jpl %l3"; 4624 1.1 mrg default: 4625 1.1 mrg gcc_unreachable (); 4626 1.1 mrg } 4627 1.1 mrg } 4628 1.1 mrg 4629 1.1 mrg /* Return an output template for a scc instruction with CODE. 
*/ 4630 1.1 mrg const char * 4631 1.1 mrg m68k_output_scc (rtx_code code) 4632 1.1 mrg { 4633 1.1 mrg switch (code) 4634 1.1 mrg { 4635 1.1 mrg case EQ: 4636 1.1 mrg return "seq %0"; 4637 1.1 mrg case NE: 4638 1.1 mrg return "sne %0"; 4639 1.1 mrg case GT: 4640 1.1 mrg return "sgt %0"; 4641 1.1 mrg case GTU: 4642 1.1 mrg return "shi %0"; 4643 1.1 mrg case LT: 4644 1.1 mrg return "slt %0"; 4645 1.1 mrg case LTU: 4646 1.1 mrg return "scs %0"; 4647 1.1 mrg case GE: 4648 1.1 mrg return "sge %0"; 4649 1.1 mrg case GEU: 4650 1.1 mrg return "scc %0"; 4651 1.1 mrg case LE: 4652 1.1 mrg return "sle %0"; 4653 1.1 mrg case LEU: 4654 1.1 mrg return "sls %0"; 4655 1.1 mrg case PLUS: 4656 1.1 mrg return "spl %0"; 4657 1.1 mrg case MINUS: 4658 1.1 mrg return "smi %0"; 4659 1.1 mrg default: 4660 1.1 mrg gcc_unreachable (); 4661 1.1 mrg } 4662 1.1 mrg } 4663 1.1 mrg 4664 1.1 mrg /* Return an output template for a floating point branch 4665 1.1 mrg instruction with CODE. */ 4666 1.1 mrg const char * 4667 1.1 mrg m68k_output_branch_float (rtx_code code) 4668 1.1 mrg { 4669 1.1 mrg switch (code) 4670 1.1 mrg { 4671 1.1 mrg case EQ: 4672 1.1 mrg return "fjeq %l3"; 4673 1.1 mrg case NE: 4674 1.1 mrg return "fjne %l3"; 4675 1.1 mrg case GT: 4676 1.1 mrg return "fjgt %l3"; 4677 1.1 mrg case LT: 4678 1.1 mrg return "fjlt %l3"; 4679 1.1 mrg case GE: 4680 1.1 mrg return "fjge %l3"; 4681 1.1 mrg case LE: 4682 1.1 mrg return "fjle %l3"; 4683 1.1 mrg case ORDERED: 4684 1.1 mrg return "fjor %l3"; 4685 1.1 mrg case UNORDERED: 4686 1.1 mrg return "fjun %l3"; 4687 1.1 mrg case UNEQ: 4688 1.1 mrg return "fjueq %l3"; 4689 1.1 mrg case UNGE: 4690 1.1 mrg return "fjuge %l3"; 4691 1.1 mrg case UNGT: 4692 1.1 mrg return "fjugt %l3"; 4693 1.1 mrg case UNLE: 4694 1.1 mrg return "fjule %l3"; 4695 1.1 mrg case UNLT: 4696 1.1 mrg return "fjult %l3"; 4697 1.1 mrg case LTGT: 4698 1.1 mrg return "fjogl %l3"; 4699 1.1 mrg default: 4700 1.1 mrg gcc_unreachable (); 4701 1.1 mrg } 4702 1.1 mrg } 4703 1.1 mrg 4704 
1.1 mrg /* Return an output template for a reversed floating point branch 4705 1.1 mrg instruction with CODE. */ 4706 1.1 mrg const char * 4707 1.1 mrg m68k_output_branch_float_rev (rtx_code code) 4708 1.1 mrg { 4709 1.1 mrg switch (code) 4710 1.1 mrg { 4711 1.1 mrg case EQ: 4712 1.1 mrg return "fjne %l3"; 4713 1.1 mrg case NE: 4714 1.1 mrg return "fjeq %l3"; 4715 1.1 mrg case GT: 4716 1.1 mrg return "fjngt %l3"; 4717 1.1 mrg case LT: 4718 1.1 mrg return "fjnlt %l3"; 4719 1.1 mrg case GE: 4720 1.1 mrg return "fjnge %l3"; 4721 1.1 mrg case LE: 4722 1.1 mrg return "fjnle %l3"; 4723 1.1 mrg case ORDERED: 4724 1.1 mrg return "fjun %l3"; 4725 1.1 mrg case UNORDERED: 4726 1.1 mrg return "fjor %l3"; 4727 1.1 mrg case UNEQ: 4728 1.1 mrg return "fjogl %l3"; 4729 1.1 mrg case UNGE: 4730 1.1 mrg return "fjolt %l3"; 4731 1.1 mrg case UNGT: 4732 1.1 mrg return "fjole %l3"; 4733 1.1 mrg case UNLE: 4734 1.1 mrg return "fjogt %l3"; 4735 1.1 mrg case UNLT: 4736 1.1 mrg return "fjoge %l3"; 4737 1.1 mrg case LTGT: 4738 1.1 mrg return "fjueq %l3"; 4739 1.1 mrg default: 4740 1.1 mrg gcc_unreachable (); 4741 1.1 mrg } 4742 1.1 mrg } 4743 1.1 mrg 4744 1.1 mrg /* Return an output template for a floating point scc 4745 1.1 mrg instruction with CODE. 
*/ 4746 1.1 mrg const char * 4747 1.1 mrg m68k_output_scc_float (rtx_code code) 4748 1.1 mrg { 4749 1.1 mrg switch (code) 4750 1.1 mrg { 4751 1.1 mrg case EQ: 4752 1.1 mrg return "fseq %0"; 4753 1.1 mrg case NE: 4754 1.1 mrg return "fsne %0"; 4755 1.1 mrg case GT: 4756 1.1 mrg return "fsgt %0"; 4757 1.1 mrg case GTU: 4758 1.1 mrg return "fshi %0"; 4759 1.1 mrg case LT: 4760 1.1 mrg return "fslt %0"; 4761 1.1 mrg case GE: 4762 1.1 mrg return "fsge %0"; 4763 1.1 mrg case LE: 4764 1.1 mrg return "fsle %0"; 4765 1.1 mrg case ORDERED: 4766 1.1 mrg return "fsor %0"; 4767 1.1 mrg case UNORDERED: 4768 1.1 mrg return "fsun %0"; 4769 1.1 mrg case UNEQ: 4770 1.1 mrg return "fsueq %0"; 4771 1.1 mrg case UNGE: 4772 1.1 mrg return "fsuge %0"; 4773 1.1 mrg case UNGT: 4774 1.1 mrg return "fsugt %0"; 4775 1.1 mrg case UNLE: 4776 1.1 mrg return "fsule %0"; 4777 1.1 mrg case UNLT: 4778 1.1 mrg return "fsult %0"; 4779 1.1 mrg case LTGT: 4780 1.1 mrg return "fsogl %0"; 4781 1.1 mrg default: 4782 1.1 mrg gcc_unreachable (); 4783 1.1 mrg } 4784 1.1 mrg } 4785 1.1 mrg 4786 1.1 mrg const char * 4788 1.1 mrg output_move_const_double (rtx *operands) 4789 1.1 mrg { 4790 1.1 mrg int code = standard_68881_constant_p (operands[1]); 4791 1.1 mrg 4792 1.1 mrg if (code != 0) 4793 1.1 mrg { 4794 1.1 mrg static char buf[40]; 4795 1.1 mrg 4796 1.1 mrg sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); 4797 1.1 mrg return buf; 4798 1.1 mrg } 4799 1.1 mrg return "fmove%.d %1,%0"; 4800 1.1 mrg } 4801 1.1 mrg 4802 1.1 mrg const char * 4803 1.1 mrg output_move_const_single (rtx *operands) 4804 1.1 mrg { 4805 1.1 mrg int code = standard_68881_constant_p (operands[1]); 4806 1.1 mrg 4807 1.1 mrg if (code != 0) 4808 1.1 mrg { 4809 1.1 mrg static char buf[40]; 4810 1.1 mrg 4811 1.1 mrg sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); 4812 1.1 mrg return buf; 4813 1.1 mrg } 4814 1.1 mrg return "fmove%.s %f1,%0"; 4815 1.1 mrg } 4816 1.1 mrg 4817 1.1 mrg /* Return nonzero if X, a CONST_DOUBLE, has a value that we 
can get 4818 1.1 mrg from the "fmovecr" instruction. 4819 1.1 mrg The value, anded with 0xff, gives the code to use in fmovecr 4820 1.1 mrg to get the desired constant. */ 4821 1.1 mrg 4822 1.1 mrg /* This code has been fixed for cross-compilation. */ 4823 1.1 mrg 4824 1.1 mrg static int inited_68881_table = 0; 4825 1.1 mrg 4826 1.1 mrg static const char *const strings_68881[7] = { 4827 1.1 mrg "0.0", 4828 1.1 mrg "1.0", 4829 1.1 mrg "10.0", 4830 1.1 mrg "100.0", 4831 1.1 mrg "10000.0", 4832 1.1 mrg "1e8", 4833 1.1 mrg "1e16" 4834 1.1 mrg }; 4835 1.1 mrg 4836 1.1 mrg static const int codes_68881[7] = { 4837 1.1 mrg 0x0f, 4838 1.1 mrg 0x32, 4839 1.1 mrg 0x33, 4840 1.1 mrg 0x34, 4841 1.1 mrg 0x35, 4842 1.1 mrg 0x36, 4843 1.1 mrg 0x37 4844 1.1 mrg }; 4845 1.1 mrg 4846 1.1 mrg REAL_VALUE_TYPE values_68881[7]; 4847 1.1 mrg 4848 1.1 mrg /* Set up values_68881 array by converting the decimal values 4849 1.1 mrg strings_68881 to binary. */ 4850 1.1 mrg 4851 1.1 mrg void 4852 1.1 mrg init_68881_table (void) 4853 1.1 mrg { 4854 1.1 mrg int i; 4855 1.1 mrg REAL_VALUE_TYPE r; 4856 1.1 mrg machine_mode mode; 4857 1.1 mrg 4858 1.1 mrg mode = SFmode; 4859 1.1 mrg for (i = 0; i < 7; i++) 4860 1.1 mrg { 4861 1.1 mrg if (i == 6) 4862 1.1 mrg mode = DFmode; 4863 1.1 mrg r = REAL_VALUE_ATOF (strings_68881[i], mode); 4864 1.1 mrg values_68881[i] = r; 4865 1.1 mrg } 4866 1.1 mrg inited_68881_table = 1; 4867 1.1 mrg } 4868 1.1 mrg 4869 1.1 mrg int 4870 1.1 mrg standard_68881_constant_p (rtx x) 4871 1.1 mrg { 4872 1.1 mrg const REAL_VALUE_TYPE *r; 4873 1.1 mrg int i; 4874 1.1 mrg 4875 1.1 mrg /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be 4876 1.1 mrg used at all on those chips. */ 4877 1.1 mrg if (TUNE_68040_60) 4878 1.1 mrg return 0; 4879 1.1 mrg 4880 1.1 mrg if (! 
inited_68881_table) 4881 1.1 mrg init_68881_table (); 4882 1.1 mrg 4883 1.1 mrg r = CONST_DOUBLE_REAL_VALUE (x); 4884 1.1 mrg 4885 1.1 mrg /* Use real_identical instead of real_equal so that -0.0 is rejected. */ 4886 1.1 mrg for (i = 0; i < 6; i++) 4887 1.1 mrg { 4888 1.1 mrg if (real_identical (r, &values_68881[i])) 4889 1.1 mrg return (codes_68881[i]); 4890 1.1 mrg } 4891 1.1 mrg 4892 1.1 mrg if (GET_MODE (x) == SFmode) 4893 1.1 mrg return 0; 4894 1.1 mrg 4895 1.1 mrg if (real_equal (r, &values_68881[6])) 4896 1.1 mrg return (codes_68881[6]); 4897 1.1 mrg 4898 1.1 mrg /* larger powers of ten in the constants ram are not used 4899 1.1 mrg because they are not equal to a `double' C constant. */ 4900 1.1 mrg return 0; 4901 1.1 mrg } 4902 1.1 mrg 4903 1.1 mrg /* If X is a floating-point constant, return the logarithm of X base 2, 4904 1.1 mrg or 0 if X is not a power of 2. */ 4905 1.1 mrg 4906 1.1 mrg int 4907 1.1 mrg floating_exact_log2 (rtx x) 4908 1.1 mrg { 4909 1.1 mrg const REAL_VALUE_TYPE *r; 4910 1.1 mrg REAL_VALUE_TYPE r1; 4911 1.1 mrg int exp; 4912 1.1 mrg 4913 1.1 mrg r = CONST_DOUBLE_REAL_VALUE (x); 4914 1.1 mrg 4915 1.1 mrg if (real_less (r, &dconst1)) 4916 1.1 mrg return 0; 4917 1.1 mrg 4918 1.1 mrg exp = real_exponent (r); 4919 1.1 mrg real_2expN (&r1, exp, DFmode); 4920 1.1 mrg if (real_equal (&r1, r)) 4921 1.1 mrg return exp; 4922 1.1 mrg 4923 1.1 mrg return 0; 4924 1.1 mrg } 4925 1.1 mrg 4926 1.1 mrg /* A C compound statement to output to stdio stream STREAM the 4928 1.1 mrg assembler syntax for an instruction operand X. X is an RTL 4929 1.1 mrg expression. 4930 1.1 mrg 4931 1.1 mrg CODE is a value that can be used to specify one of several ways 4932 1.1 mrg of printing the operand. It is used when identical operands 4933 1.1 mrg must be printed differently depending on the context. CODE 4934 1.1 mrg comes from the `%' specification that was used to request 4935 1.1 mrg printing of the operand. 
If the specification was just `%DIGIT' 4936 1.1 mrg then CODE is 0; if the specification was `%LTR DIGIT' then CODE 4937 1.1 mrg is the ASCII code for LTR. 4938 1.1 mrg 4939 1.1 mrg If X is a register, this macro should print the register's name. 4940 1.1 mrg The names can be found in an array `reg_names' whose type is 4941 1.1 mrg `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'. 4942 1.1 mrg 4943 1.1 mrg When the machine description has a specification `%PUNCT' (a `%' 4944 1.1 mrg followed by a punctuation character), this macro is called with 4945 1.1 mrg a null pointer for X and the punctuation character for CODE. 4946 1.1 mrg 4947 1.1 mrg The m68k specific codes are: 4948 1.1 mrg 4949 1.1 mrg '.' for dot needed in Motorola-style opcode names. 4950 1.1 mrg '-' for an operand pushing on the stack: 4951 1.1 mrg sp@-, -(sp) or -(%sp) depending on the style of syntax. 4952 1.1 mrg '+' for an operand pushing on the stack: 4953 1.1 mrg sp@+, (sp)+ or (%sp)+ depending on the style of syntax. 4954 1.1 mrg '@' for a reference to the top word on the stack: 4955 1.1 mrg sp@, (sp) or (%sp) depending on the style of syntax. 4956 1.1 mrg '#' for an immediate operand prefix (# in MIT and Motorola syntax 4957 1.1 mrg but & in SGS syntax). 4958 1.1 mrg '!' for the cc register (used in an `and to cc' insn). 4959 1.1 mrg '$' for the letter `s' in an op code, but only on the 68040. 4960 1.1 mrg '&' for the letter `d' in an op code, but only on the 68040. 4961 1.1 mrg '/' for register prefix needed by longlong.h. 4962 1.1 mrg '?' for m68k_library_id_string 4963 1.1 mrg 4964 1.1 mrg 'b' for byte insn (no effect, on the Sun; this is for the ISI). 4965 1.1 mrg 'd' to force memory addressing to be absolute, not relative. 4966 1.1 mrg 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex) 4967 1.1 mrg 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex), 4968 1.1 mrg or print pair of registers as rx:ry. 
4969 1.1 mrg 'p' print an address with @PLTPC attached, but only if the operand 4970 1.1 mrg is not locally-bound. */ 4971 1.1 mrg 4972 1.1 mrg void 4973 1.1 mrg print_operand (FILE *file, rtx op, int letter) 4974 1.1 mrg { 4975 1.1 mrg if (op != NULL_RTX) 4976 1.1 mrg m68k_adjust_decorated_operand (op); 4977 1.1 mrg 4978 1.1 mrg if (letter == '.') 4979 1.1 mrg { 4980 1.1 mrg if (MOTOROLA) 4981 1.1 mrg fprintf (file, "."); 4982 1.1 mrg } 4983 1.1 mrg else if (letter == '#') 4984 1.1 mrg asm_fprintf (file, "%I"); 4985 1.1 mrg else if (letter == '-') 4986 1.1 mrg asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-"); 4987 1.1 mrg else if (letter == '+') 4988 1.1 mrg asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+"); 4989 1.1 mrg else if (letter == '@') 4990 1.1 mrg asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@"); 4991 1.1 mrg else if (letter == '!') 4992 1.1 mrg asm_fprintf (file, "%Rfpcr"); 4993 1.1 mrg else if (letter == '$') 4994 1.1 mrg { 4995 1.1 mrg if (TARGET_68040) 4996 1.1 mrg fprintf (file, "s"); 4997 1.1 mrg } 4998 1.1 mrg else if (letter == '&') 4999 1.1 mrg { 5000 1.1 mrg if (TARGET_68040) 5001 1.1 mrg fprintf (file, "d"); 5002 1.1 mrg } 5003 1.1 mrg else if (letter == '/') 5004 1.1 mrg asm_fprintf (file, "%R"); 5005 1.1 mrg else if (letter == '?') 5006 1.1 mrg asm_fprintf (file, m68k_library_id_string); 5007 1.1 mrg else if (letter == 'p') 5008 1.1 mrg { 5009 1.1 mrg output_addr_const (file, op); 5010 1.1 mrg if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op))) 5011 1.1 mrg fprintf (file, "@PLTPC"); 5012 1.1 mrg } 5013 1.1 mrg else if (GET_CODE (op) == REG) 5014 1.1 mrg { 5015 1.1 mrg if (letter == 'R') 5016 1.1 mrg /* Print out the second register name of a register pair. 5017 1.1 mrg I.e., R (6) => 7. 
*/ 5018 1.1 mrg fputs (M68K_REGNAME(REGNO (op) + 1), file); 5019 1.1 mrg else 5020 1.1 mrg fputs (M68K_REGNAME(REGNO (op)), file); 5021 1.1 mrg } 5022 1.1 mrg else if (GET_CODE (op) == MEM) 5023 1.1 mrg { 5024 1.1 mrg output_address (GET_MODE (op), XEXP (op, 0)); 5025 1.1 mrg if (letter == 'd' && ! TARGET_68020 5026 1.1 mrg && CONSTANT_ADDRESS_P (XEXP (op, 0)) 5027 1.1 mrg && !(GET_CODE (XEXP (op, 0)) == CONST_INT 5028 1.1 mrg && INTVAL (XEXP (op, 0)) < 0x8000 5029 1.1 mrg && INTVAL (XEXP (op, 0)) >= -0x8000)) 5030 1.1 mrg fprintf (file, MOTOROLA ? ".l" : ":l"); 5031 1.1 mrg } 5032 1.1 mrg else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode) 5033 1.1 mrg { 5034 1.1 mrg long l; 5035 1.1 mrg REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l); 5036 1.1 mrg asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF); 5037 1.1 mrg } 5038 1.1 mrg else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode) 5039 1.1 mrg { 5040 1.1 mrg long l[3]; 5041 1.1 mrg REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l); 5042 1.1 mrg asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF, 5043 1.1 mrg l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF); 5044 1.1 mrg } 5045 1.1 mrg else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode) 5046 1.1 mrg { 5047 1.1 mrg long l[2]; 5048 1.1 mrg REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l); 5049 1.1 mrg asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF); 5050 1.1 mrg } 5051 1.1 mrg else 5052 1.1 mrg { 5053 1.1 mrg /* Use `print_operand_address' instead of `output_addr_const' 5054 1.1 mrg to ensure that we print relevant PIC stuff. 
*/ 5055 1.1 mrg asm_fprintf (file, "%I"); 5056 1.1 mrg if (TARGET_PCREL 5057 1.1 mrg && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)) 5058 1.1 mrg print_operand_address (file, op); 5059 1.1 mrg else 5060 1.1 mrg output_addr_const (file, op); 5061 1.1 mrg } 5062 1.1 mrg } 5063 1.1 mrg 5064 1.1 mrg /* Return string for TLS relocation RELOC. */ 5065 1.1 mrg 5066 1.1 mrg static const char * 5067 1.1 mrg m68k_get_reloc_decoration (enum m68k_reloc reloc) 5068 1.1 mrg { 5069 1.1 mrg /* To my knowledge, !MOTOROLA assemblers don't support TLS. */ 5070 1.1 mrg gcc_assert (MOTOROLA || reloc == RELOC_GOT); 5071 1.1 mrg 5072 1.1 mrg switch (reloc) 5073 1.1 mrg { 5074 1.1 mrg case RELOC_GOT: 5075 1.1 mrg if (MOTOROLA) 5076 1.1 mrg { 5077 1.1 mrg if (flag_pic == 1 && TARGET_68020) 5078 1.1 mrg return "@GOT.w"; 5079 1.1 mrg else 5080 1.1 mrg return "@GOT"; 5081 1.1 mrg } 5082 1.1 mrg else 5083 1.1 mrg { 5084 1.1 mrg if (TARGET_68020) 5085 1.1 mrg { 5086 1.1 mrg switch (flag_pic) 5087 1.1 mrg { 5088 1.1 mrg case 1: 5089 1.1 mrg return ":w"; 5090 1.1 mrg case 2: 5091 1.1 mrg return ":l"; 5092 1.1 mrg default: 5093 1.1 mrg return ""; 5094 1.1 mrg } 5095 1.1 mrg } 5096 1.1 mrg } 5097 1.1 mrg gcc_unreachable (); 5098 1.1 mrg 5099 1.1 mrg case RELOC_TLSGD: 5100 1.1 mrg return "@TLSGD"; 5101 1.1 mrg 5102 1.1 mrg case RELOC_TLSLDM: 5103 1.1 mrg return "@TLSLDM"; 5104 1.1 mrg 5105 1.1 mrg case RELOC_TLSLDO: 5106 1.1 mrg return "@TLSLDO"; 5107 1.1 mrg 5108 1.1 mrg case RELOC_TLSIE: 5109 1.1 mrg return "@TLSIE"; 5110 1.1 mrg 5111 1.1 mrg case RELOC_TLSLE: 5112 1.1 mrg return "@TLSLE"; 5113 1.1 mrg 5114 1.1 mrg default: 5115 1.1 mrg gcc_unreachable (); 5116 1.1 mrg } 5117 1.1 mrg } 5118 1.1 mrg 5119 1.1 mrg /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. 
*/ 5120 1.1 mrg 5121 1.1 mrg static bool 5122 1.1 mrg m68k_output_addr_const_extra (FILE *file, rtx x) 5123 1.1 mrg { 5124 1.1 mrg if (GET_CODE (x) == UNSPEC) 5125 1.1 mrg { 5126 1.1 mrg switch (XINT (x, 1)) 5127 1.1 mrg { 5128 1.1 mrg case UNSPEC_RELOC16: 5129 1.1 mrg case UNSPEC_RELOC32: 5130 1.1 mrg output_addr_const (file, XVECEXP (x, 0, 0)); 5131 1.1 mrg fputs (m68k_get_reloc_decoration 5132 1.1 mrg ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file); 5133 1.1 mrg return true; 5134 1.1 mrg 5135 1.1 mrg default: 5136 1.1 mrg break; 5137 1.1 mrg } 5138 1.1 mrg } 5139 1.1 mrg 5140 1.1 mrg return false; 5141 1.1 mrg } 5142 1.1 mrg 5143 1.1 mrg /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */ 5144 1.1 mrg 5145 1.1 mrg static void 5146 1.1 mrg m68k_output_dwarf_dtprel (FILE *file, int size, rtx x) 5147 1.1 mrg { 5148 1.1 mrg gcc_assert (size == 4); 5149 1.1 mrg fputs ("\t.long\t", file); 5150 1.1 mrg output_addr_const (file, x); 5151 1.1 mrg fputs ("@TLSLDO+0x8000", file); 5152 1.1 mrg } 5153 1.1 mrg 5154 1.1 mrg /* In the name of slightly smaller debug output, and to cater to 5155 1.1 mrg general assembler lossage, recognize various UNSPEC sequences 5156 1.1 mrg and turn them back into a direct symbol reference. 
*/ 5157 1.1 mrg 5158 1.1 mrg static rtx 5159 1.1 mrg m68k_delegitimize_address (rtx orig_x) 5160 1.1 mrg { 5161 1.1 mrg rtx x; 5162 1.1 mrg struct m68k_address addr; 5163 1.1 mrg rtx unspec; 5164 1.1 mrg 5165 1.1 mrg orig_x = delegitimize_mem_from_attrs (orig_x); 5166 1.1 mrg x = orig_x; 5167 1.1 mrg if (MEM_P (x)) 5168 1.1 mrg x = XEXP (x, 0); 5169 1.1 mrg 5170 1.1 mrg if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode) 5171 1.1 mrg return orig_x; 5172 1.1 mrg 5173 1.1 mrg if (!m68k_decompose_address (GET_MODE (x), x, false, &addr) 5174 1.1 mrg || addr.offset == NULL_RTX 5175 1.1 mrg || GET_CODE (addr.offset) != CONST) 5176 1.1 mrg return orig_x; 5177 1.1 mrg 5178 1.1 mrg unspec = XEXP (addr.offset, 0); 5179 1.1 mrg if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1))) 5180 1.1 mrg unspec = XEXP (unspec, 0); 5181 1.1 mrg if (GET_CODE (unspec) != UNSPEC 5182 1.1 mrg || (XINT (unspec, 1) != UNSPEC_RELOC16 5183 1.1 mrg && XINT (unspec, 1) != UNSPEC_RELOC32)) 5184 1.1 mrg return orig_x; 5185 1.1 mrg x = XVECEXP (unspec, 0, 0); 5186 1.1 mrg gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF); 5187 1.1 mrg if (unspec != XEXP (addr.offset, 0)) 5188 1.1 mrg x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1)); 5189 1.1 mrg if (addr.index) 5190 1.1 mrg { 5191 1.1 mrg rtx idx = addr.index; 5192 1.1 mrg if (addr.scale != 1) 5193 1.1 mrg idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale)); 5194 1.1 mrg x = gen_rtx_PLUS (Pmode, idx, x); 5195 1.1 mrg } 5196 1.1 mrg if (addr.base) 5197 1.1 mrg x = gen_rtx_PLUS (Pmode, addr.base, x); 5198 1.1 mrg if (MEM_P (orig_x)) 5199 1.1 mrg x = replace_equiv_address_nv (orig_x, x); 5200 1.1 mrg return x; 5201 1.1 mrg } 5202 1.1 mrg 5203 1.1 mrg 5204 1.1 mrg /* A C compound statement to output to stdio stream STREAM the 5206 1.1 mrg assembler syntax for an instruction operand that is a memory 5207 1.1 mrg reference whose address is ADDR. ADDR is an RTL expression. 

   Note that this contains a kludge that knows that the only reason
   we have an address (plus (label_ref...) (reg...)) when not generating
   PIC code is in the insn before a tablejump, and we know that m68k.md
   generates a label LInnn: on such an insn.

   It is possible for PIC to generate a (plus (label_ref...) (reg...))
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).

   This routine is responsible for distinguishing between -fpic and -fPIC
   style relocations in an address.  When generating -fpic code the
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

/* Print the assembler syntax (MOTOROLA or MIT, selected at build time)
   for address ADDR to FILE.  ADDR must be decomposable by
   m68k_decompose_address; the mode passed there is QImode, which
   accepts the widest set of addresses.  */
void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  m68k_adjust_decorated_operand (addr);

  /* Split ADDR into base/index/offset/scale; an address that reached
     output must be valid, hence the hard assert.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  /* -fpic (flag_pic == 1) uses a word offset, -fPIC a long one.  */
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}

/* Check for cases where a clr insns can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.
*/ 5352 1.1 mrg 5353 1.1 mrg bool 5354 1.1 mrg strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn, 5355 1.1 mrg rtx target) 5356 1.1 mrg { 5357 1.1 mrg rtx_insn *p = first_insn; 5358 1.1 mrg 5359 1.1 mrg while ((p = PREV_INSN (p))) 5360 1.1 mrg { 5361 1.1 mrg if (NOTE_INSN_BASIC_BLOCK_P (p)) 5362 1.1 mrg return false; 5363 1.1 mrg 5364 1.1 mrg if (NOTE_P (p)) 5365 1.1 mrg continue; 5366 1.1 mrg 5367 1.1 mrg /* If it isn't an insn, then give up. */ 5368 1.1 mrg if (!INSN_P (p)) 5369 1.1 mrg return false; 5370 1.1 mrg 5371 1.1 mrg if (reg_set_p (target, p)) 5372 1.1 mrg { 5373 1.1 mrg rtx set = single_set (p); 5374 1.1 mrg rtx dest; 5375 1.1 mrg 5376 1.1 mrg /* If it isn't an easy to recognize insn, then give up. */ 5377 1.1 mrg if (! set) 5378 1.1 mrg return false; 5379 1.1 mrg 5380 1.1 mrg dest = SET_DEST (set); 5381 1.1 mrg 5382 1.1 mrg /* If this sets the entire target register to zero, then our 5383 1.1 mrg first_insn is redundant. */ 5384 1.1 mrg if (rtx_equal_p (dest, target) 5385 1.1 mrg && SET_SRC (set) == const0_rtx) 5386 1.1 mrg return true; 5387 1.1 mrg else if (GET_CODE (dest) == STRICT_LOW_PART 5388 1.1 mrg && GET_CODE (XEXP (dest, 0)) == REG 5389 1.1 mrg && REGNO (XEXP (dest, 0)) == REGNO (target) 5390 1.1 mrg && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0))) 5391 1.1 mrg <= GET_MODE_SIZE (mode))) 5392 1.1 mrg /* This is a strict low part set which modifies less than 5393 1.1 mrg we are using, so it is safe. */ 5394 1.1 mrg ; 5395 1.1 mrg else 5396 1.1 mrg return false; 5397 1.1 mrg } 5398 1.1 mrg } 5399 1.1 mrg 5400 1.1 mrg return false; 5401 1.1 mrg } 5402 1.1 mrg 5403 1.1 mrg /* Operand predicates for implementing asymmetric pc-relative addressing 5404 1.1 mrg on m68k. The m68k supports pc-relative addressing (mode 7, register 2) 5405 1.1 mrg when used as a source operand, but not as a destination operand. 
5406 1.1 mrg 5407 1.1 mrg We model this by restricting the meaning of the basic predicates 5408 1.1 mrg (general_operand, memory_operand, etc) to forbid the use of this 5409 1.1 mrg addressing mode, and then define the following predicates that permit 5410 1.1 mrg this addressing mode. These predicates can then be used for the 5411 1.1 mrg source operands of the appropriate instructions. 5412 1.1 mrg 5413 1.1 mrg n.b. While it is theoretically possible to change all machine patterns 5414 1.1 mrg to use this addressing mode where permitted by the architecture, 5415 1.1 mrg it has only been implemented for "common" cases: SImode, HImode, and 5416 1.1 mrg QImode operands, and only for the principal operations that would 5417 1.1 mrg require this addressing mode: data movement and simple integer operations. 5418 1.1 mrg 5419 1.1 mrg In parallel with these new predicates, two new constraint letters 5420 1.1 mrg were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'. 5421 1.1 mrg 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case. 5422 1.1 mrg In the pcrel case 's' is only valid in combination with 'a' registers. 5423 1.1 mrg See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding 5424 1.1 mrg of how these constraints are used. 5425 1.1 mrg 5426 1.1 mrg The use of these predicates is strictly optional, though patterns that 5427 1.1 mrg don't will cause an extra reload register to be allocated where one 5428 1.1 mrg was not necessary: 5429 1.1 mrg 5430 1.1 mrg lea (abc:w,%pc),%a0 ; need to reload address 5431 1.1 mrg moveq &1,%d1 ; since write to pc-relative space 5432 1.1 mrg movel %d1,%a0@ ; is not allowed 5433 1.1 mrg ... 5434 1.1 mrg lea (abc:w,%pc),%a1 ; no need to reload address here 5435 1.1 mrg movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok 5436 1.1 mrg 5437 1.1 mrg For more info, consult tiemann (at) cygnus.com.
5438 1.1 mrg 5439 1.1 mrg 5440 1.1 mrg All of the ugliness with predicates and constraints is due to the 5441 1.1 mrg simple fact that the m68k does not allow a pc-relative addressing 5442 1.1 mrg mode as a destination. gcc does not distinguish between source and 5443 1.1 mrg destination addresses. Hence, if we claim that pc-relative address 5444 1.1 mrg modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we 5445 1.1 mrg end up with invalid code. To get around this problem, we left 5446 1.1 mrg pc-relative modes as invalid addresses, and then added special 5447 1.1 mrg predicates and constraints to accept them. 5448 1.1 mrg 5449 1.1 mrg A cleaner way to handle this is to modify gcc to distinguish 5450 1.1 mrg between source and destination addresses. We can then say that 5451 1.1 mrg pc-relative is a valid source address but not a valid destination 5452 1.1 mrg address, and hopefully avoid a lot of the predicate and constraint 5453 1.1 mrg hackery. Unfortunately, this would be a pretty big change. It would 5454 1.1 mrg be a useful change for a number of ports, but there aren't any current 5455 1.1 mrg plans to undertake this. 

***************************************************************************/


/* Output an SImode AND of operands[0] and operands[2], choosing a
   narrower or bit-level instruction when the constant mask allows it.
   NOTE: this mutates operands[] in place to re-target the template.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  CC_STATUS_INIT;
  /* Mask only clears bits in the low 16 bits: a word-sized AND on the
     low half is enough (not available on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	/* Address the low word of the memory destination.  */
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* Mask clears exactly one bit: use bclr.  Note the assignment to
     logval inside the condition.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, bclr works on a byte; point operands[0] at the
	     byte containing the bit and renumber the bit within it.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      return "bclr %1,%0";
    }
  /* Only a standard logical operation on the whole word sets the
     condition codes in a way we can use.  */
  if (!side_effects_p (operands[0]))
    flags_operand1 = operands[0];
  flags_valid = FLAGS_VALID_YES;
  return "and%.l %2,%0";
}

/* Output an SImode IOR of operands[0] and operands[2]; the structure
   mirrors output_andsi3 (word-sized or, single-bit bset, full orl).  */
const char *
output_iorsi3 (rtx *operands)
{
  int logval;
  CC_STATUS_INIT;
  /* Constant only sets bits in the low 16 bits: word-sized operation
     suffices (not available on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      if (INTVAL (operands[2]) == 0xffff)
	/* Setting the whole low word: a plain move is shorter.  */
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* Constant sets exactly one bit: use bset.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      return "bset %1,%0";
    }
  /* Only a standard logical operation on the whole word sets the
     condition codes in a way we can use.  */
  if (!side_effects_p (operands[0]))
    flags_operand1 = operands[0];
  flags_valid = FLAGS_VALID_YES;
  return "or%.l %2,%0";
}

/* Output an SImode XOR of operands[0] and operands[2]; the structure
   mirrors output_andsi3 (word-sized eor/not, single-bit bchg, full eorl).  */
const char *
output_xorsi3 (rtx *operands)
{
  int logval;
  CC_STATUS_INIT;
  /* Constant only flips bits in the low 16 bits.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      if (INTVAL (operands[2]) == 0xffff)
	/* Flipping the whole low word is a word-sized NOT.  */
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* Constant flips exactly one bit: use bchg.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      return "bchg %1,%0";
    }
  /* Only a standard logical operation on the whole word sets the
     condition codes in a way we can use.  */
  if (!side_effects_p (operands[0]))
    flags_operand1 = operands[0];
  flags_valid = FLAGS_VALID_YES;
  return "eor%.l %2,%0";
}

/* Return the instruction that should be used for a call to address X,
   which is known to be in operand 0.
 */

const char *
output_call (rtx x)
{
  /* Direct symbolic calls use the configured call template; anything
     else goes through an indirect jsr.  */
  if (symbolic_operand (x, VOIDmode))
    return m68k_symbolic_call;
  else
    return "jsr %a0";
}

/* Likewise sibling calls.  */

const char *
output_sibcall (rtx x)
{
  if (symbolic_operand (x, VOIDmode))
    return m68k_symbolic_jump;
  else
    return "jmp %a0";
}

/* Worker for TARGET_ASM_OUTPUT_MI_THUNK: emit the assembly for THUNK,
   which adds DELTA (and optionally *(*this + VCALL_OFFSET)) to the
   incoming `this' pointer and then tail-calls FUNCTION.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
  rtx this_slot, offset, addr, mem, tmp;
  rtx_insn *insn;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  assemble_start_function (thunk, fnname);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  assemble_end_function (thunk, fnname);

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}

/* Return nonzero if register old_reg can be renamed to register new_reg.  */
int
m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			   unsigned int new_reg)
{

  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.
 */

  if ((m68k_get_function_kind (current_function_decl)
       == m68k_fk_interrupt_handler)
      && !df_regs_ever_live_p (new_reg))
    return 0;

  return 1;
}

/* Implement TARGET_HARD_REGNO_NREGS.

   On the m68k, ordinary registers hold 32 bits worth;
   for the 68881 registers, a single register is always enough for
   anything that can be stored in them at all.  */

static unsigned int
m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  /* Registers 16 and up are the FP registers (see regno_reg_class):
     one register per mode unit.  */
  if (regno >= 16)
    return GET_MODE_NUNITS (mode);
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  On the 68000, we let the cpu
   registers can hold any mode, but restrict the 68881 registers to
   floating-point modes.  */

static bool
m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (DATA_REGNO_P (regno))
    {
      /* Data Registers, can hold aggregate if fits in.  The value must
	 not run past the last data register (regno 7).  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
	return true;
    }
  else if (ADDRESS_REGNO_P (regno))
    {
      /* Likewise for address registers (regnos 8..15).  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
	return true;
    }
  else if (FP_REGNO_P (regno))
    {
      /* FPU registers, hold float or complex float of long double or
	 smaller.  */
      if ((GET_MODE_CLASS (mode) == MODE_FLOAT
	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	  && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
	return true;
    }
  return false;
}

/* Implement TARGET_MODES_TIEABLE_P.  Without hardware float everything
   ties; otherwise float and non-float classes must not mix.  */

static bool
m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (!TARGET_HARD_FLOAT
	  || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
	       || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
	      == (GET_MODE_CLASS (mode2) == MODE_FLOAT
		  || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
}

/* Implement SECONDARY_RELOAD_CLASS.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}

/* Implement PREFERRED_RELOAD_CLASS.
 */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}

/* Return floating point values in a 68881 register.  This makes 68881 code
   a little bit faster.  It also makes -msoft-float code incompatible with
   hard-float code, so people have to be careful not to mix the two.
   For ColdFire it was decided the ABI incompatibility is undesirable.
   If there is need for a hard-float ABI it is probably worth doing it
   properly and also passing function arguments in FP registers.  */
rtx
m68k_libcall_value (machine_mode mode)
{
  switch (mode) {
  case E_SFmode:
  case E_DFmode:
  case E_XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* Integer libcall results come back in d0 (or a0 on targets where
     m68k_libcall_value_in_a0_p is set).  */
  return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
}

/* Location in which function value is returned.
   NOTE: Due to differences in ABIs, don't call this function directly,
   use FUNCTION_VALUE instead.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case E_SFmode:
  case E_DFmode:
  case E_XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, A0_REG),
				     const0_rtx),
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, D0_REG),
				     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif

/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;

/* Operand type.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).
 */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };

/* Return type of memory ADDR_RTX refers to.  */
static enum attr_op_type
sched_address_type (machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* Symbolic addresses are absolute references (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A scaled index implies EA mode 6.  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}

/* Return X or Y (depending on OPX_P) operand of INSN.  */
static rtx
sched_get_operand (rtx_insn *insn, bool opx_p)
{
  int i;

  if (recog_memoized (insn) < 0)
    gcc_unreachable ();

  extract_constrain_insn_cached (insn);

  /* The opx/opy insn attributes give the index of the relevant operand.  */
  if (opx_p)
    i = get_attr_opx (insn);
  else
    i = get_attr_opy (insn);

  if (i >= recog_data.n_operands)
    return NULL;

  return recog_data.operand[i];
}

/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload a float-mode pseudo is assumed to land in an FP
	 register; afterwards we can check the hard register directly.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case E_SFmode:
	  return OP_TYPE_IMM_W;

	case E_VOIDmode:
	case E_DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case E_QImode:
	  return OP_TYPE_IMM_Q;

	case E_HImode:
	  return OP_TYPE_IMM_W;

	case E_SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}

/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  /* Translate the generic OP_TYPE_* classification into the generated
     attr_opx_type enumeration.  */
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}

/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
*/ 6190 1.1 mrg enum attr_opy_type 6191 1.1 mrg m68k_sched_attr_opy_type (rtx_insn *insn, int address_p) 6192 1.1 mrg { 6193 1.1 mrg switch (sched_attr_op_type (insn, false, address_p != 0)) 6194 1.1 mrg { 6195 1.1 mrg case OP_TYPE_RN: 6196 1.1 mrg return OPY_TYPE_RN; 6197 1.1 mrg 6198 1.1 mrg case OP_TYPE_FPN: 6199 1.1 mrg return OPY_TYPE_FPN; 6200 1.1 mrg 6201 1.1 mrg case OP_TYPE_MEM1: 6202 1.1 mrg return OPY_TYPE_MEM1; 6203 1.1 mrg 6204 1.1 mrg case OP_TYPE_MEM234: 6205 1.1 mrg return OPY_TYPE_MEM234; 6206 1.1 mrg 6207 1.1 mrg case OP_TYPE_MEM5: 6208 1.1 mrg return OPY_TYPE_MEM5; 6209 1.1 mrg 6210 1.1 mrg case OP_TYPE_MEM6: 6211 1.1 mrg return OPY_TYPE_MEM6; 6212 1.1 mrg 6213 1.1 mrg case OP_TYPE_MEM7: 6214 1.1 mrg return OPY_TYPE_MEM7; 6215 1.1 mrg 6216 1.1 mrg case OP_TYPE_IMM_Q: 6217 1.1 mrg return OPY_TYPE_IMM_Q; 6218 1.1 mrg 6219 1.1 mrg case OP_TYPE_IMM_W: 6220 1.1 mrg return OPY_TYPE_IMM_W; 6221 1.1 mrg 6222 1.1 mrg case OP_TYPE_IMM_L: 6223 1.1 mrg return OPY_TYPE_IMM_L; 6224 1.1 mrg 6225 1.1 mrg default: 6226 1.1 mrg gcc_unreachable (); 6227 1.1 mrg } 6228 1.1 mrg } 6229 1.1 mrg 6230 1.1 mrg /* Return size of INSN as int. */ 6231 1.1 mrg static int 6232 1.1 mrg sched_get_attr_size_int (rtx_insn *insn) 6233 1.1 mrg { 6234 1.1 mrg int size; 6235 1.1 mrg 6236 1.1 mrg switch (get_attr_type (insn)) 6237 1.1 mrg { 6238 1.1 mrg case TYPE_IGNORE: 6239 1.1 mrg /* There should be no references to m68k_sched_attr_size for 'ignore' 6240 1.1 mrg instructions. 
*/ 6241 1.1 mrg gcc_unreachable (); 6242 1.1 mrg return 0; 6243 1.1 mrg 6244 1.1 mrg case TYPE_MUL_L: 6245 1.1 mrg size = 2; 6246 1.1 mrg break; 6247 1.1 mrg 6248 1.1 mrg default: 6249 1.1 mrg size = 1; 6250 1.1 mrg break; 6251 1.1 mrg } 6252 1.1 mrg 6253 1.1 mrg switch (get_attr_opx_type (insn)) 6254 1.1 mrg { 6255 1.1 mrg case OPX_TYPE_NONE: 6256 1.1 mrg case OPX_TYPE_RN: 6257 1.1 mrg case OPX_TYPE_FPN: 6258 1.1 mrg case OPX_TYPE_MEM1: 6259 1.1 mrg case OPX_TYPE_MEM234: 6260 1.1 mrg case OPY_TYPE_IMM_Q: 6261 1.1 mrg break; 6262 1.1 mrg 6263 1.1 mrg case OPX_TYPE_MEM5: 6264 1.1 mrg case OPX_TYPE_MEM6: 6265 1.1 mrg /* Here we assume that most absolute references are short. */ 6266 1.1 mrg case OPX_TYPE_MEM7: 6267 1.1 mrg case OPY_TYPE_IMM_W: 6268 1.1 mrg ++size; 6269 1.1 mrg break; 6270 1.1 mrg 6271 1.1 mrg case OPY_TYPE_IMM_L: 6272 1.1 mrg size += 2; 6273 1.1 mrg break; 6274 1.1 mrg 6275 1.1 mrg default: 6276 1.1 mrg gcc_unreachable (); 6277 1.1 mrg } 6278 1.1 mrg 6279 1.1 mrg switch (get_attr_opy_type (insn)) 6280 1.1 mrg { 6281 1.1 mrg case OPY_TYPE_NONE: 6282 1.1 mrg case OPY_TYPE_RN: 6283 1.1 mrg case OPY_TYPE_FPN: 6284 1.1 mrg case OPY_TYPE_MEM1: 6285 1.1 mrg case OPY_TYPE_MEM234: 6286 1.1 mrg case OPY_TYPE_IMM_Q: 6287 1.1 mrg break; 6288 1.1 mrg 6289 1.1 mrg case OPY_TYPE_MEM5: 6290 1.1 mrg case OPY_TYPE_MEM6: 6291 1.1 mrg /* Here we assume that most absolute references are short. */ 6292 1.1 mrg case OPY_TYPE_MEM7: 6293 1.1 mrg case OPY_TYPE_IMM_W: 6294 1.1 mrg ++size; 6295 1.1 mrg break; 6296 1.1 mrg 6297 1.1 mrg case OPY_TYPE_IMM_L: 6298 1.1 mrg size += 2; 6299 1.1 mrg break; 6300 1.1 mrg 6301 1.1 mrg default: 6302 1.1 mrg gcc_unreachable (); 6303 1.1 mrg } 6304 1.1 mrg 6305 1.1 mrg if (size > 3) 6306 1.1 mrg { 6307 1.1 mrg gcc_assert (!reload_completed); 6308 1.1 mrg 6309 1.1 mrg size = 3; 6310 1.1 mrg } 6311 1.1 mrg 6312 1.1 mrg return size; 6313 1.1 mrg } 6314 1.1 mrg 6315 1.1 mrg /* Return size of INSN as attribute enum value. 
*/
enum attr_size
m68k_sched_attr_size (rtx_insn *insn)
{
  switch (sched_get_attr_size_int (insn))
    {
    case 1:
      return SIZE_1;

    case 2:
      return SIZE_2;

    case 3:
      return SIZE_3;

    default:
      gcc_unreachable ();
    }
}

/* Classify operand X or Y (depending on OPX_P) of INSN for the op_mem
   attribute: OP_TYPE_RN for register-like or immediate operands,
   OP_TYPE_MEM1 for plain memory references, OP_TYPE_MEM6 for indexed
   memory references.  (Note: despite the original header, this returns a
   classification, not the operand itself.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}

/* Implement op_mem attribute.
   Encodes the memory behavior of both operands: the first position
   describes operand Y (read), the second operand X; '0' = no memory
   access, '1' = plain memory access, 'I' = indexed memory access.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Other access kinds only occur before reload; conservatively
	     treat them the same.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Both operands indexed memory -- only possible before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}

/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;

/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.
*/
static int
m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
			unsigned int)
{
  int delay;

  /* Unrecognizable insns have no attributes to consult.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.cc: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
	cost = 3;

      /* Consume the recorded bypass so the next query starts clean.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.cc: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}

/* Return maximal number of insns that can be scheduled on a single cycle.
*/
static int
m68k_sched_issue_rate (void)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
    case CPU_CFV3:
      return 1;

    case CPU_CFV4:
      return 2;

    default:
      gcc_unreachable ();
      return 0;
    }
}

/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.
       Used as a ring buffer of length N_INSNS.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;

/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the ring-buffer index, wrapping at n_insns.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 does not model the buffer (see m68k_sched_md_init_global).  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size: conservatively drain the buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}

/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}

/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  if (flag_checking)
    {
      rtx_insn *insn;
      state_t state;

      state = alloca (state_size ());

      for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && recog_memoized (insn) >= 0)
	    {
	      gcc_assert (insn_has_dfa_reservation_p (insn));

	      state_reset (state);
	      if (state_transition (state, insn) >= 0)
		gcc_unreachable ();
	    }
	}
    }

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Freed in m68k_sched_md_finish_global.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the buffer-reservation insn used by dfa_post_advance_cycle.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}

/* Scheduling pass is now finished.  Free/reset static variables.  */
static void
m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
			     int verbose ATTRIBUTE_UNUSED)
{
  sched_ib.insn = NULL;

  free (sched_adjust_cost_state);
  sched_adjust_cost_state = NULL;

  sched_mem_unit_code = 0;

  free (sched_ib.records.adjust);
  sched_ib.records.adjust = NULL;
  sched_ib.records.n_insns = 0;
  max_insn_size = 0;
}

/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.cc: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}

/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
   It is invoked just before current cycle finishes and is used here
   to track if instruction buffer got its two words this cycle.  */
static void
m68k_sched_dfa_pre_advance_cycle (void)
{
  if (!sched_ib.enabled_p)
    return;

  /* If the memory unit was idle this cycle, two prefetch words arrived.  */
  if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
    {
      sched_ib.filled += 2;

      if (sched_ib.filled > sched_ib.size)
	sched_ib.filled = sched_ib.size;
    }
}

/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
   It is invoked just after new cycle begins and is used here
   to setup number of filled words in the instruction buffer so that
   instructions which won't have all their words prefetched would be
   stalled for a cycle.  */
static void
m68k_sched_dfa_post_advance_cycle (void)
{
  int i;

  if (!sched_ib.enabled_p)
    return;

  /* Setup number of prefetched instruction words in the instruction
     buffer.  Reserve one DFA word-slot per missing word so that an insn
     larger than the filled part stalls.  */
  i = max_insn_size - sched_ib.filled;

  while (--i >= 0)
    {
      if (state_transition (curr_state, sched_ib.insn) >= 0)
	/* Pick up scheduler state.  */
	++sched_ib.filled;
    }
}

/* Return X or Y (depending on OPX_P) operand of INSN,
   if it is an integer register, or NULL otherwise.
*/
static rtx
sched_get_reg_operand (rtx_insn *insn, bool opx_p)
{
  rtx op = NULL;

  if (opx_p)
    {
      if (get_attr_opx_type (insn) == OPX_TYPE_RN)
	{
	  op = sched_get_operand (insn, true);
	  gcc_assert (op != NULL);

	  /* Before reload an RN-classified operand may not be a REG yet.  */
	  if (!reload_completed && !REG_P (op))
	    return NULL;
	}
    }
  else
    {
      if (get_attr_opy_type (insn) == OPY_TYPE_RN)
	{
	  op = sched_get_operand (insn, false);
	  gcc_assert (op != NULL);

	  if (!reload_completed && !REG_P (op))
	    return NULL;
	}
    }

  return op;
}

/* Return true, if X or Y (depending on OPX_P) operand of INSN
   is a MEM.  */
static bool
sched_mem_operand_p (rtx_insn *insn, bool opx_p)
{
  switch (sched_get_opxy_mem_type (insn, opx_p))
    {
    case OP_TYPE_MEM1:
    case OP_TYPE_MEM6:
      return true;

    default:
      return false;
    }
}

/* Return the memory operand of INSN selected by MUST_READ_P /
   MUST_WRITE_P: the Y (read) operand is preferred unless a written
   operand is required, in which case only X qualifies.  Aborts if no
   matching MEM operand exists.  */
static rtx
sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
{
  bool opx_p;
  bool opy_p;

  opx_p = false;
  opy_p = false;

  if (must_read_p)
    {
      opx_p = true;
      opy_p = true;
    }

  if (must_write_p)
    {
      /* Only operand X can be written; Y is read-only.  */
      opx_p = true;
      opy_p = false;
    }

  if (opy_p && sched_mem_operand_p (insn, false))
    return sched_get_operand (insn, false);

  if (opx_p && sched_mem_operand_p (insn, true))
    return sched_get_operand (insn, true);

  gcc_unreachable ();
  return NULL;
}

/* Return non-zero if PRO modifies register used as part of
   address in CON.  */
int
m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
  rtx pro_x;
  rtx con_mem_read;

  pro_x = sched_get_reg_operand (pro, true);
  if (pro_x == NULL)
    return 0;

  con_mem_read = sched_get_mem_operand (con, true, false);
  gcc_assert (con_mem_read != NULL);

  if (reg_mentioned_p (pro_x, con_mem_read))
    return 1;

  return 0;
}

/* Helper function for m68k_sched_indexed_address_bypass_p.
   if PRO modifies register used as index in CON,
   return scale of indexed memory access in CON.  Return zero otherwise.
*/
static int
sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
{
  rtx reg;
  rtx mem;
  struct m68k_address address;

  reg = sched_get_reg_operand (pro, true);
  if (reg == NULL)
    return 0;

  mem = sched_get_mem_operand (con, true, false);
  gcc_assert (mem != NULL && MEM_P (mem));

  if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
			       &address))
    gcc_unreachable ();

  /* NOTE(review): address.index is dereferenced without a null check --
     presumably this bypass is only queried for indexed addresses; confirm
     against the define_bypass users in the machine description.  */
  if (REGNO (reg) == REGNO (address.index))
    {
      gcc_assert (address.scale != 0);
      return address.scale;
    }

  return 0;
}

/* Return non-zero if PRO modifies register used
   as index with scale 2 or 4 in CON.  */
int
m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      return 1;

    default:
      return 0;
    }
}

/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* 0x207C is "movea.l #imm,%a0"; fold the static-chain register number
     into the opcode's register field (bits 11-9).  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* 0x4EF9 is "jmp (xxx).l" with a 32-bit absolute target.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}

/* On the 68000, the RTS insn cannot pop anything.
   On the 68010, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.
*/

static poly_int64
m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
{
  return ((TARGET_RTD
	   && (!fundecl
	       || TREE_CODE (fundecl) != IDENTIFIER_NODE)
	   && (!stdarg_p (funtype)))
	  ? (HOST_WIDE_INT) size : 0);
}

/* Make sure everything's fine if we *don't* have a given processor.
   This assumes that putting a register in fixed_regs will keep the
   compiler's mitts completely off it.  We don't bother to zero it out
   of register classes.  */

static void
m68k_conditional_register_usage (void)
{
  int i;
  HARD_REG_SET x;
  /* Without hard float, the FP registers are unusable: fix them.  */
  if (!TARGET_HARD_FLOAT)
    {
      x = reg_class_contents[FP_REGS];
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (x, i))
	  fixed_regs[i] = call_used_regs[i] = 1;
    }
  /* PIC needs a dedicated GOT pointer register.  */
  if (flag_pic)
    fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
}

/* Register word-sized sync libcall fallbacks.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}

/* Implements EPILOGUE_USES.  All registers are live on exit from an
   interrupt routine.  */
bool
m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
{
  return (reload_completed
	  && (m68k_get_function_kind (current_function_decl)
	      == m68k_fk_interrupt_handler));
}


/* Implement TARGET_C_EXCESS_PRECISION.

   Set the value of FLT_EVAL_METHOD in float.h.  When using 68040 fp
   instructions, we get proper intermediate rounding, otherwise we
   get extended precision results.  */

static enum flt_eval_method
m68k_excess_precision (enum excess_precision_type type)
{
  switch (type)
    {
    case EXCESS_PRECISION_TYPE_FAST:
      /* The fastest type to promote to will always be the native type,
	 whether that occurs with implicit excess precision or
	 otherwise.  */
      return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
    case EXCESS_PRECISION_TYPE_STANDARD:
    case EXCESS_PRECISION_TYPE_IMPLICIT:
      /* Otherwise, the excess precision we want when we are
	 in a standards compliant mode, and the implicit precision we
	 provide can be identical.  */
      if (TARGET_68040 || ! TARGET_68881)
	return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;

      return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
    case EXCESS_PRECISION_TYPE_FLOAT16:
      error ("%<-fexcess-precision=16%> is not supported on this target");
      break;
    default:
      gcc_unreachable ();
    }
  return FLT_EVAL_METHOD_UNPREDICTABLE;
}

/* Implement PUSH_ROUNDING.  On the 680x0, sp@- in a byte insn really pushes
   a word.  On the ColdFire, sp@- in a byte insn pushes just a byte.  */

poly_int64
m68k_push_rounding (poly_int64 bytes)
{
  if (TARGET_COLDFIRE)
    return bytes;
  /* Round up to an even number of bytes.  */
  return (bytes + 1) & ~1;
}

/* Implement TARGET_PROMOTE_FUNCTION_MODE.
*/ 7167 1.1 mrg 7168 1.1 mrg static machine_mode 7169 m68k_promote_function_mode (const_tree type, machine_mode mode, 7170 int *punsignedp ATTRIBUTE_UNUSED, 7171 const_tree fntype ATTRIBUTE_UNUSED, 7172 int for_return) 7173 { 7174 /* Promote libcall arguments narrower than int to match the normal C 7175 ABI (for which promotions are handled via 7176 TARGET_PROMOTE_PROTOTYPES). */ 7177 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode)) 7178 return SImode; 7179 return mode; 7180 } 7181 7182 #include "gt-m68k.h" 7183