visium.cc revision 1.1 1 1.1 mrg /* Output routines for Visium.
2 1.1 mrg Copyright (C) 2002-2022 Free Software Foundation, Inc.
3 1.1 mrg Contributed by C.Nettleton, J.P.Parkes and P.Garbett.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it
8 1.1 mrg under the terms of the GNU General Public License as published
9 1.1 mrg by the Free Software Foundation; either version 3, or (at your
10 1.1 mrg option) any later version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT
13 1.1 mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 1.1 mrg or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 1.1 mrg License for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg #define IN_TARGET_CODE 1
22 1.1 mrg
23 1.1 mrg #include "config.h"
24 1.1 mrg #include "system.h"
25 1.1 mrg #include "coretypes.h"
26 1.1 mrg #include "backend.h"
27 1.1 mrg #include "target.h"
28 1.1 mrg #include "rtl.h"
29 1.1 mrg #include "tree.h"
30 1.1 mrg #include "gimple-expr.h"
31 1.1 mrg #include "df.h"
32 1.1 mrg #include "memmodel.h"
33 1.1 mrg #include "tm_p.h"
34 1.1 mrg #include "stringpool.h"
35 1.1 mrg #include "attribs.h"
36 1.1 mrg #include "expmed.h"
37 1.1 mrg #include "optabs.h"
38 1.1 mrg #include "regs.h"
39 1.1 mrg #include "emit-rtl.h"
40 1.1 mrg #include "recog.h"
41 1.1 mrg #include "diagnostic-core.h"
42 1.1 mrg #include "alias.h"
43 1.1 mrg #include "flags.h"
44 1.1 mrg #include "fold-const.h"
45 1.1 mrg #include "stor-layout.h"
46 1.1 mrg #include "calls.h"
47 1.1 mrg #include "varasm.h"
48 1.1 mrg #include "output.h"
49 1.1 mrg #include "insn-attr.h"
50 1.1 mrg #include "explow.h"
51 1.1 mrg #include "expr.h"
52 1.1 mrg #include "gimplify.h"
53 1.1 mrg #include "langhooks.h"
54 1.1 mrg #include "reload.h"
55 1.1 mrg #include "tm-constrs.h"
56 1.1 mrg #include "tree-pass.h"
57 1.1 mrg #include "context.h"
58 1.1 mrg #include "builtins.h"
59 1.1 mrg #include "opts.h"
60 1.1 mrg
61 1.1 mrg /* This file should be included last. */
62 1.1 mrg #include "target-def.h"
63 1.1 mrg
/* Enumeration of indexes into machine_libfunc_table.  */
enum machine_libfunc_index
{
  /* Block-copy helpers (memcpy variants).  */
  MLTI_long_int_memcpy,
  MLTI_wrd_memcpy,
  MLTI_byt_memcpy,

  /* Block-set helpers (memset variants).  */
  MLTI_long_int_memset,
  MLTI_wrd_memset,
  MLTI_byt_memset,

  /* Helper called when initializing trampolines.  */
  MLTI_set_trampoline_parity,

  /* Number of entries in the table.  */
  MLTI_MAX
};
79 1.1 mrg
/* Holder for the Visium-specific libfunc rtxes; GTY-marked so the entries
   are roots for the garbage collector.  */
struct GTY(()) machine_libfuncs
{
  rtx table[MLTI_MAX];
};

/* The table of Visium-specific libfuncs.  */
static GTY(()) struct machine_libfuncs visium_libfuncs;

/* Internal shorthand for the table; only used by the accessors below.  */
#define vlt visium_libfuncs.table

/* Accessor macros for visium_libfuncs.  */
#define long_int_memcpy_libfunc		(vlt[MLTI_long_int_memcpy])
#define wrd_memcpy_libfunc		(vlt[MLTI_wrd_memcpy])
#define byt_memcpy_libfunc		(vlt[MLTI_byt_memcpy])
#define long_int_memset_libfunc		(vlt[MLTI_long_int_memset])
#define wrd_memset_libfunc		(vlt[MLTI_wrd_memset])
#define byt_memset_libfunc		(vlt[MLTI_byt_memset])
#define set_trampoline_parity_libfunc	(vlt[MLTI_set_trampoline_parity])
98 1.1 mrg
/* Machine specific function data, attached to cfun->machine.  */
struct GTY (()) machine_function
{
  /* Size of the frame of the function.  */
  int frame_size;

  /* Size of the reg parm save area, non-zero only for functions with variable
     argument list.  We cannot use the crtl->args.pretend_args_size machinery
     for this purpose because this size is added to virtual_incoming_args_rtx
     to give the location of the first parameter passed by the caller on the
     stack and virtual_incoming_args_rtx is also the location of the first
     parameter on the stack.  So crtl->args.pretend_args_size can be non-zero
     only if the first non-register named parameter is not passed entirely on
     the stack and this runs afoul of the need to have a reg parm save area
     even with a variable argument list starting on the stack because of the
     separate handling of general and floating-point registers.  */
  int reg_parm_save_area_size;

  /* True if we have created an rtx which relies on the frame pointer.  */
  bool frame_needed;

  /* True if we have exposed the flags register.  From this moment on, we
     cannot generate simple operations for integer registers.  We could
     use reload_completed for this purpose, but this would cripple the
     postreload CSE and GCSE passes which run before postreload split.  */
  bool flags_exposed;
};

/* Shorthand accessors for the machine_function of the current function.  */
#define visium_frame_size cfun->machine->frame_size
#define visium_reg_parm_save_area_size cfun->machine->reg_parm_save_area_size
#define visium_frame_needed cfun->machine->frame_needed
#define visium_flags_exposed cfun->machine->flags_exposed
131 1.1 mrg
/* 1 if the next opcode is to be specially indented.  */
int visium_indent_opcode = 0;

/* Register number used for long branches when LR isn't available.  It
   must be a call-used register since it isn't saved on function entry.
   We do not care whether the branch is predicted or not on the GR6,
   given how unlikely it is to have a long branch in a leaf function.
   Reassigned by visium_conditional_register_usage when the supervisor
   mode is disabled.  */
static unsigned int long_branch_regnum = 31;
140 1.1 mrg
141 1.1 mrg static tree visium_handle_interrupt_attr (tree *, tree, tree, int, bool *);
142 1.1 mrg static inline bool current_function_saves_fp (void);
143 1.1 mrg static inline bool current_function_saves_lr (void);
144 1.1 mrg static inline bool current_function_has_lr_slot (void);
145 1.1 mrg
/* Supported attributes:
   interrupt -- specifies this function is an interrupt handler.
   The table is terminated by the all-NULL sentinel entry.  */
static const struct attribute_spec visium_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true, false, false, false, visium_handle_interrupt_attr,
    NULL},
  { NULL, 0, 0, false, false, false, false, NULL, NULL },
};
156 1.1 mrg
157 1.1 mrg static struct machine_function *visium_init_machine_status (void);
158 1.1 mrg
159 1.1 mrg /* Target hooks and TARGET_INITIALIZER */
160 1.1 mrg
161 1.1 mrg static bool visium_pass_by_reference (cumulative_args_t,
162 1.1 mrg const function_arg_info &);
163 1.1 mrg
164 1.1 mrg static rtx visium_function_arg (cumulative_args_t, const function_arg_info &);
165 1.1 mrg
166 1.1 mrg static void visium_function_arg_advance (cumulative_args_t,
167 1.1 mrg const function_arg_info &);
168 1.1 mrg
169 1.1 mrg static bool visium_return_in_memory (const_tree, const_tree fntype);
170 1.1 mrg
171 1.1 mrg static rtx visium_function_value (const_tree, const_tree fn_decl_or_type,
172 1.1 mrg bool);
173 1.1 mrg
174 1.1 mrg static rtx visium_libcall_value (machine_mode, const_rtx);
175 1.1 mrg
176 1.1 mrg static void visium_setup_incoming_varargs (cumulative_args_t,
177 1.1 mrg const function_arg_info &,
178 1.1 mrg int *, int);
179 1.1 mrg
180 1.1 mrg static void visium_va_start (tree valist, rtx nextarg);
181 1.1 mrg
182 1.1 mrg static tree visium_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
183 1.1 mrg
184 1.1 mrg static bool visium_function_ok_for_sibcall (tree, tree);
185 1.1 mrg
186 1.1 mrg static bool visium_frame_pointer_required (void);
187 1.1 mrg
188 1.1 mrg static tree visium_build_builtin_va_list (void);
189 1.1 mrg
190 1.1 mrg static rtx_insn *visium_md_asm_adjust (vec<rtx> &, vec<rtx> &,
191 1.1 mrg vec<machine_mode> &,
192 1.1 mrg vec<const char *> &, vec<rtx> &,
193 1.1 mrg HARD_REG_SET &, location_t);
194 1.1 mrg
195 1.1 mrg static bool visium_legitimate_constant_p (machine_mode, rtx);
196 1.1 mrg
197 1.1 mrg static bool visium_legitimate_address_p (machine_mode, rtx, bool);
198 1.1 mrg
199 1.1 mrg static bool visium_print_operand_punct_valid_p (unsigned char);
200 1.1 mrg static void visium_print_operand (FILE *, rtx, int);
201 1.1 mrg static void visium_print_operand_address (FILE *, machine_mode, rtx);
202 1.1 mrg
203 1.1 mrg static void visium_conditional_register_usage (void);
204 1.1 mrg
205 1.1 mrg static rtx visium_legitimize_address (rtx, rtx, machine_mode);
206 1.1 mrg
207 1.1 mrg static reg_class_t visium_secondary_reload (bool, rtx, reg_class_t,
208 1.1 mrg machine_mode,
209 1.1 mrg secondary_reload_info *);
210 1.1 mrg
211 1.1 mrg static bool visium_class_likely_spilled_p (reg_class_t);
212 1.1 mrg
213 1.1 mrg static void visium_trampoline_init (rtx, tree, rtx);
214 1.1 mrg
215 1.1 mrg static int visium_issue_rate (void);
216 1.1 mrg
217 1.1 mrg static int visium_adjust_priority (rtx_insn *, int);
218 1.1 mrg
219 1.1 mrg static int visium_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
220 1.1 mrg
221 1.1 mrg static int visium_register_move_cost (machine_mode, reg_class_t,
222 1.1 mrg reg_class_t);
223 1.1 mrg
224 1.1 mrg static int visium_memory_move_cost (machine_mode, reg_class_t, bool);
225 1.1 mrg
226 1.1 mrg static bool visium_rtx_costs (rtx, machine_mode, int, int, int *, bool);
227 1.1 mrg
228 1.1 mrg static void visium_option_override (void);
229 1.1 mrg
230 1.1 mrg static void visium_init_libfuncs (void);
231 1.1 mrg
232 1.1 mrg static unsigned int visium_reorg (void);
233 1.1 mrg
234 1.1 mrg static unsigned int visium_hard_regno_nregs (unsigned int, machine_mode);
235 1.1 mrg
236 1.1 mrg static bool visium_hard_regno_mode_ok (unsigned int, machine_mode);
237 1.1 mrg
238 1.1 mrg static bool visium_modes_tieable_p (machine_mode, machine_mode);
239 1.1 mrg
240 1.1 mrg static bool visium_can_change_mode_class (machine_mode, machine_mode,
241 1.1 mrg reg_class_t);
242 1.1 mrg
243 1.1 mrg static HOST_WIDE_INT visium_constant_alignment (const_tree, HOST_WIDE_INT);
244 1.1 mrg
245 1.1 mrg /* Setup the global target hooks structure. */
246 1.1 mrg
247 1.1 mrg #undef TARGET_MAX_ANCHOR_OFFSET
248 1.1 mrg #define TARGET_MAX_ANCHOR_OFFSET 31
249 1.1 mrg
250 1.1 mrg #undef TARGET_PASS_BY_REFERENCE
251 1.1 mrg #define TARGET_PASS_BY_REFERENCE visium_pass_by_reference
252 1.1 mrg
253 1.1 mrg #undef TARGET_FUNCTION_ARG
254 1.1 mrg #define TARGET_FUNCTION_ARG visium_function_arg
255 1.1 mrg
256 1.1 mrg #undef TARGET_FUNCTION_ARG_ADVANCE
257 1.1 mrg #define TARGET_FUNCTION_ARG_ADVANCE visium_function_arg_advance
258 1.1 mrg
259 1.1 mrg #undef TARGET_RETURN_IN_MEMORY
260 1.1 mrg #define TARGET_RETURN_IN_MEMORY visium_return_in_memory
261 1.1 mrg
262 1.1 mrg #undef TARGET_FUNCTION_VALUE
263 1.1 mrg #define TARGET_FUNCTION_VALUE visium_function_value
264 1.1 mrg
265 1.1 mrg #undef TARGET_LIBCALL_VALUE
266 1.1 mrg #define TARGET_LIBCALL_VALUE visium_libcall_value
267 1.1 mrg
268 1.1 mrg #undef TARGET_SETUP_INCOMING_VARARGS
269 1.1 mrg #define TARGET_SETUP_INCOMING_VARARGS visium_setup_incoming_varargs
270 1.1 mrg
271 1.1 mrg #undef TARGET_EXPAND_BUILTIN_VA_START
272 1.1 mrg #define TARGET_EXPAND_BUILTIN_VA_START visium_va_start
273 1.1 mrg
274 1.1 mrg #undef TARGET_BUILD_BUILTIN_VA_LIST
275 1.1 mrg #define TARGET_BUILD_BUILTIN_VA_LIST visium_build_builtin_va_list
276 1.1 mrg
277 1.1 mrg #undef TARGET_GIMPLIFY_VA_ARG_EXPR
278 1.1 mrg #define TARGET_GIMPLIFY_VA_ARG_EXPR visium_gimplify_va_arg
279 1.1 mrg
280 1.1 mrg #undef TARGET_LEGITIMATE_CONSTANT_P
281 1.1 mrg #define TARGET_LEGITIMATE_CONSTANT_P visium_legitimate_constant_p
282 1.1 mrg
283 1.1 mrg #undef TARGET_LRA_P
284 1.1 mrg #define TARGET_LRA_P hook_bool_void_false
285 1.1 mrg
286 1.1 mrg #undef TARGET_LEGITIMATE_ADDRESS_P
287 1.1 mrg #define TARGET_LEGITIMATE_ADDRESS_P visium_legitimate_address_p
288 1.1 mrg
289 1.1 mrg #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
290 1.1 mrg #define TARGET_PRINT_OPERAND_PUNCT_VALID_P visium_print_operand_punct_valid_p
291 1.1 mrg
292 1.1 mrg #undef TARGET_PRINT_OPERAND
293 1.1 mrg #define TARGET_PRINT_OPERAND visium_print_operand
294 1.1 mrg
295 1.1 mrg #undef TARGET_PRINT_OPERAND_ADDRESS
296 1.1 mrg #define TARGET_PRINT_OPERAND_ADDRESS visium_print_operand_address
297 1.1 mrg
298 1.1 mrg #undef TARGET_ATTRIBUTE_TABLE
299 1.1 mrg #define TARGET_ATTRIBUTE_TABLE visium_attribute_table
300 1.1 mrg
301 1.1 mrg #undef TARGET_ADDRESS_COST
302 1.1 mrg #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
303 1.1 mrg
304 1.1 mrg #undef TARGET_STRICT_ARGUMENT_NAMING
305 1.1 mrg #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
306 1.1 mrg
307 1.1 mrg #undef TARGET_SCHED_ISSUE_RATE
308 1.1 mrg #define TARGET_SCHED_ISSUE_RATE visium_issue_rate
309 1.1 mrg
310 1.1 mrg #undef TARGET_SCHED_ADJUST_PRIORITY
311 1.1 mrg #define TARGET_SCHED_ADJUST_PRIORITY visium_adjust_priority
312 1.1 mrg
313 1.1 mrg #undef TARGET_SCHED_ADJUST_COST
314 1.1 mrg #define TARGET_SCHED_ADJUST_COST visium_adjust_cost
315 1.1 mrg
316 1.1 mrg #undef TARGET_MEMORY_MOVE_COST
317 1.1 mrg #define TARGET_MEMORY_MOVE_COST visium_memory_move_cost
318 1.1 mrg
319 1.1 mrg #undef TARGET_REGISTER_MOVE_COST
320 1.1 mrg #define TARGET_REGISTER_MOVE_COST visium_register_move_cost
321 1.1 mrg
322 1.1 mrg #undef TARGET_RTX_COSTS
323 1.1 mrg #define TARGET_RTX_COSTS visium_rtx_costs
324 1.1 mrg
325 1.1 mrg #undef TARGET_FUNCTION_OK_FOR_SIBCALL
326 1.1 mrg #define TARGET_FUNCTION_OK_FOR_SIBCALL visium_function_ok_for_sibcall
327 1.1 mrg
328 1.1 mrg #undef TARGET_FRAME_POINTER_REQUIRED
329 1.1 mrg #define TARGET_FRAME_POINTER_REQUIRED visium_frame_pointer_required
330 1.1 mrg
331 1.1 mrg #undef TARGET_SECONDARY_RELOAD
332 1.1 mrg #define TARGET_SECONDARY_RELOAD visium_secondary_reload
333 1.1 mrg
334 1.1 mrg #undef TARGET_CLASS_LIKELY_SPILLED_P
335 1.1 mrg #define TARGET_CLASS_LIKELY_SPILLED_P visium_class_likely_spilled_p
336 1.1 mrg
337 1.1 mrg #undef TARGET_LEGITIMIZE_ADDRESS
338 1.1 mrg #define TARGET_LEGITIMIZE_ADDRESS visium_legitimize_address
339 1.1 mrg
340 1.1 mrg #undef TARGET_OPTION_OVERRIDE
341 1.1 mrg #define TARGET_OPTION_OVERRIDE visium_option_override
342 1.1 mrg
343 1.1 mrg #undef TARGET_INIT_LIBFUNCS
344 1.1 mrg #define TARGET_INIT_LIBFUNCS visium_init_libfuncs
345 1.1 mrg
346 1.1 mrg #undef TARGET_CONDITIONAL_REGISTER_USAGE
347 1.1 mrg #define TARGET_CONDITIONAL_REGISTER_USAGE visium_conditional_register_usage
348 1.1 mrg
349 1.1 mrg #undef TARGET_TRAMPOLINE_INIT
350 1.1 mrg #define TARGET_TRAMPOLINE_INIT visium_trampoline_init
351 1.1 mrg
352 1.1 mrg #undef TARGET_MD_ASM_ADJUST
353 1.1 mrg #define TARGET_MD_ASM_ADJUST visium_md_asm_adjust
354 1.1 mrg
355 1.1 mrg #undef TARGET_FLAGS_REGNUM
356 1.1 mrg #define TARGET_FLAGS_REGNUM FLAGS_REGNUM
357 1.1 mrg
358 1.1 mrg #undef TARGET_HARD_REGNO_NREGS
359 1.1 mrg #define TARGET_HARD_REGNO_NREGS visium_hard_regno_nregs
360 1.1 mrg
361 1.1 mrg #undef TARGET_HARD_REGNO_MODE_OK
362 1.1 mrg #define TARGET_HARD_REGNO_MODE_OK visium_hard_regno_mode_ok
363 1.1 mrg
364 1.1 mrg #undef TARGET_MODES_TIEABLE_P
365 1.1 mrg #define TARGET_MODES_TIEABLE_P visium_modes_tieable_p
366 1.1 mrg
367 1.1 mrg #undef TARGET_CAN_CHANGE_MODE_CLASS
368 1.1 mrg #define TARGET_CAN_CHANGE_MODE_CLASS visium_can_change_mode_class
369 1.1 mrg
370 1.1 mrg #undef TARGET_CONSTANT_ALIGNMENT
371 1.1 mrg #define TARGET_CONSTANT_ALIGNMENT visium_constant_alignment
372 1.1 mrg
373 1.1 mrg #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
374 1.1 mrg #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
375 1.1 mrg
376 1.1 mrg struct gcc_target targetm = TARGET_INITIALIZER;
377 1.1 mrg
namespace {

/* Descriptor for the Visium-specific machine reorg pass ("mach2").  */
const pass_data pass_data_visium_reorg =
{
  RTL_PASS, /* type */
  "mach2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass whose execute method simply dispatches to visium_reorg.  */
class pass_visium_reorg : public rtl_opt_pass
{
public:
  pass_visium_reorg(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_visium_reorg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
  {
    return visium_reorg ();
  }

}; // class pass_visium_reorg

} // anon namespace
409 1.1 mrg
/* Return a new instance of the Visium machine reorg pass.  */

rtl_opt_pass *
make_pass_visium_reorg (gcc::context *ctxt)
{
  return new pass_visium_reorg (ctxt);
}
415 1.1 mrg
416 1.1 mrg /* Options override for Visium. */
417 1.1 mrg
418 1.1 mrg static void
419 1.1 mrg visium_option_override (void)
420 1.1 mrg {
421 1.1 mrg if (flag_pic == 1)
422 1.1 mrg warning (OPT_fpic, "%<-fpic%> is not supported");
423 1.1 mrg if (flag_pic == 2)
424 1.1 mrg warning (OPT_fPIC, "%<-fPIC%> is not supported");
425 1.1 mrg
426 1.1 mrg /* MCM is the default in the GR5/GR6 era. */
427 1.1 mrg target_flags |= MASK_MCM;
428 1.1 mrg
429 1.1 mrg /* FPU is the default with MCM, but don't override an explicit option. */
430 1.1 mrg if ((target_flags_explicit & MASK_FPU) == 0)
431 1.1 mrg target_flags |= MASK_FPU;
432 1.1 mrg
433 1.1 mrg /* The supervisor mode is the default. */
434 1.1 mrg if ((target_flags_explicit & MASK_SV_MODE) == 0)
435 1.1 mrg target_flags |= MASK_SV_MODE;
436 1.1 mrg
437 1.1 mrg /* The GR6 has the Block Move Instructions and an IEEE-compliant FPU. */
438 1.1 mrg if (visium_cpu_and_features == PROCESSOR_GR6)
439 1.1 mrg {
440 1.1 mrg target_flags |= MASK_BMI;
441 1.1 mrg if (target_flags & MASK_FPU)
442 1.1 mrg target_flags |= MASK_FPU_IEEE;
443 1.1 mrg }
444 1.1 mrg
445 1.1 mrg /* Set -mtune from -mcpu if not specified. */
446 1.1 mrg if (!OPTION_SET_P (visium_cpu))
447 1.1 mrg visium_cpu = visium_cpu_and_features;
448 1.1 mrg
449 1.1 mrg /* Align functions on 256-byte (32-quadword) for GR5 and 64-byte (8-quadword)
450 1.1 mrg boundaries for GR6 so they start a new burst mode window. */
451 1.1 mrg if (flag_align_functions && !str_align_functions)
452 1.1 mrg {
453 1.1 mrg if (visium_cpu == PROCESSOR_GR6)
454 1.1 mrg str_align_functions = "64";
455 1.1 mrg else
456 1.1 mrg str_align_functions = "256";
457 1.1 mrg
458 1.1 mrg /* Allow the size of compilation units to double because of inlining.
459 1.1 mrg In practice the global size of the object code is hardly affected
460 1.1 mrg because the additional instructions will take up the padding. */
461 1.1 mrg SET_OPTION_IF_UNSET (&global_options, &global_options_set,
462 1.1 mrg param_inline_unit_growth, 100);
463 1.1 mrg }
464 1.1 mrg
465 1.1 mrg /* Likewise for loops. */
466 1.1 mrg if (flag_align_loops && !str_align_loops)
467 1.1 mrg {
468 1.1 mrg if (visium_cpu == PROCESSOR_GR6)
469 1.1 mrg str_align_loops = "64";
470 1.1 mrg else
471 1.1 mrg {
472 1.1 mrg /* But not if they are too far away from a 256-byte boundary. */
473 1.1 mrg str_align_loops = "256:32:8";
474 1.1 mrg }
475 1.1 mrg }
476 1.1 mrg
477 1.1 mrg /* Align all jumps on quadword boundaries for the burst mode, and even
478 1.1 mrg on 8-quadword boundaries for GR6 so they start a new window. */
479 1.1 mrg if (flag_align_jumps && !str_align_jumps)
480 1.1 mrg {
481 1.1 mrg if (visium_cpu == PROCESSOR_GR6)
482 1.1 mrg str_align_jumps = "64";
483 1.1 mrg else
484 1.1 mrg str_align_jumps = "8";
485 1.1 mrg }
486 1.1 mrg }
487 1.1 mrg
/* Register the Visium-specific libfuncs with the middle-end.  */

static void
visium_init_libfuncs (void)
{
  /* The long int memcpy helper is only registered when the Block Move
     Instructions are not available; note that the condition deliberately
     guards only this first assignment.  */
  if (!TARGET_BMI)
    long_int_memcpy_libfunc = init_one_libfunc ("__long_int_memcpy");
  wrd_memcpy_libfunc = init_one_libfunc ("__wrd_memcpy");
  byt_memcpy_libfunc = init_one_libfunc ("__byt_memcpy");

  long_int_memset_libfunc = init_one_libfunc ("__long_int_memset");
  wrd_memset_libfunc = init_one_libfunc ("__wrd_memset");
  byt_memset_libfunc = init_one_libfunc ("__byt_memset");

  set_trampoline_parity_libfunc = init_one_libfunc ("__set_trampoline_parity");
}
504 1.1 mrg
505 1.1 mrg /* Return the number of instructions that can issue on the same cycle. */
506 1.1 mrg
507 1.1 mrg static int
508 1.1 mrg visium_issue_rate (void)
509 1.1 mrg {
510 1.1 mrg switch (visium_cpu)
511 1.1 mrg {
512 1.1 mrg case PROCESSOR_GR5:
513 1.1 mrg return 1;
514 1.1 mrg
515 1.1 mrg case PROCESSOR_GR6:
516 1.1 mrg return 2;
517 1.1 mrg
518 1.1 mrg default:
519 1.1 mrg gcc_unreachable ();
520 1.1 mrg }
521 1.1 mrg }
522 1.1 mrg
523 1.1 mrg /* Return the adjusted PRIORITY of INSN. */
524 1.1 mrg
525 1.1 mrg static int
526 1.1 mrg visium_adjust_priority (rtx_insn *insn, int priority)
527 1.1 mrg {
528 1.1 mrg /* On the GR5, we slightly increase the priority of writes in order to avoid
529 1.1 mrg scheduling a read on the next cycle. This is necessary in addition to the
530 1.1 mrg associated insn reservation because there are no data dependencies.
531 1.1 mrg We also slightly increase the priority of reads from ROM in order to group
532 1.1 mrg them as much as possible. These reads are a bit problematic because they
533 1.1 mrg conflict with the instruction fetches, i.e. the data and instruction buses
534 1.1 mrg tread on each other's toes when they are executed. */
535 1.1 mrg if (visium_cpu == PROCESSOR_GR5
536 1.1 mrg && reload_completed
537 1.1 mrg && INSN_P (insn)
538 1.1 mrg && recog_memoized (insn) >= 0)
539 1.1 mrg {
540 1.1 mrg enum attr_type attr_type = get_attr_type (insn);
541 1.1 mrg if (attr_type == TYPE_REG_MEM
542 1.1 mrg || (attr_type == TYPE_MEM_REG
543 1.1 mrg && MEM_READONLY_P (SET_SRC (PATTERN (insn)))))
544 1.1 mrg return priority + 1;
545 1.1 mrg }
546 1.1 mrg
547 1.1 mrg return priority;
548 1.1 mrg }
549 1.1 mrg
/* Adjust the cost of a scheduling dependency.  INSN depends on DEP_INSN
   with dependency kind DEP_TYPE (REG_DEP_TRUE, REG_DEP_ANTI or
   REG_DEP_OUTPUT) and current cost COST.  Return the new cost.  */

static int
visium_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		    unsigned int)
{
  enum attr_type attr_type;

  /* Don't adjust costs for true dependencies as they are described with
     bypasses.  But we make an exception for the first scheduling pass to
     help the subsequent postreload compare elimination pass.  */
  if (dep_type == REG_DEP_TRUE)
    {
      if (!reload_completed
	  && recog_memoized (insn) >= 0
	  && get_attr_type (insn) == TYPE_CMP)
	{
	  rtx pat = PATTERN (insn);
	  gcc_assert (GET_CODE (pat) == SET);
	  rtx src = SET_SRC (pat);

	  /* Only the branches can be modified by the postreload compare
	     elimination pass, not the cstores because they accept only
	     unsigned comparison operators and they are eliminated if
	     one of the operands is zero.  */
	  if (GET_CODE (src) == IF_THEN_ELSE
	      && XEXP (XEXP (src, 0), 1) == const0_rtx
	      && recog_memoized (dep_insn) >= 0)
	    {
	      enum attr_type dep_attr_type = get_attr_type (dep_insn);

	      /* The logical instructions use CCmode and thus work with any
		 comparison operator, whereas the arithmetic instructions use
		 CCNZmode and thus work with only a small subset.  */
	      if (dep_attr_type == TYPE_LOGIC
		  || (dep_attr_type == TYPE_ARITH
		      && visium_nz_comparison_operator (XEXP (src, 0),
							GET_MODE
							(XEXP (src, 0)))))
		return 0;
	    }
	}

      return cost;
    }

  /* An unrecognized insn has no type attribute; nothing to adjust.  */
  if (recog_memoized (insn) < 0)
    return 0;

  attr_type = get_attr_type (insn);

  /* Anti dependency: DEP_INSN reads a register that INSN writes some
     cycles later.  */
  if (dep_type == REG_DEP_ANTI)
    {
      /* On the GR5, the latency of FP instructions needs to be taken into
	 account for every dependency involving a write.  */
      if (attr_type == TYPE_REG_FP && visium_cpu == PROCESSOR_GR5)
	{
	  /* INSN is FLOAD.  */
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);

	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (recog_memoized (dep_insn) < 0)
		return 0;

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FDIV:
		case TYPE_FSQRT:
		case TYPE_FTOI:
		case TYPE_ITOF:
		case TYPE_FP:
		case TYPE_FMOVE:
		  /* A fload can't be issued until a preceding arithmetic
		     operation has finished if the target of the fload is
		     any of the sources (or destination) of the arithmetic
		     operation.  Note that the latency may be (much)
		     greater than this if the preceding instruction
		     concerned is in a queue.  */
		  return insn_default_latency (dep_insn);

		default:
		  return 0;
		}
	    }
	}

      /* On the GR6, we try to make sure that the link register is restored
	 sufficiently ahead of the return as to yield a correct prediction
	 from the branch predictor.  By default there is no true dependency
	 but an anti dependency between them, so we simply reuse it.  */
      else if (attr_type == TYPE_RET && visium_cpu == PROCESSOR_GR6)
	{
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (dep_pat) == SET
	      && REG_P (SET_DEST (dep_pat))
	      && REGNO (SET_DEST (dep_pat)) == LINK_REGNUM)
	    return 8;
	}

      /* For other anti dependencies, the cost is 0.  */
      return 0;
    }

  /* Output dependency: DEP_INSN writes a register that INSN writes some
     cycles later.  */
  else if (dep_type == REG_DEP_OUTPUT)
    {
      /* On the GR5, the latency of FP instructions needs to be taken into
	 account for every dependency involving a write.  */
      if (attr_type == TYPE_REG_FP && visium_cpu == PROCESSOR_GR5)
	{
	  /* INSN is FLOAD.  */
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);

	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (recog_memoized (dep_insn) < 0)
		return 0;

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FDIV:
		case TYPE_FSQRT:
		case TYPE_FTOI:
		case TYPE_ITOF:
		case TYPE_FP:
		case TYPE_FMOVE:
		  /* A fload can't be issued until a preceding arithmetic
		     operation has finished if the target of the fload is
		     the destination of the arithmetic operation.  Note that
		     the latency may be (much) greater than this if the
		     preceding instruction concerned is in a queue.  */
		  return insn_default_latency (dep_insn);

		default:
		  return 0;
		}
	    }
	}

      /* For other output dependencies, the cost is 0.  */
      return 0;
    }

  return 0;
}
712 1.1 mrg
713 1.1 mrg /* Handle an "interrupt_handler" attribute; arguments as in
714 1.1 mrg struct attribute_spec.handler. */
715 1.1 mrg
716 1.1 mrg static tree
717 1.1 mrg visium_handle_interrupt_attr (tree *node, tree name,
718 1.1 mrg tree args ATTRIBUTE_UNUSED,
719 1.1 mrg int flags ATTRIBUTE_UNUSED,
720 1.1 mrg bool *no_add_attrs)
721 1.1 mrg {
722 1.1 mrg if (TREE_CODE (*node) != FUNCTION_DECL)
723 1.1 mrg {
724 1.1 mrg warning (OPT_Wattributes, "%qE attribute only applies to functions",
725 1.1 mrg name);
726 1.1 mrg *no_add_attrs = true;
727 1.1 mrg }
728 1.1 mrg else if (!TARGET_SV_MODE)
729 1.1 mrg {
730 1.1 mrg error ("an interrupt handler cannot be compiled with %<-muser-mode%>");
731 1.1 mrg *no_add_attrs = true;
732 1.1 mrg }
733 1.1 mrg
734 1.1 mrg return NULL_TREE;
735 1.1 mrg }
736 1.1 mrg
737 1.1 mrg /* Return non-zero if the current function is an interrupt function. */
738 1.1 mrg
739 1.1 mrg int
740 1.1 mrg visium_interrupt_function_p (void)
741 1.1 mrg {
742 1.1 mrg return
743 1.1 mrg lookup_attribute ("interrupt",
744 1.1 mrg DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
745 1.1 mrg }
746 1.1 mrg
747 1.1 mrg /* Conditionally modify the settings of the register file. */
748 1.1 mrg
749 1.1 mrg static void
750 1.1 mrg visium_conditional_register_usage (void)
751 1.1 mrg {
752 1.1 mrg /* If the supervisor mode is disabled, mask some general registers. */
753 1.1 mrg if (!TARGET_SV_MODE)
754 1.1 mrg {
755 1.1 mrg if (visium_cpu_and_features == PROCESSOR_GR5)
756 1.1 mrg {
757 1.1 mrg fixed_regs[24] = 1;
758 1.1 mrg fixed_regs[25] = 1;
759 1.1 mrg fixed_regs[26] = 1;
760 1.1 mrg fixed_regs[27] = 1;
761 1.1 mrg fixed_regs[28] = 1;
762 1.1 mrg call_used_regs[24] = 0;
763 1.1 mrg call_used_regs[25] = 0;
764 1.1 mrg call_used_regs[26] = 0;
765 1.1 mrg call_used_regs[27] = 0;
766 1.1 mrg call_used_regs[28] = 0;
767 1.1 mrg }
768 1.1 mrg
769 1.1 mrg fixed_regs[31] = 1;
770 1.1 mrg call_used_regs[31] = 0;
771 1.1 mrg
772 1.1 mrg /* We also need to change the long-branch register. */
773 1.1 mrg if (visium_cpu_and_features == PROCESSOR_GR5)
774 1.1 mrg long_branch_regnum = 20;
775 1.1 mrg else
776 1.1 mrg long_branch_regnum = 28;
777 1.1 mrg }
778 1.1 mrg
779 1.1 mrg /* If the FPU is disabled, mask the FP registers. */
780 1.1 mrg if (!TARGET_FPU)
781 1.1 mrg {
782 1.1 mrg for (int i = FP_FIRST_REGNUM; i <= FP_LAST_REGNUM; i++)
783 1.1 mrg {
784 1.1 mrg fixed_regs[i] = 1;
785 1.1 mrg call_used_regs[i] = 0;
786 1.1 mrg }
787 1.1 mrg }
788 1.1 mrg }
789 1.1 mrg
/* Prepend to CLOBBERS hard registers that are automatically clobbered for
   an asm.  We do this for the FLAGS to maintain source compatibility with
   the original cc0-based compiler.  */

static rtx_insn *
visium_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
		      vec<machine_mode> & /*input_modes*/,
		      vec<const char *> & /*constraints*/, vec<rtx> &clobbers,
		      HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
  /* Every asm implicitly clobbers the flags register.  */
  clobbers.safe_push (gen_rtx_REG (CCmode, FLAGS_REGNUM));
  SET_HARD_REG_BIT (clobbered_regs, FLAGS_REGNUM);
  /* No extra insns need to be emitted around the asm.  */
  return NULL;
}
804 1.1 mrg
805 1.1 mrg /* Return true if X is a legitimate constant for a MODE immediate operand.
806 1.1 mrg X is guaranteed to satisfy the CONSTANT_P predicate. */
807 1.1 mrg
808 1.1 mrg static bool
809 1.1 mrg visium_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
810 1.1 mrg rtx x ATTRIBUTE_UNUSED)
811 1.1 mrg {
812 1.1 mrg return true;
813 1.1 mrg }
814 1.1 mrg
815 1.1 mrg /* Compute the alignment for a variable. The alignment of an aggregate is
816 1.1 mrg set to be at least that of a scalar less than or equal to it in size. */
817 1.1 mrg
818 1.1 mrg unsigned int
819 1.1 mrg visium_data_alignment (tree type, unsigned int align)
820 1.1 mrg {
821 1.1 mrg if (AGGREGATE_TYPE_P (type)
822 1.1 mrg && TYPE_SIZE (type)
823 1.1 mrg && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && align < 32)
824 1.1 mrg {
825 1.1 mrg if (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 32)
826 1.1 mrg return 32;
827 1.1 mrg
828 1.1 mrg if (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16 && align < 16)
829 1.1 mrg return 16;
830 1.1 mrg }
831 1.1 mrg
832 1.1 mrg return align;
833 1.1 mrg }
834 1.1 mrg
835 1.1 mrg /* Implement TARGET_CONSTANT_ALIGNMENT. */
836 1.1 mrg
837 1.1 mrg static HOST_WIDE_INT
838 1.1 mrg visium_constant_alignment (const_tree exp, HOST_WIDE_INT align)
839 1.1 mrg {
840 1.1 mrg return visium_data_alignment (TREE_TYPE (exp), align);
841 1.1 mrg }
842 1.1 mrg
843 1.1 mrg /* Helper function for HARD_REGNO_RENAME_OK (FROM, TO). Return non-zero if
844 1.1 mrg it is OK to rename a hard register FROM to another hard register TO. */
845 1.1 mrg
846 1.1 mrg int
847 1.1 mrg visium_hard_regno_rename_ok (unsigned int from ATTRIBUTE_UNUSED,
848 1.1 mrg unsigned int to)
849 1.1 mrg {
850 1.1 mrg /* If the function doesn't save LR, then the long-branch register will be
851 1.1 mrg used for long branches so we need to know whether it is live before the
852 1.1 mrg frame layout is computed. */
853 1.1 mrg if (!current_function_saves_lr () && to == long_branch_regnum)
854 1.1 mrg return 0;
855 1.1 mrg
856 1.1 mrg /* Interrupt functions can only use registers that have already been
857 1.1 mrg saved by the prologue, even if they would normally be call-clobbered. */
858 1.1 mrg if (crtl->is_leaf
859 1.1 mrg && !df_regs_ever_live_p (to)
860 1.1 mrg && visium_interrupt_function_p ())
861 1.1 mrg return 0;
862 1.1 mrg
863 1.1 mrg return 1;
864 1.1 mrg }
865 1.1 mrg
866 1.1 mrg /* Implement TARGET_HARD_REGNO_NREGS. */
867 1.1 mrg
868 1.1 mrg static unsigned int
869 1.1 mrg visium_hard_regno_nregs (unsigned int regno, machine_mode mode)
870 1.1 mrg {
871 1.1 mrg if (regno == MDB_REGNUM)
872 1.1 mrg return CEIL (GET_MODE_SIZE (mode), 2 * UNITS_PER_WORD);
873 1.1 mrg return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
874 1.1 mrg }
875 1.1 mrg
876 1.1 mrg /* Implement TARGET_HARD_REGNO_MODE_OK.
877 1.1 mrg
878 1.1 mrg Modes with sizes which cross from the one register class to the
879 1.1 mrg other cannot be allowed. Only single floats are allowed in the
880 1.1 mrg floating point registers, and only fixed point values in the EAM
881 1.1 mrg registers. */
882 1.1 mrg
883 1.1 mrg static bool
884 1.1 mrg visium_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
885 1.1 mrg {
886 1.1 mrg if (GP_REGISTER_P (regno))
887 1.1 mrg return GP_REGISTER_P (end_hard_regno (mode, regno) - 1);
888 1.1 mrg
889 1.1 mrg if (FP_REGISTER_P (regno))
890 1.1 mrg return mode == SFmode || (mode == SImode && TARGET_FPU_IEEE);
891 1.1 mrg
892 1.1 mrg return (GET_MODE_CLASS (mode) == MODE_INT
893 1.1 mrg && visium_hard_regno_nregs (regno, mode) == 1);
894 1.1 mrg }
895 1.1 mrg
896 1.1 mrg /* Implement TARGET_MODES_TIEABLE_P. */
897 1.1 mrg
898 1.1 mrg static bool
899 1.1 mrg visium_modes_tieable_p (machine_mode mode1, machine_mode mode2)
900 1.1 mrg {
901 1.1 mrg return (GET_MODE_CLASS (mode1) == MODE_INT
902 1.1 mrg && GET_MODE_CLASS (mode2) == MODE_INT);
903 1.1 mrg }
904 1.1 mrg
905 1.1 mrg /* Return true if it is ok to do sibling call optimization for the specified
906 1.1 mrg call expression EXP. DECL will be the called function, or NULL if this
907 1.1 mrg is an indirect call. */
908 1.1 mrg
909 1.1 mrg static bool
910 1.1 mrg visium_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
911 1.1 mrg tree exp ATTRIBUTE_UNUSED)
912 1.1 mrg {
913 1.1 mrg return !visium_interrupt_function_p ();
914 1.1 mrg }
915 1.1 mrg
916 1.1 mrg /* Prepare operands for a move define_expand in MODE. */
917 1.1 mrg
918 1.1 mrg void
919 1.1 mrg prepare_move_operands (rtx *operands, machine_mode mode)
920 1.1 mrg {
921 1.1 mrg /* If the output is not a register, the input must be. */
922 1.1 mrg if (GET_CODE (operands[0]) == MEM && !reg_or_0_operand (operands[1], mode))
923 1.1 mrg operands[1] = force_reg (mode, operands[1]);
924 1.1 mrg }
925 1.1 mrg
926 1.1 mrg /* Return true if the operands are valid for a simple move insn. */
927 1.1 mrg
928 1.1 mrg bool
929 1.1 mrg ok_for_simple_move_operands (rtx *operands, machine_mode mode)
930 1.1 mrg {
931 1.1 mrg /* One of the operands must be a register. */
932 1.1 mrg if (!register_operand (operands[0], mode)
933 1.1 mrg && !reg_or_0_operand (operands[1], mode))
934 1.1 mrg return false;
935 1.1 mrg
936 1.1 mrg /* Once the flags are exposed, no simple moves between integer registers. */
937 1.1 mrg if (visium_flags_exposed
938 1.1 mrg && gpc_reg_operand (operands[0], mode)
939 1.1 mrg && gpc_reg_operand (operands[1], mode))
940 1.1 mrg return false;
941 1.1 mrg
942 1.1 mrg return true;
943 1.1 mrg }
944 1.1 mrg
945 1.1 mrg /* Return true if the operands are valid for a simple move strict insn. */
946 1.1 mrg
947 1.1 mrg bool
948 1.1 mrg ok_for_simple_move_strict_operands (rtx *operands, machine_mode mode)
949 1.1 mrg {
950 1.1 mrg /* Once the flags are exposed, no simple moves between integer registers.
951 1.1 mrg Note that, in QImode only, a zero source counts as an integer register
952 1.1 mrg since it will be emitted as r0. */
953 1.1 mrg if (visium_flags_exposed
954 1.1 mrg && gpc_reg_operand (operands[0], mode)
955 1.1 mrg && (gpc_reg_operand (operands[1], mode)
956 1.1 mrg || (mode == QImode && operands[1] == const0_rtx)))
957 1.1 mrg return false;
958 1.1 mrg
959 1.1 mrg return true;
960 1.1 mrg }
961 1.1 mrg
962 1.1 mrg /* Return true if the operands are valid for a simple arithmetic or logical
963 1.1 mrg insn. */
964 1.1 mrg
965 1.1 mrg bool
966 1.1 mrg ok_for_simple_arith_logic_operands (rtx *, machine_mode)
967 1.1 mrg {
968 1.1 mrg /* Once the flags are exposed, no simple arithmetic or logical operations
969 1.1 mrg between integer registers. */
970 1.1 mrg return !visium_flags_exposed;
971 1.1 mrg }
972 1.1 mrg
973 1.1 mrg /* Return non-zero if a branch or call instruction will be emitting a nop
974 1.1 mrg into its delay slot. */
975 1.1 mrg
976 1.1 mrg int
977 1.1 mrg empty_delay_slot (rtx_insn *insn)
978 1.1 mrg {
979 1.1 mrg rtx seq;
980 1.1 mrg
981 1.1 mrg /* If no previous instruction (should not happen), return true. */
982 1.1 mrg if (PREV_INSN (insn) == NULL)
983 1.1 mrg return 1;
984 1.1 mrg
985 1.1 mrg seq = NEXT_INSN (PREV_INSN (insn));
986 1.1 mrg if (GET_CODE (PATTERN (seq)) == SEQUENCE)
987 1.1 mrg return 0;
988 1.1 mrg
989 1.1 mrg return 1;
990 1.1 mrg }
991 1.1 mrg
992 1.1 mrg /* Wrapper around single_set which returns the second SET of a pair if the
993 1.1 mrg first SET is to the flags register. */
994 1.1 mrg
995 1.1 mrg static rtx
996 1.1 mrg single_set_and_flags (rtx_insn *insn)
997 1.1 mrg {
998 1.1 mrg if (multiple_sets (insn))
999 1.1 mrg {
1000 1.1 mrg rtx pat = PATTERN (insn);
1001 1.1 mrg if (XVECLEN (pat, 0) == 2
1002 1.1 mrg && GET_CODE (XVECEXP (pat, 0, 0)) == SET
1003 1.1 mrg && REG_P (SET_DEST (XVECEXP (pat, 0, 0)))
1004 1.1 mrg && REGNO (SET_DEST (XVECEXP (pat, 0, 0))) == FLAGS_REGNUM)
1005 1.1 mrg return XVECEXP (pat, 0, 1);
1006 1.1 mrg }
1007 1.1 mrg
1008 1.1 mrg return single_set (insn);
1009 1.1 mrg }
1010 1.1 mrg
1011 1.1 mrg /* This is called with OUT_INSN an instruction setting a (base) register
1012 1.1 mrg and IN_INSN a read or a write. Return 1 if these instructions together
1013 1.1 mrg constitute a pipeline hazard.
1014 1.1 mrg
1015 1.1 mrg On the original architecture, a pipeline data hazard occurs when the Dest
1016 1.1 mrg of one instruction becomes the SrcA for an immediately following READ or
1017 1.1 mrg WRITE instruction with a non-zero index (indexing occurs at the decode
1018 1.1 mrg stage and so a NOP must be inserted in-between for this to work).
1019 1.1 mrg
1020 1.1 mrg An example is:
1021 1.1 mrg
1022 1.1 mrg move.l r2,r1
1023 1.1 mrg read.l r4,10(r2)
1024 1.1 mrg
1025 1.1 mrg On the MCM, the non-zero index condition is lifted but the hazard is
1026 1.1 mrg patched up by the hardware through the injection of wait states:
1027 1.1 mrg
1028 1.1 mrg move.l r2,r1
1029 1.1 mrg read.l r4,(r2)
1030 1.1 mrg
1031 1.1 mrg We nevertheless try to schedule instructions around this. */
1032 1.1 mrg
1033 1.1 mrg int
1034 1.1 mrg gr5_hazard_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
1035 1.1 mrg {
1036 1.1 mrg rtx out_set, in_set, dest, memexpr;
1037 1.1 mrg unsigned int out_reg, in_reg;
1038 1.1 mrg
1039 1.1 mrg /* A CALL is storage register class, but the link register is of no
1040 1.1 mrg interest here. */
1041 1.1 mrg if (GET_CODE (out_insn) == CALL_INSN)
1042 1.1 mrg return 0;
1043 1.1 mrg
1044 1.1 mrg out_set = single_set_and_flags (out_insn);
1045 1.1 mrg dest = SET_DEST (out_set);
1046 1.1 mrg
1047 1.1 mrg /* Should be no stall/hazard if OUT_INSN is MEM := ???. This only
1048 1.1 mrg occurs prior to reload. */
1049 1.1 mrg if (GET_CODE (dest) == MEM)
1050 1.1 mrg return 0;
1051 1.1 mrg
1052 1.1 mrg if (GET_CODE (dest) == STRICT_LOW_PART)
1053 1.1 mrg dest = XEXP (dest, 0);
1054 1.1 mrg if (GET_CODE (dest) == SUBREG)
1055 1.1 mrg dest = SUBREG_REG (dest);
1056 1.1 mrg out_reg = REGNO (dest);
1057 1.1 mrg
1058 1.1 mrg in_set = single_set_and_flags (in_insn);
1059 1.1 mrg
1060 1.1 mrg /* If IN_INSN is MEM := MEM, it's the source that counts. */
1061 1.1 mrg if (GET_CODE (SET_SRC (in_set)) == MEM)
1062 1.1 mrg memexpr = XEXP (SET_SRC (in_set), 0);
1063 1.1 mrg else
1064 1.1 mrg memexpr = XEXP (SET_DEST (in_set), 0);
1065 1.1 mrg
1066 1.1 mrg if (GET_CODE (memexpr) == PLUS)
1067 1.1 mrg {
1068 1.1 mrg memexpr = XEXP (memexpr, 0);
1069 1.1 mrg if (GET_CODE (memexpr) == SUBREG)
1070 1.1 mrg in_reg = REGNO (SUBREG_REG (memexpr));
1071 1.1 mrg else
1072 1.1 mrg in_reg = REGNO (memexpr);
1073 1.1 mrg
1074 1.1 mrg if (in_reg == out_reg)
1075 1.1 mrg return 1;
1076 1.1 mrg }
1077 1.1 mrg else if (TARGET_MCM)
1078 1.1 mrg {
1079 1.1 mrg if (GET_CODE (memexpr) == STRICT_LOW_PART)
1080 1.1 mrg memexpr = XEXP (memexpr, 0);
1081 1.1 mrg if (GET_CODE (memexpr) == SUBREG)
1082 1.1 mrg memexpr = SUBREG_REG (memexpr);
1083 1.1 mrg in_reg = REGNO (memexpr);
1084 1.1 mrg
1085 1.1 mrg if (in_reg == out_reg)
1086 1.1 mrg return 1;
1087 1.1 mrg }
1088 1.1 mrg
1089 1.1 mrg return 0;
1090 1.1 mrg }
1091 1.1 mrg
1092 1.1 mrg /* Return true if INSN is an empty asm instruction. */
1093 1.1 mrg
1094 1.1 mrg static bool
1095 1.1 mrg empty_asm_p (rtx insn)
1096 1.1 mrg {
1097 1.1 mrg rtx body = PATTERN (insn);
1098 1.1 mrg const char *templ;
1099 1.1 mrg
1100 1.1 mrg if (GET_CODE (body) == ASM_INPUT)
1101 1.1 mrg templ = XSTR (body, 0);
1102 1.1 mrg else if (asm_noperands (body) >= 0)
1103 1.1 mrg templ = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
1104 1.1 mrg else
1105 1.1 mrg templ = NULL;
1106 1.1 mrg
1107 1.1 mrg return (templ && templ[0] == '\0');
1108 1.1 mrg }
1109 1.1 mrg
1110 1.1 mrg /* Insert a NOP immediately before INSN wherever there is a pipeline hazard.
1111 1.1 mrg LAST_REG records the register set in the last insn and LAST_INSN_CALL
1112 1.1 mrg records whether the last insn was a call insn. */
1113 1.1 mrg
1114 1.1 mrg static void
1115 1.1 mrg gr5_avoid_hazard (rtx_insn *insn, unsigned int *last_reg, bool *last_insn_call)
1116 1.1 mrg {
1117 1.1 mrg unsigned int dest_reg = 0;
1118 1.1 mrg rtx set;
1119 1.1 mrg
1120 1.1 mrg switch (GET_CODE (insn))
1121 1.1 mrg {
1122 1.1 mrg case CALL_INSN:
1123 1.1 mrg *last_reg = 0;
1124 1.1 mrg *last_insn_call = true;
1125 1.1 mrg return;
1126 1.1 mrg
1127 1.1 mrg case JUMP_INSN:
1128 1.1 mrg /* If this is an empty asm, just skip it. */
1129 1.1 mrg if (!empty_asm_p (insn))
1130 1.1 mrg {
1131 1.1 mrg *last_reg = 0;
1132 1.1 mrg *last_insn_call = false;
1133 1.1 mrg }
1134 1.1 mrg return;
1135 1.1 mrg
1136 1.1 mrg case INSN:
1137 1.1 mrg /* If this is an empty asm, just skip it. */
1138 1.1 mrg if (empty_asm_p (insn))
1139 1.1 mrg return;
1140 1.1 mrg break;
1141 1.1 mrg
1142 1.1 mrg default:
1143 1.1 mrg return;
1144 1.1 mrg }
1145 1.1 mrg
1146 1.1 mrg set = single_set_and_flags (insn);
1147 1.1 mrg if (set != NULL_RTX)
1148 1.1 mrg {
1149 1.1 mrg rtx dest = SET_DEST (set);
1150 1.1 mrg const bool double_p = GET_MODE_SIZE (GET_MODE (dest)) > UNITS_PER_WORD;
1151 1.1 mrg rtx memrtx = NULL;
1152 1.1 mrg
1153 1.1 mrg if (GET_CODE (SET_SRC (set)) == MEM)
1154 1.1 mrg {
1155 1.1 mrg memrtx = XEXP (SET_SRC (set), 0);
1156 1.1 mrg if (GET_CODE (dest) == STRICT_LOW_PART)
1157 1.1 mrg dest = XEXP (dest, 0);
1158 1.1 mrg if (REG_P (dest))
1159 1.1 mrg dest_reg = REGNO (dest);
1160 1.1 mrg
1161 1.1 mrg /* If this is a DI or DF mode memory to register
1162 1.1 mrg copy, then if rd = rs we get
1163 1.1 mrg
1164 1.1 mrg rs + 1 := 1[rs]
1165 1.1 mrg rs := [rs]
1166 1.1 mrg
1167 1.1 mrg otherwise the order is
1168 1.1 mrg
1169 1.1 mrg rd := [rs]
1170 1.1 mrg rd + 1 := 1[rs] */
1171 1.1 mrg
1172 1.1 mrg if (double_p)
1173 1.1 mrg {
1174 1.1 mrg unsigned int base_reg;
1175 1.1 mrg
1176 1.1 mrg if (GET_CODE (memrtx) == PLUS)
1177 1.1 mrg base_reg = REGNO (XEXP (memrtx, 0));
1178 1.1 mrg else
1179 1.1 mrg base_reg = REGNO (memrtx);
1180 1.1 mrg
1181 1.1 mrg if (dest_reg != base_reg)
1182 1.1 mrg dest_reg++;
1183 1.1 mrg }
1184 1.1 mrg }
1185 1.1 mrg
1186 1.1 mrg else if (GET_CODE (dest) == MEM)
1187 1.1 mrg memrtx = XEXP (dest, 0);
1188 1.1 mrg
1189 1.1 mrg else if (GET_MODE_CLASS (GET_MODE (dest)) != MODE_CC)
1190 1.1 mrg {
1191 1.1 mrg if (GET_CODE (dest) == STRICT_LOW_PART
1192 1.1 mrg ||GET_CODE (dest) == ZERO_EXTRACT)
1193 1.1 mrg dest = XEXP (dest, 0);
1194 1.1 mrg dest_reg = REGNO (dest);
1195 1.1 mrg
1196 1.1 mrg if (GET_CODE (SET_SRC (set)) == REG)
1197 1.1 mrg {
1198 1.1 mrg unsigned int srcreg = REGNO (SET_SRC (set));
1199 1.1 mrg
1200 1.1 mrg /* Check for rs := rs, which will be deleted. */
1201 1.1 mrg if (srcreg == dest_reg)
1202 1.1 mrg return;
1203 1.1 mrg
1204 1.1 mrg /* In the case of a DI or DF mode move from register to
1205 1.1 mrg register there is overlap if rd = rs + 1 in which case
1206 1.1 mrg the order of the copies is reversed :
1207 1.1 mrg
1208 1.1 mrg rd + 1 := rs + 1;
1209 1.1 mrg rd := rs */
1210 1.1 mrg
1211 1.1 mrg if (double_p && dest_reg != srcreg + 1)
1212 1.1 mrg dest_reg++;
1213 1.1 mrg }
1214 1.1 mrg }
1215 1.1 mrg
1216 1.1 mrg /* If this is the delay slot of a call insn, any register it sets
1217 1.1 mrg is not relevant. */
1218 1.1 mrg if (*last_insn_call)
1219 1.1 mrg dest_reg = 0;
1220 1.1 mrg
1221 1.1 mrg /* If the previous insn sets the value of a register, and this insn
1222 1.1 mrg uses a base register, check for the pipeline hazard where it is
1223 1.1 mrg the same register in each case. */
1224 1.1 mrg if (*last_reg != 0 && memrtx != NULL_RTX)
1225 1.1 mrg {
1226 1.1 mrg unsigned int reg = 0;
1227 1.1 mrg
1228 1.1 mrg /* Check for an index (original architecture). */
1229 1.1 mrg if (GET_CODE (memrtx) == PLUS)
1230 1.1 mrg reg = REGNO (XEXP (memrtx, 0));
1231 1.1 mrg
1232 1.1 mrg /* Check for an MCM target or rs := [rs], in DI or DF mode. */
1233 1.1 mrg else if (TARGET_MCM || (double_p && REGNO (memrtx) == dest_reg))
1234 1.1 mrg reg = REGNO (memrtx);
1235 1.1 mrg
1236 1.1 mrg /* Remove any pipeline hazard by inserting a NOP. */
1237 1.1 mrg if (reg == *last_reg)
1238 1.1 mrg {
1239 1.1 mrg if (dump_file)
1240 1.1 mrg fprintf (dump_file,
1241 1.1 mrg "inserting nop before insn %d\n", INSN_UID (insn));
1242 1.1 mrg emit_insn_after (gen_hazard_nop (), prev_active_insn (insn));
1243 1.1 mrg emit_insn_after (gen_blockage (), insn);
1244 1.1 mrg }
1245 1.1 mrg }
1246 1.1 mrg
1247 1.1 mrg *last_reg = dest_reg;
1248 1.1 mrg }
1249 1.1 mrg
1250 1.1 mrg *last_insn_call = false;
1251 1.1 mrg }
1252 1.1 mrg
1253 1.1 mrg /* Go through the instruction stream and insert nops where necessary to avoid
1254 1.1 mrg pipeline hazards. There are two cases:
1255 1.1 mrg
1256 1.1 mrg 1. On the original architecture, it is invalid to set the value of a
1257 1.1 mrg (base) register and then use it in an address with a non-zero index
1258 1.1 mrg in the next instruction.
1259 1.1 mrg
1260 1.1 mrg 2. On the MCM, setting the value of a (base) register and then using
1261 1.1 mrg it in address (including with zero index) in the next instruction
1262 1.1 mrg will result in a pipeline stall of 3 cycles. */
1263 1.1 mrg
1264 1.1 mrg static void
1265 1.1 mrg gr5_hazard_avoidance (void)
1266 1.1 mrg {
1267 1.1 mrg unsigned int last_reg = 0;
1268 1.1 mrg bool last_insn_call = false;
1269 1.1 mrg rtx_insn *insn;
1270 1.1 mrg
1271 1.1 mrg for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1272 1.1 mrg if (INSN_P (insn))
1273 1.1 mrg {
1274 1.1 mrg rtx pat = PATTERN (insn);
1275 1.1 mrg
1276 1.1 mrg if (GET_CODE (pat) == SEQUENCE)
1277 1.1 mrg {
1278 1.1 mrg for (int i = 0; i < XVECLEN (pat, 0); i++)
1279 1.1 mrg gr5_avoid_hazard (as_a <rtx_insn *> (XVECEXP (pat, 0, i)),
1280 1.1 mrg &last_reg, &last_insn_call);
1281 1.1 mrg }
1282 1.1 mrg
1283 1.1 mrg else if (GET_CODE (insn) == CALL_INSN)
1284 1.1 mrg {
1285 1.1 mrg /* This call is going to get a nop in its delay slot. */
1286 1.1 mrg last_reg = 0;
1287 1.1 mrg last_insn_call = false;
1288 1.1 mrg }
1289 1.1 mrg
1290 1.1 mrg else
1291 1.1 mrg gr5_avoid_hazard (insn, &last_reg, &last_insn_call);
1292 1.1 mrg }
1293 1.1 mrg
1294 1.1 mrg else if (GET_CODE (insn) == BARRIER)
1295 1.1 mrg last_reg = 0;
1296 1.1 mrg }
1297 1.1 mrg
1298 1.1 mrg /* Perform a target-specific pass over the instruction stream. The compiler
1299 1.1 mrg will run it at all optimization levels, just after the point at which it
1300 1.1 mrg normally does delayed-branch scheduling. */
1301 1.1 mrg
1302 1.1 mrg static unsigned int
1303 1.1 mrg visium_reorg (void)
1304 1.1 mrg {
1305 1.1 mrg if (visium_cpu == PROCESSOR_GR5)
1306 1.1 mrg gr5_hazard_avoidance ();
1307 1.1 mrg
1308 1.1 mrg return 0;
1309 1.1 mrg }
1310 1.1 mrg /* Return true if an argument must be passed by indirect reference. */
1311 1.1 mrg
1312 1.1 mrg static bool
1313 1.1 mrg visium_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
1314 1.1 mrg {
1315 1.1 mrg tree type = arg.type;
1316 1.1 mrg return type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE);
1317 1.1 mrg }
1318 1.1 mrg
1319 1.1 mrg /* Define how arguments are passed.
1320 1.1 mrg
1321 1.1 mrg A range of general registers and floating registers is available
1322 1.1 mrg for passing arguments. When the class of registers which an
1323 1.1 mrg argument would normally use is exhausted, that argument, is passed
1324 1.1 mrg in the overflow region of the stack. No argument is split between
1325 1.1 mrg registers and stack.
1326 1.1 mrg
1327 1.1 mrg Arguments of type float or _Complex float go in FP registers if FP
1328 1.1 mrg hardware is available. If there is no FP hardware, arguments of
1329 1.1 mrg type float go in general registers. All other arguments are passed
1330 1.1 mrg in general registers. */
1331 1.1 mrg
1332 1.1 mrg static rtx
1333 1.1 mrg visium_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
1334 1.1 mrg {
1335 1.1 mrg int size;
1336 1.1 mrg CUMULATIVE_ARGS *ca = get_cumulative_args (pcum_v);
1337 1.1 mrg
1338 1.1 mrg size = (GET_MODE_SIZE (arg.mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1339 1.1 mrg if (arg.end_marker_p ())
1340 1.1 mrg return GEN_INT (0);
1341 1.1 mrg
1342 1.1 mrg /* Scalar or complex single precision floating point arguments are returned
1343 1.1 mrg in floating registers. */
1344 1.1 mrg if (TARGET_FPU
1345 1.1 mrg && ((GET_MODE_CLASS (arg.mode) == MODE_FLOAT
1346 1.1 mrg && GET_MODE_SIZE (arg.mode) <= UNITS_PER_HWFPVALUE)
1347 1.1 mrg || (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
1348 1.1 mrg && GET_MODE_SIZE (arg.mode) <= UNITS_PER_HWFPVALUE * 2)))
1349 1.1 mrg {
1350 1.1 mrg if (ca->frcount + size <= MAX_ARGS_IN_FP_REGISTERS)
1351 1.1 mrg return gen_rtx_REG (arg.mode, FP_ARG_FIRST + ca->frcount);
1352 1.1 mrg else
1353 1.1 mrg return NULL_RTX;
1354 1.1 mrg }
1355 1.1 mrg
1356 1.1 mrg if (ca->grcount + size <= MAX_ARGS_IN_GP_REGISTERS)
1357 1.1 mrg return gen_rtx_REG (arg.mode, ca->grcount + GP_ARG_FIRST);
1358 1.1 mrg
1359 1.1 mrg return NULL_RTX;
1360 1.1 mrg }
1361 1.1 mrg
1362 1.1 mrg /* Update the summarizer variable pointed to by PCUM_V to advance past
1363 1.1 mrg argument ARG. Once this is done, the variable CUM is suitable for
1364 1.1 mrg analyzing the _following_ argument with visium_function_arg. */
1365 1.1 mrg
1366 1.1 mrg static void
1367 1.1 mrg visium_function_arg_advance (cumulative_args_t pcum_v,
1368 1.1 mrg const function_arg_info &arg)
1369 1.1 mrg {
1370 1.1 mrg int size = (GET_MODE_SIZE (arg.mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1371 1.1 mrg int stack_size = 0;
1372 1.1 mrg CUMULATIVE_ARGS *ca = get_cumulative_args (pcum_v);
1373 1.1 mrg
1374 1.1 mrg /* Scalar or complex single precision floating point arguments are returned
1375 1.1 mrg in floating registers. */
1376 1.1 mrg if (TARGET_FPU
1377 1.1 mrg && ((GET_MODE_CLASS (arg.mode) == MODE_FLOAT
1378 1.1 mrg && GET_MODE_SIZE (arg.mode) <= UNITS_PER_HWFPVALUE)
1379 1.1 mrg || (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
1380 1.1 mrg && GET_MODE_SIZE (arg.mode) <= UNITS_PER_HWFPVALUE * 2)))
1381 1.1 mrg {
1382 1.1 mrg if (ca->frcount + size <= MAX_ARGS_IN_FP_REGISTERS)
1383 1.1 mrg ca->frcount += size;
1384 1.1 mrg else
1385 1.1 mrg {
1386 1.1 mrg stack_size = size;
1387 1.1 mrg ca->frcount = MAX_ARGS_IN_FP_REGISTERS;
1388 1.1 mrg }
1389 1.1 mrg }
1390 1.1 mrg else
1391 1.1 mrg {
1392 1.1 mrg /* Everything else goes in a general register, if enough are
1393 1.1 mrg available. */
1394 1.1 mrg if (ca->grcount + size <= MAX_ARGS_IN_GP_REGISTERS)
1395 1.1 mrg ca->grcount += size;
1396 1.1 mrg else
1397 1.1 mrg {
1398 1.1 mrg stack_size = size;
1399 1.1 mrg ca->grcount = MAX_ARGS_IN_GP_REGISTERS;
1400 1.1 mrg }
1401 1.1 mrg }
1402 1.1 mrg
1403 1.1 mrg if (arg.named)
1404 1.1 mrg ca->stack_words += stack_size;
1405 1.1 mrg }
1406 1.1 mrg
1407 1.1 mrg /* Specify whether to return the return value in memory. */
1408 1.1 mrg
1409 1.1 mrg static bool
1410 1.1 mrg visium_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1411 1.1 mrg {
1412 1.1 mrg return (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE);
1413 1.1 mrg }
1414 1.1 mrg
1415 1.1 mrg /* Define how scalar values are returned. */
1416 1.1 mrg
1417 1.1 mrg static rtx
1418 1.1 mrg visium_function_value_1 (machine_mode mode)
1419 1.1 mrg {
1420 1.1 mrg /* Scalar or complex single precision floating point values
1421 1.1 mrg are returned in floating register f1. */
1422 1.1 mrg if (TARGET_FPU
1423 1.1 mrg && ((GET_MODE_CLASS (mode) == MODE_FLOAT
1424 1.1 mrg && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
1425 1.1 mrg || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
1426 1.1 mrg && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)))
1427 1.1 mrg return gen_rtx_REG (mode, FP_RETURN_REGNUM);
1428 1.1 mrg
1429 1.1 mrg /* All others are returned in r1. */
1430 1.1 mrg return gen_rtx_REG (mode, RETURN_REGNUM);
1431 1.1 mrg }
1432 1.1 mrg
1433 1.1 mrg /* Return an RTX representing the place where a function returns or receives
1434 1.1 mrg a value of data type RET_TYPE. */
1435 1.1 mrg
1436 1.1 mrg static rtx
1437 1.1 mrg visium_function_value (const_tree ret_type,
1438 1.1 mrg const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1439 1.1 mrg bool outgoing ATTRIBUTE_UNUSED)
1440 1.1 mrg {
1441 1.1 mrg return visium_function_value_1 (TYPE_MODE (ret_type));
1442 1.1 mrg }
1443 1.1 mrg
1444 1.1 mrg /* Return an RTX representing the place where the library function result will
1445 1.1 mrg be returned. */
1446 1.1 mrg
1447 1.1 mrg static rtx
1448 1.1 mrg visium_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1449 1.1 mrg {
1450 1.1 mrg return visium_function_value_1 (mode);
1451 1.1 mrg }
1452 1.1 mrg
1453 1.1 mrg /* Store the anonymous register arguments into the stack so that all the
1454 1.1 mrg arguments appear to have been passed consecutively on the stack. */
1455 1.1 mrg
1456 1.1 mrg static void
1457 1.1 mrg visium_setup_incoming_varargs (cumulative_args_t pcum_v,
1458 1.1 mrg const function_arg_info &arg,
1459 1.1 mrg int *pretend_size ATTRIBUTE_UNUSED,
1460 1.1 mrg int no_rtl)
1461 1.1 mrg {
1462 1.1 mrg cumulative_args_t local_args_so_far;
1463 1.1 mrg CUMULATIVE_ARGS local_copy;
1464 1.1 mrg CUMULATIVE_ARGS *locargs;
1465 1.1 mrg int gp_saved, fp_saved, size;
1466 1.1 mrg
1467 1.1 mrg /* Create an internal cumulative_args_t pointer to internally define
1468 1.1 mrg storage to ensure calling TARGET_FUNCTION_ARG_ADVANCE does not
1469 1.1 mrg make global changes. */
1470 1.1 mrg local_args_so_far.p = &local_copy;
1471 1.1 mrg locargs = get_cumulative_args (pcum_v);
1472 1.1 mrg
1473 1.1 mrg #if CHECKING_P
1474 1.1 mrg local_args_so_far.magic = CUMULATIVE_ARGS_MAGIC;
1475 1.1 mrg #endif
1476 1.1 mrg
1477 1.1 mrg local_copy.grcount = locargs->grcount;
1478 1.1 mrg local_copy.frcount = locargs->frcount;
1479 1.1 mrg local_copy.stack_words = locargs->stack_words;
1480 1.1 mrg
1481 1.1 mrg /* The caller has advanced ARGS_SO_FAR up to, but not beyond, the last named
1482 1.1 mrg argument. Advance a local copy of ARGS_SO_FAR past the last "real" named
1483 1.1 mrg argument, to find out how many registers are left over. */
1484 1.1 mrg TARGET_FUNCTION_ARG_ADVANCE (local_args_so_far, arg);
1485 1.1 mrg
1486 1.1 mrg /* Find how many registers we need to save. */
1487 1.1 mrg locargs = get_cumulative_args (local_args_so_far);
1488 1.1 mrg gp_saved = MAX_ARGS_IN_GP_REGISTERS - locargs->grcount;
1489 1.1 mrg fp_saved = (TARGET_FPU ? MAX_ARGS_IN_FP_REGISTERS - locargs->frcount : 0);
1490 1.1 mrg size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_HWFPVALUE);
1491 1.1 mrg
1492 1.1 mrg if (!no_rtl && size > 0)
1493 1.1 mrg {
1494 1.1 mrg /* To avoid negative offsets, which are not valid addressing modes on
1495 1.1 mrg the Visium, we create a base register for the pretend args. */
1496 1.1 mrg rtx ptr
1497 1.1 mrg = force_reg (Pmode,
1498 1.1 mrg plus_constant (Pmode, virtual_incoming_args_rtx, -size));
1499 1.1 mrg
1500 1.1 mrg if (gp_saved > 0)
1501 1.1 mrg {
1502 1.1 mrg rtx mem
1503 1.1 mrg = gen_rtx_MEM (BLKmode,
1504 1.1 mrg plus_constant (Pmode,
1505 1.1 mrg ptr,
1506 1.1 mrg fp_saved * UNITS_PER_HWFPVALUE));
1507 1.1 mrg MEM_NOTRAP_P (mem) = 1;
1508 1.1 mrg set_mem_alias_set (mem, get_varargs_alias_set ());
1509 1.1 mrg move_block_from_reg (locargs->grcount + GP_ARG_FIRST, mem, gp_saved);
1510 1.1 mrg }
1511 1.1 mrg
1512 1.1 mrg if (fp_saved > 0)
1513 1.1 mrg {
1514 1.1 mrg rtx mem = gen_rtx_MEM (BLKmode, ptr);
1515 1.1 mrg MEM_NOTRAP_P (mem) = 1;
1516 1.1 mrg set_mem_alias_set (mem, get_varargs_alias_set ());
1517 1.1 mrg gcc_assert (UNITS_PER_WORD == UNITS_PER_HWFPVALUE);
1518 1.1 mrg move_block_from_reg (locargs->frcount + FP_ARG_FIRST, mem, fp_saved);
1519 1.1 mrg }
1520 1.1 mrg }
1521 1.1 mrg
1522 1.1 mrg visium_reg_parm_save_area_size = size;
1523 1.1 mrg }
1524 1.1 mrg
1525 1.1 mrg /* Define the `__builtin_va_list' type for the ABI. */
1526 1.1 mrg
1527 1.1 mrg static tree
1528 1.1 mrg visium_build_builtin_va_list (void)
1529 1.1 mrg {
1530 1.1 mrg tree f_ovfl, f_gbase, f_fbase, f_gbytes, f_fbytes, record;
1531 1.1 mrg
1532 1.1 mrg record = (*lang_hooks.types.make_type) (RECORD_TYPE);
1533 1.1 mrg f_ovfl = build_decl (BUILTINS_LOCATION, FIELD_DECL,
1534 1.1 mrg get_identifier ("__overflow_argptr"), ptr_type_node);
1535 1.1 mrg f_gbase = build_decl (BUILTINS_LOCATION, FIELD_DECL,
1536 1.1 mrg get_identifier ("__gpr_base"), ptr_type_node);
1537 1.1 mrg f_fbase = build_decl (BUILTINS_LOCATION, FIELD_DECL,
1538 1.1 mrg get_identifier ("__fpr_base"), ptr_type_node);
1539 1.1 mrg f_gbytes = build_decl (BUILTINS_LOCATION, FIELD_DECL,
1540 1.1 mrg get_identifier ("__gpr_bytes"),
1541 1.1 mrg short_unsigned_type_node);
1542 1.1 mrg f_fbytes = build_decl (BUILTINS_LOCATION, FIELD_DECL,
1543 1.1 mrg get_identifier ("__fpr_bytes"),
1544 1.1 mrg short_unsigned_type_node);
1545 1.1 mrg
1546 1.1 mrg DECL_FIELD_CONTEXT (f_ovfl) = record;
1547 1.1 mrg DECL_FIELD_CONTEXT (f_gbase) = record;
1548 1.1 mrg DECL_FIELD_CONTEXT (f_fbase) = record;
1549 1.1 mrg DECL_FIELD_CONTEXT (f_gbytes) = record;
1550 1.1 mrg DECL_FIELD_CONTEXT (f_fbytes) = record;
1551 1.1 mrg TYPE_FIELDS (record) = f_ovfl;
1552 1.1 mrg TREE_CHAIN (f_ovfl) = f_gbase;
1553 1.1 mrg TREE_CHAIN (f_gbase) = f_fbase;
1554 1.1 mrg TREE_CHAIN (f_fbase) = f_gbytes;
1555 1.1 mrg TREE_CHAIN (f_gbytes) = f_fbytes;
1556 1.1 mrg layout_type (record);
1557 1.1 mrg
1558 1.1 mrg return record;
1559 1.1 mrg }
1560 1.1 mrg
1561 1.1 mrg /* Implement `va_start' for varargs and stdarg. */
1562 1.1 mrg
1563 1.1 mrg static void
1564 1.1 mrg visium_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
1565 1.1 mrg {
1566 1.1 mrg const CUMULATIVE_ARGS *ca = &crtl->args.info;
1567 1.1 mrg int gp_saved = MAX_ARGS_IN_GP_REGISTERS - ca->grcount;
1568 1.1 mrg int fp_saved = (TARGET_FPU ? MAX_ARGS_IN_FP_REGISTERS - ca->frcount : 0);
1569 1.1 mrg int named_stack_size = ca->stack_words * UNITS_PER_WORD, offset;
1570 1.1 mrg tree f_ovfl, f_gbase, f_fbase, f_gbytes, f_fbytes;
1571 1.1 mrg tree ovfl, gbase, gbytes, fbase, fbytes, t;
1572 1.1 mrg
1573 1.1 mrg f_ovfl = TYPE_FIELDS (va_list_type_node);
1574 1.1 mrg f_gbase = TREE_CHAIN (f_ovfl);
1575 1.1 mrg f_fbase = TREE_CHAIN (f_gbase);
1576 1.1 mrg f_gbytes = TREE_CHAIN (f_fbase);
1577 1.1 mrg f_fbytes = TREE_CHAIN (f_gbytes);
1578 1.1 mrg ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl, NULL_TREE);
1579 1.1 mrg gbase = build3 (COMPONENT_REF, TREE_TYPE (f_gbase), valist, f_gbase,
1580 1.1 mrg NULL_TREE);
1581 1.1 mrg fbase = build3 (COMPONENT_REF, TREE_TYPE (f_fbase), valist, f_fbase,
1582 1.1 mrg NULL_TREE);
1583 1.1 mrg gbytes = build3 (COMPONENT_REF, TREE_TYPE (f_gbytes), valist, f_gbytes,
1584 1.1 mrg NULL_TREE);
1585 1.1 mrg fbytes = build3 (COMPONENT_REF, TREE_TYPE (f_fbytes), valist, f_fbytes,
1586 1.1 mrg NULL_TREE);
1587 1.1 mrg
1588 1.1 mrg /* Store the stacked vararg pointer in the OVFL member. */
1589 1.1 mrg t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
1590 1.1 mrg t = fold_build_pointer_plus_hwi (t, named_stack_size);
1591 1.1 mrg t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
1592 1.1 mrg expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
1593 1.1 mrg
1594 1.1 mrg /* Store the base address of the GPR save area into GBASE. */
1595 1.1 mrg t = make_tree (TREE_TYPE (gbase), virtual_incoming_args_rtx);
1596 1.1 mrg offset = MAX_ARGS_IN_GP_REGISTERS * UNITS_PER_WORD;
1597 1.1 mrg t = fold_build_pointer_plus_hwi (t, -offset);
1598 1.1 mrg t = build2 (MODIFY_EXPR, TREE_TYPE (gbase), gbase, t);
1599 1.1 mrg expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
1600 1.1 mrg
1601 1.1 mrg /* Store the base address of the FPR save area into FBASE. */
1602 1.1 mrg if (fp_saved)
1603 1.1 mrg {
1604 1.1 mrg t = make_tree (TREE_TYPE (fbase), virtual_incoming_args_rtx);
1605 1.1 mrg offset = gp_saved * UNITS_PER_WORD
1606 1.1 mrg + MAX_ARGS_IN_FP_REGISTERS * UNITS_PER_HWFPVALUE;
1607 1.1 mrg t = fold_build_pointer_plus_hwi (t, -offset);
1608 1.1 mrg t = build2 (MODIFY_EXPR, TREE_TYPE (fbase), fbase, t);
1609 1.1 mrg expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
1610 1.1 mrg }
1611 1.1 mrg
1612 1.1 mrg /* Fill in the GBYTES member. */
1613 1.1 mrg t = build2 (MODIFY_EXPR, TREE_TYPE (gbytes), gbytes,
1614 1.1 mrg size_int (gp_saved * UNITS_PER_WORD));
1615 1.1 mrg expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
1616 1.1 mrg
1617 1.1 mrg /* Fill in the FBYTES member. */
1618 1.1 mrg t = build2 (MODIFY_EXPR, TREE_TYPE (fbytes),
1619 1.1 mrg fbytes, size_int (fp_saved * UNITS_PER_HWFPVALUE));
1620 1.1 mrg expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
1621 1.1 mrg }
1622 1.1 mrg
/* Implement `va_arg'.

   Generate GIMPLE in *PRE_P to fetch the next argument of type TYPE from
   the va_list VALIST and return a dereferenceable tree for it.  POST_P is
   only passed down to the recursive call used for arguments passed by
   reference.  */

static tree
visium_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_ovfl, f_gbase, f_fbase, f_gbytes, f_fbytes;
  tree ovfl, base, bytes;
  HOST_WIDE_INT size, rsize;
  const bool by_reference_p = pass_va_arg_by_reference (type);
  /* Small scalar and complex FP arguments live in the FP save area when
     the FPU is enabled; everything else goes through the GP save area or
     the stack overflow area.  */
  const bool float_reg_arg_p
    = (TARGET_FPU && !by_reference_p
       && ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
	    && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_HWFPVALUE)
	   || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
	       && (GET_MODE_SIZE (TYPE_MODE (type))
		   <= UNITS_PER_HWFPVALUE * 2))));
  const int max_save_area_size
    = (float_reg_arg_p ? MAX_ARGS_IN_FP_REGISTERS * UNITS_PER_HWFPVALUE
       : MAX_ARGS_IN_GP_REGISTERS * UNITS_PER_WORD);
  tree t, u, offs;
  tree lab_false, lab_over, addr;
  tree ptrtype = build_pointer_type (type);

  /* By-reference arguments are fetched as a pointer and dereferenced.  */
  if (by_reference_p)
    {
      t = visium_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* RSIZE is the size rounded up to a multiple of the word size.  */
  size = int_size_in_bytes (type);
  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
  /* Walk the five fields of the va_list record in declaration order.  */
  f_ovfl = TYPE_FIELDS (va_list_type_node);
  f_gbase = TREE_CHAIN (f_ovfl);
  f_fbase = TREE_CHAIN (f_gbase);
  f_gbytes = TREE_CHAIN (f_fbase);
  f_fbytes = TREE_CHAIN (f_gbytes);

  /* We maintain separate pointers and offsets for floating-point and
     general registers, but we need similar code in both cases.

     Let:

     BYTES be the number of unused bytes in the register save area.
     BASE be the base address of the register save area.
     OFFS be the current offset into the register save area.  Either
     MAX_ARGS_IN_GP_REGISTERS * UNITS_PER_WORD - bytes or
     MAX_ARGS_IN_FP_REGISTERS * UNITS_PER_HWFPVALUE - bytes
     depending upon whether the argument is in general or floating
     registers.
     ADDR_RTX be the address of the argument.
     RSIZE be the size in bytes of the argument.
     OVFL be the pointer to the stack overflow area.

     The code we want is:

     1: if (bytes >= rsize)
     2:   {
     3:     addr_rtx = base + offs;
     4:     bytes -= rsize;
     5:   }
     6: else
     7:   {
     8:     bytes = 0;
     9:     addr_rtx = ovfl;
     10:    ovfl += rsize;
     11:  }

   */

  addr = create_tmp_var (ptr_type_node, "addr");
  lab_false = create_artificial_label (UNKNOWN_LOCATION);
  lab_over = create_artificial_label (UNKNOWN_LOCATION);
  /* BYTES is the count of unused save-area bytes for this argument's
     register class.  */
  if (float_reg_arg_p)
    bytes = build3 (COMPONENT_REF, TREE_TYPE (f_fbytes), unshare_expr (valist),
		    f_fbytes, NULL_TREE);
  else
    bytes = build3 (COMPONENT_REF, TREE_TYPE (f_gbytes), unshare_expr (valist),
		    f_gbytes, NULL_TREE);

  /* [1] Emit code to branch if bytes < rsize.  */
  t = fold_convert (TREE_TYPE (bytes), size_int (rsize));
  t = build2 (LT_EXPR, boolean_type_node, bytes, t);
  u = build1 (GOTO_EXPR, void_type_node, lab_false);
  t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
  gimplify_and_add (t, pre_p);

  /* [3] Emit code for: addr_rtx = base + offs, where
     offs = max_save_area_size - bytes.  */
  t = fold_convert (sizetype, bytes);
  offs = build2 (MINUS_EXPR, sizetype, size_int (max_save_area_size), t);
  if (float_reg_arg_p)
    base = build3 (COMPONENT_REF, TREE_TYPE (f_fbase), valist, f_fbase,
		   NULL_TREE);
  else
    base = build3 (COMPONENT_REF, TREE_TYPE (f_gbase), valist, f_gbase,
		   NULL_TREE);

  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, offs);
  t = build2 (MODIFY_EXPR, void_type_node, addr, t);
  gimplify_and_add (t, pre_p);

  /* [4] Emit code for: bytes -= rsize.  */
  t = fold_convert (TREE_TYPE (bytes), size_int (rsize));
  t = build2 (MINUS_EXPR, TREE_TYPE (bytes), bytes, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (bytes), bytes, t);
  gimplify_and_add (t, pre_p);

  /* [6] Emit code to branch over the else clause, then the label.  */
  t = build1 (GOTO_EXPR, void_type_node, lab_over);
  gimplify_and_add (t, pre_p);
  t = build1 (LABEL_EXPR, void_type_node, lab_false);
  gimplify_and_add (t, pre_p);

  /* [8] Emit code for: bytes = 0.  */
  t = fold_convert (TREE_TYPE (bytes), size_int (0));
  t = build2 (MODIFY_EXPR, TREE_TYPE (bytes), unshare_expr (bytes), t);
  gimplify_and_add (t, pre_p);

  /* [9] Emit code for: addr_rtx = ovfl.  */
  ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl, NULL_TREE);
  t = build2 (MODIFY_EXPR, void_type_node, addr, ovfl);
  gimplify_and_add (t, pre_p);

  /* [10] Emit code for: ovfl += rsize.  */
  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, size_int (rsize));
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), unshare_expr (ovfl), t);
  gimplify_and_add (t, pre_p);
  t = build1 (LABEL_EXPR, void_type_node, lab_over);
  gimplify_and_add (t, pre_p);

  /* Emit a big-endian correction if size < UNITS_PER_WORD.  */
  if (size < UNITS_PER_WORD)
    {
      t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr), addr,
		  size_int (UNITS_PER_WORD - size));
      t = build2 (MODIFY_EXPR, void_type_node, addr, t);
      gimplify_and_add (t, pre_p);
    }

  addr = fold_convert (ptrtype, addr);

  return build_va_arg_indirect_ref (addr);
}
1767 1.1 mrg
1768 1.1 mrg /* Return true if OP is an offset suitable for use as a displacement in the
1769 1.1 mrg address of a memory access in mode MODE. */
1770 1.1 mrg
1771 1.1 mrg static bool
1772 1.1 mrg rtx_ok_for_offset_p (machine_mode mode, rtx op)
1773 1.1 mrg {
1774 1.1 mrg if (!CONST_INT_P (op) || INTVAL (op) < 0)
1775 1.1 mrg return false;
1776 1.1 mrg
1777 1.1 mrg switch (mode)
1778 1.1 mrg {
1779 1.1 mrg case E_QImode:
1780 1.1 mrg return INTVAL (op) <= 31;
1781 1.1 mrg
1782 1.1 mrg case E_HImode:
1783 1.1 mrg return (INTVAL (op) % 2) == 0 && INTVAL (op) < 63;
1784 1.1 mrg
1785 1.1 mrg case E_SImode:
1786 1.1 mrg case E_SFmode:
1787 1.1 mrg return (INTVAL (op) % 4) == 0 && INTVAL (op) < 127;
1788 1.1 mrg
1789 1.1 mrg case E_DImode:
1790 1.1 mrg case E_DFmode:
1791 1.1 mrg return (INTVAL (op) % 4) == 0 && INTVAL (op) < 123;
1792 1.1 mrg
1793 1.1 mrg default:
1794 1.1 mrg return false;
1795 1.1 mrg }
1796 1.1 mrg }
1797 1.1 mrg
1798 1.1 mrg /* Return whether X is a legitimate memory address for a memory operand
1799 1.1 mrg of mode MODE.
1800 1.1 mrg
1801 1.1 mrg Legitimate addresses are defined in two variants: a strict variant
1802 1.1 mrg and a non-strict one. The STRICT parameter chooses which variant
1803 1.1 mrg is desired by the caller.
1804 1.1 mrg
1805 1.1 mrg The strict variant is used in the reload pass. It must be defined
1806 1.1 mrg so that any pseudo-register that has not been allocated a hard
1807 1.1 mrg register is considered a memory reference. This is because in
1808 1.1 mrg contexts where some kind of register is required, a
1809 1.1 mrg pseudo-register with no hard register must be rejected. For
1810 1.1 mrg non-hard registers, the strict variant should look up the
1811 1.1 mrg `reg_renumber' array; it should then proceed using the hard
1812 1.1 mrg register number in the array, or treat the pseudo as a memory
1813 1.1 mrg reference if the array holds `-1'.
1814 1.1 mrg
1815 1.1 mrg The non-strict variant is used in other passes. It must be
1816 1.1 mrg defined to accept all pseudo-registers in every context where some
1817 1.1 mrg kind of register is required. */
1818 1.1 mrg
1819 1.1 mrg static bool
1820 1.1 mrg visium_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1821 1.1 mrg {
1822 1.1 mrg rtx base;
1823 1.1 mrg unsigned int regno;
1824 1.1 mrg
1825 1.1 mrg /* If X is base+disp, check that we have an appropriate offset. */
1826 1.1 mrg if (GET_CODE (x) == PLUS)
1827 1.1 mrg {
1828 1.1 mrg if (!rtx_ok_for_offset_p (mode, XEXP (x, 1)))
1829 1.1 mrg return false;
1830 1.1 mrg base = XEXP (x, 0);
1831 1.1 mrg }
1832 1.1 mrg else
1833 1.1 mrg base = x;
1834 1.1 mrg
1835 1.1 mrg /* Now check the base: it must be either a register or a subreg thereof. */
1836 1.1 mrg if (GET_CODE (base) == SUBREG)
1837 1.1 mrg base = SUBREG_REG (base);
1838 1.1 mrg if (!REG_P (base))
1839 1.1 mrg return false;
1840 1.1 mrg
1841 1.1 mrg regno = REGNO (base);
1842 1.1 mrg
1843 1.1 mrg /* For the strict variant, the register must be REGNO_OK_FOR_BASE_P. */
1844 1.1 mrg if (strict)
1845 1.1 mrg return REGNO_OK_FOR_BASE_P (regno);
1846 1.1 mrg
1847 1.1 mrg /* For the non-strict variant, the register may also be a pseudo. */
1848 1.1 mrg return BASE_REGISTER_P (regno) || regno >= FIRST_PSEUDO_REGISTER;
1849 1.1 mrg }
1850 1.1 mrg
1851 1.1 mrg /* Try machine-dependent ways of modifying an illegitimate address
1852 1.1 mrg to be legitimate. If we find one, return the new, valid address.
1853 1.1 mrg This macro is used in only one place: `memory_address' in explow.cc.
1854 1.1 mrg
1855 1.1 mrg OLDX is the address as it was before break_out_memory_refs was called.
1856 1.1 mrg In some cases it is useful to look at this to decide what needs to be done.
1857 1.1 mrg
1858 1.1 mrg MODE and WIN are passed so that this macro can use
1859 1.1 mrg GO_IF_LEGITIMATE_ADDRESS.
1860 1.1 mrg
1861 1.1 mrg It is always safe for this macro to do nothing. It exists to recognize
1862 1.1 mrg opportunities to optimize the output.
1863 1.1 mrg
1864 1.1 mrg For Visium
1865 1.1 mrg
1866 1.1 mrg memory (reg + <out of range int>)
1867 1.1 mrg
1868 1.1 mrg is transformed to
1869 1.1 mrg
1870 1.1 mrg base_int = <out of range int> & ~mask
1871 1.1 mrg ptr_reg = reg + base_int
1872 1.1 mrg memory (ptr_reg + <out of range int> - base_int)
1873 1.1 mrg
1874 1.1 mrg Thus ptr_reg is a base register for a range of addresses,
1875 1.1 mrg which should help CSE.
1876 1.1 mrg
1877 1.1 mrg For a 1 byte reference mask is 0x1f
1878 1.1 mrg for a 2 byte reference mask is 0x3f
1879 1.1 mrg For a 4 byte reference mask is 0x7f
1880 1.1 mrg
1881 1.1 mrg This reflects the indexing range of the processor.
1882 1.1 mrg
1883 1.1 mrg For a > 4 byte reference the mask is 0x7f provided all of the words
1884 1.1 mrg can be accessed with the base address obtained. Otherwise a mask
1885 1.1 mrg of 0x3f is used.
1886 1.1 mrg
1887 1.1 mrg On rare occasions an unaligned base register value with an
1888 1.1 mrg unaligned offset is generated. Unaligned offsets are left alone for
1889 1.1 mrg this reason. */
1890 1.1 mrg
1891 1.1 mrg static rtx
1892 1.1 mrg visium_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1893 1.1 mrg machine_mode mode)
1894 1.1 mrg {
1895 1.1 mrg if (GET_CODE (x) == PLUS
1896 1.1 mrg && GET_CODE (XEXP (x, 1)) == CONST_INT
1897 1.1 mrg && GET_CODE (XEXP (x, 0)) == REG && mode != BLKmode)
1898 1.1 mrg {
1899 1.1 mrg int offset = INTVAL (XEXP (x, 1));
1900 1.1 mrg int size = GET_MODE_SIZE (mode);
1901 1.1 mrg int mask = (size == 1 ? 0x1f : (size == 2 ? 0x3f : 0x7f));
1902 1.1 mrg int mask1 = (size == 1 ? 0 : (size == 2 ? 1 : 3));
1903 1.1 mrg int offset_base = offset & ~mask;
1904 1.1 mrg
1905 1.1 mrg /* Check that all of the words can be accessed. */
1906 1.1 mrg if (size > 4 && size + offset - offset_base > 0x80)
1907 1.1 mrg offset_base = offset & ~0x3f;
1908 1.1 mrg if (offset_base != 0 && offset_base != offset && (offset & mask1) == 0)
1909 1.1 mrg {
1910 1.1 mrg rtx ptr_reg = force_reg (Pmode,
1911 1.1 mrg gen_rtx_PLUS (Pmode,
1912 1.1 mrg XEXP (x, 0),
1913 1.1 mrg GEN_INT (offset_base)));
1914 1.1 mrg
1915 1.1 mrg return plus_constant (Pmode, ptr_reg, offset - offset_base);
1916 1.1 mrg }
1917 1.1 mrg }
1918 1.1 mrg
1919 1.1 mrg return x;
1920 1.1 mrg }
1921 1.1 mrg
/* Perform a similar function to visium_legitimize_address, but this time
   for reload.  Generating new registers is not an option here.  Parts
   that need reloading are indicated by calling push_reload.  */

rtx
visium_legitimize_reload_address (rtx x, machine_mode mode, int opnum,
				  int type, int ind ATTRIBUTE_UNUSED)
{
  rtx newrtx, tem = NULL_RTX;

  /* BLKmode accesses have no displacement range to fix up.  */
  if (mode == BLKmode)
    return NULL_RTX;

  /* Try to fold a (plus X Y) with constant operands first.  */
  if (optimize && GET_CODE (x) == PLUS)
    tem = simplify_binary_operation (PLUS, GET_MODE (x), XEXP (x, 0),
				     XEXP (x, 1));

  newrtx = tem ? tem : x;
  if (GET_CODE (newrtx) == PLUS
      && GET_CODE (XEXP (newrtx, 1)) == CONST_INT
      && GET_CODE (XEXP (newrtx, 0)) == REG
      && BASE_REGISTER_P (REGNO (XEXP (newrtx, 0))))
    {
      int offset = INTVAL (XEXP (newrtx, 1));
      int size = GET_MODE_SIZE (mode);
      /* Reachable displacement mask for this access size (see
	 visium_legitimize_address): 0x1f, 0x3f or 0x7f.  */
      int mask = (size == 1 ? 0x1f : (size == 2 ? 0x3f : 0x7f));
      /* Low bits that must be clear for an aligned offset.  */
      int mask1 = (size == 1 ? 0 : (size == 2 ? 1 : 3));
      int offset_base = offset & ~mask;

      /* Check that all of the words can be accessed.  */
      if (size > 4 && size + offset - offset_base > 0x80)
	offset_base = offset & ~0x3f;

      /* Unaligned offsets are left alone.  */
      if (offset_base && (offset & mask1) == 0)
	{
	  rtx temp = gen_rtx_PLUS (Pmode,
				   XEXP (newrtx, 0), GEN_INT (offset_base));

	  /* Reload the new base sum into a base register and keep the
	     residual, now in-range, displacement.  */
	  x = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - offset_base));
	  push_reload (XEXP (x, 0), 0, &XEXP (x, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum,
		       (enum reload_type) type);
	  return x;
	}
    }

  return NULL_RTX;
}
1970 1.1 mrg
1971 1.1 mrg /* Return the cost of moving data of mode MODE from a register in class FROM to
1972 1.1 mrg one in class TO. A value of 2 is the default; other values are interpreted
1973 1.1 mrg relative to that. */
1974 1.1 mrg
1975 1.1 mrg static int
1976 1.1 mrg visium_register_move_cost (machine_mode mode, reg_class_t from,
1977 1.1 mrg reg_class_t to)
1978 1.1 mrg {
1979 1.1 mrg const int numwords = (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) ? 1 : 2;
1980 1.1 mrg
1981 1.1 mrg if (from == MDB || to == MDB)
1982 1.1 mrg return 4;
1983 1.1 mrg else if (from == MDC || to == MDC || (from == FP_REGS) != (to == FP_REGS))
1984 1.1 mrg return 4 * numwords;
1985 1.1 mrg else
1986 1.1 mrg return 2 * numwords;
1987 1.1 mrg }
1988 1.1 mrg
1989 1.1 mrg /* Return the cost of moving data of mode MODE between a register of class
1990 1.1 mrg CLASS and memory. IN is zero if the value is to be written to memory,
1991 1.1 mrg non-zero if it is to be read in. This cost is relative to those in
1992 1.1 mrg visium_register_move_cost. */
1993 1.1 mrg
1994 1.1 mrg static int
1995 1.1 mrg visium_memory_move_cost (machine_mode mode,
1996 1.1 mrg reg_class_t to ATTRIBUTE_UNUSED,
1997 1.1 mrg bool in)
1998 1.1 mrg {
1999 1.1 mrg /* Moving data in can be from PROM and this is expensive. */
2000 1.1 mrg if (in)
2001 1.1 mrg {
2002 1.1 mrg if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2003 1.1 mrg return 7;
2004 1.1 mrg else
2005 1.1 mrg return 13;
2006 1.1 mrg }
2007 1.1 mrg
2008 1.1 mrg /* Moving data out is mostly to RAM and should be cheaper. */
2009 1.1 mrg else
2010 1.1 mrg {
2011 1.1 mrg if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2012 1.1 mrg return 6;
2013 1.1 mrg else
2014 1.1 mrg return 12;
2015 1.1 mrg }
2016 1.1 mrg }
2017 1.1 mrg
/* Return the relative costs of expression X in *TOTAL.  Return true if
   the cost is final, false if the caller should also recurse into the
   operands of X (standard TARGET_RTX_COSTS convention).  */

static bool
visium_rtx_costs (rtx x, machine_mode mode, int outer_code ATTRIBUTE_UNUSED,
		  int opno ATTRIBUTE_UNUSED, int *total,
		  bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Small integers are as cheap as registers.  4-byte values can
	 be fetched as immediate constants - let's give that the cost
	 of an extra insn.  */
      *total = COSTS_N_INSNS (!satisfies_constraint_J (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* Symbolic references need to be synthesized.  */
      *total = COSTS_N_INSNS (2);
      return true;

    case CONST_DOUBLE:
      {
	/* One insn per half that cannot be loaded as an immediate.  */
	rtx high, low;
	split_double (x, &high, &low);
	*total =
	  COSTS_N_INSNS
	  (!satisfies_constraint_J (high) + !satisfies_constraint_J (low));
	return true;
      }

    case MULT:
      *total = COSTS_N_INSNS (3);
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == DImode)
	*total = COSTS_N_INSNS (64);
      else
	*total = COSTS_N_INSNS (32);
      return false;

    case PLUS:
    case MINUS:
    case NEG:
      /* DImode operations are performed directly on the ALU.  */
      if (mode == DImode)
	*total = COSTS_N_INSNS (2);
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* DImode operations are performed on the EAM instead.  */
      if (mode == DImode)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
      /* This matches the btst pattern.  */
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
	  && XEXP (x, 1) == const0_rtx
	  && XEXP (XEXP (x, 0), 1) == const1_rtx
	  && satisfies_constraint_K (XEXP (XEXP (x, 0), 2)))
	*total = COSTS_N_INSNS (1);
      return false;

    default:
      return false;
    }
}
2099 1.1 mrg
/* Split a double move of OPERANDS in MODE.

   Fill operands[2]/operands[3] with the first single-word move and
   operands[4]/operands[5] with the second one, choosing the word order
   so that the first move cannot clobber a value still needed by the
   second.  */

void
visium_split_double_move (rtx *operands, machine_mode mode)
{
  bool swap = false;

  /* Check register to register with overlap.  */
  if (GET_CODE (operands[0]) == REG
      && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    swap = true;

  /* Check memory to register where the base reg overlaps the destination.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == MEM)
    {
      rtx op = XEXP (operands[1], 0);

      if (GET_CODE (op) == SUBREG)
	op = SUBREG_REG (op);

      if (GET_CODE (op) == REG && REGNO (op) == REGNO (operands[0]))
	swap = true;

      /* The address may also be of the form base+index.  */
      if (GET_CODE (op) == PLUS)
	{
	  rtx x = XEXP (op, 0);
	  rtx y = XEXP (op, 1);

	  if (GET_CODE (x) == REG && REGNO (x) == REGNO (operands[0]))
	    swap = true;

	  if (GET_CODE (y) == REG && REGNO (y) == REGNO (operands[0]))
	    swap = true;
	}
    }

  if (swap)
    {
      /* Move word 1 before word 0.  */
      operands[2] = operand_subword (operands[0], 1, 1, mode);
      operands[3] = operand_subword (operands[1], 1, 1, mode);
      operands[4] = operand_subword (operands[0], 0, 1, mode);
      operands[5] = operand_subword (operands[1], 0, 1, mode);
    }
  else
    {
      /* Move word 0 before word 1.  */
      operands[2] = operand_subword (operands[0], 0, 1, mode);
      operands[3] = operand_subword (operands[1], 0, 1, mode);
      operands[4] = operand_subword (operands[0], 1, 1, mode);
      operands[5] = operand_subword (operands[1], 1, 1, mode);
    }
}
2152 1.1 mrg
/* Split a double addition or subtraction of operands.

   CODE is PLUS or MINUS; OP0 is the double-word destination, OP1 and OP2
   the double-word source operands.  Two single-word insns are emitted:
   the low-word operation which sets the carry flag, then the high-word
   operation which consumes it.  */

void
visium_split_double_add (enum rtx_code code, rtx op0, rtx op1, rtx op2)
{
  /* OP3..OP5 are the low words, OP6..OP8 the high words.  */
  rtx op3 = gen_lowpart (SImode, op0);
  rtx op4 = gen_lowpart (SImode, op1);
  rtx op5;
  rtx op6 = gen_highpart (SImode, op0);
  rtx op7 = (op1 == const0_rtx ? op1 : gen_highpart (SImode, op1));
  rtx op8;
  rtx x, pat, flags;

  /* If operand #2 is a small constant, then its high part is null.  */
  if (CONST_INT_P (op2))
    {
      HOST_WIDE_INT val = INTVAL (op2);

      /* Flip the operation so the constant half becomes non-negative.  */
      if (val < 0)
	{
	  code = (code == MINUS ? PLUS : MINUS);
	  val = -val;
	}

      op5 = gen_int_mode (val, SImode);
      op8 = const0_rtx;
    }
  else
    {
      op5 = gen_lowpart (SImode, op2);
      op8 = gen_highpart (SImode, op2);
    }

  /* Emit the low-word operation; all three variants set the carry.  */
  if (op4 == const0_rtx)
    pat = gen_negsi2_insn_set_carry (op3, op5);
  else if (code == MINUS)
    pat = gen_subsi3_insn_set_carry (op3, op4, op5);
  else
    pat = gen_addsi3_insn_set_carry (op3, op4, op5);
  emit_insn (pat);

  /* This is the plus_[plus_]sltu_flags or minus_[minus_]sltu_flags pattern:
     the high-word operation folds in the carry produced above.  */
  if (op8 == const0_rtx)
    x = op7;
  else
    x = gen_rtx_fmt_ee (code, SImode, op7, op8);
  flags = gen_rtx_REG (CCCmode, FLAGS_REGNUM);
  x = gen_rtx_fmt_ee (code, SImode, x, gen_rtx_LTU (SImode, flags, const0_rtx));
  pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
  XVECEXP (pat, 0, 0) = gen_rtx_SET (op6, x);
  flags = gen_rtx_REG (CCmode, FLAGS_REGNUM);
  XVECEXP (pat, 0, 1) = gen_rtx_CLOBBER (VOIDmode, flags);
  emit_insn (pat);

  /* Record that the flags register is used outside a compare/branch.  */
  visium_flags_exposed = true;
}
2209 1.1 mrg
/* Expand a copysign of OPERANDS in MODE: combine the magnitude of
   operand 1 with the sign of operand 2 into operand 0.  */

void
visium_expand_copysign (rtx *operands, machine_mode mode)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];
  rtx op2 = operands[2];
  /* Mask of all bits except the sign bit of a single-precision value.  */
  rtx mask = force_reg (SImode, GEN_INT (0x7fffffff));
  rtx x;

  /* We manually handle SFmode because the abs and neg instructions of
     the FPU on the MCM have a non-standard behavior wrt NaNs.  */
  gcc_assert (mode == SFmode);

  /* First get all the non-sign bits of op1.  */
  if (GET_CODE (op1) == CONST_DOUBLE)
    {
      /* For a constant, compute |op1| at compile time and, unless it is
	 zero and thus contributes no bits, load its bit pattern.  */
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op1)))
	op1 = simplify_unary_operation (ABS, mode, op1, mode);
      if (op1 != CONST0_RTX (mode))
	{
	  long l;
	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op1), l);
	  op1 = force_reg (SImode, gen_int_mode (l, SImode));
	}
    }
  else
    {
      op1 = copy_to_mode_reg (SImode, gen_lowpart (SImode, op1));
      op1 = force_reg (SImode, gen_rtx_AND (SImode, op1, mask));
    }

  /* Then get the sign bit of op2.  */
  mask = force_reg (SImode, gen_rtx_NOT (SImode, mask));
  op2 = copy_to_mode_reg (SImode, gen_lowpart (SImode, op2));
  op2 = force_reg (SImode, gen_rtx_AND (SImode, op2, mask));

  /* Finally OR the two values.  */
  if (op1 == CONST0_RTX (SFmode))
    x = op2;
  else
    x = force_reg (SImode, gen_rtx_IOR (SImode, op1, op2));

  /* And move the result to the destination.  */
  emit_insn (gen_rtx_SET (op0, gen_lowpart (SFmode, x)));
}
2257 1.1 mrg
/* Expand a cstore of OPERANDS in MODE for EQ/NE/LTU/GTU/GEU/LEU.  We generate
   the result in the C flag and use the ADC/SUBC instructions to write it into
   the destination register.

   It would also be possible to implement support for LT/GT/LE/GE by means of
   the RFLAG instruction followed by some shifts, but this can pessimize the
   generated code.  */

void
visium_expand_int_cstore (rtx *operands, machine_mode mode)
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[0], op1 = operands[2], op2 = operands[3], sltu;
  bool reverse = false;

  switch (code)
    {
    case EQ:
    case NE:
      /* We use a special comparison to get the result in the C flag:
	 with X = op1 ^ op2, ~X <u -1 holds iff X is non-zero, i.e. iff
	 op1 != op2.  EQ is then obtained by reversing the result.  */
      if (op2 != const0_rtx)
	op1 = force_reg (mode, gen_rtx_XOR (mode, op1, op2));
      op1 = gen_rtx_NOT (mode, op1);
      op2 = constm1_rtx;
      if (code == EQ)
	reverse = true;
      break;

    case LEU:
    case GEU:
      /* The result is naturally in the C flag modulo a couple of tricks.  */
      code = reverse_condition (code);
      reverse = true;

      /* ... fall through ... */

    case LTU:
    case GTU:
      /* GTU is LTU with the operands swapped.  */
      if (code == GTU)
	{
	  rtx tmp = op1;
	  op1 = op2;
	  op2 = tmp;
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* We need either a single ADC or a SUBC and a PLUS.  */
  sltu = gen_rtx_LTU (SImode, op1, op2);

  if (reverse)
    {
      /* Compute 1 - (op1 <u op2) into the destination.  */
      rtx tmp = copy_to_mode_reg (SImode, gen_rtx_NEG (SImode, sltu));
      emit_insn (gen_add3_insn (op0, tmp, const1_rtx));
    }
  else
    emit_insn (gen_rtx_SET (op0, sltu));
}
2319 1.1 mrg
/* Expand a cstore of OPERANDS in MODE for LT/GT/UNGE/UNLE.  We generate the
   result in the C flag and use the ADC/SUBC instructions to write it into
   the destination register.  This mirrors visium_expand_int_cstore for the
   floating-point conditions.  */

void
visium_expand_fp_cstore (rtx *operands,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[0], op1 = operands[2], op2 = operands[3], slt;
  bool reverse = false;

  switch (code)
    {
    case UNLE:
    case UNGE:
      /* The result is naturally in the C flag modulo a couple of tricks.  */
      code = reverse_condition_maybe_unordered (code);
      reverse = true;

      /* ... fall through ... */

    case LT:
    case GT:
      /* GT is LT with the operands swapped.  */
      if (code == GT)
	{
	  rtx tmp = op1;
	  op1 = op2;
	  op2 = tmp;
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* We need either a single ADC or a SUBC and a PLUS.  */
  slt = gen_rtx_LT (SImode, op1, op2);

  if (reverse)
    {
      /* Compute 1 - (op1 < op2) into the destination.  */
      rtx tmp = copy_to_mode_reg (SImode, gen_rtx_NEG (SImode, slt));
      emit_insn (gen_add3_insn (op0, tmp, const1_rtx));
    }
  else
    emit_insn (gen_rtx_SET (op0, slt));
}
2367 1.1 mrg
/* Split a compare-and-store with CODE, operands OP2 and OP3, combined with
   operation with OP_CODE, operands OP0 and OP1.  Emits the comparison
   setting the flags register, then a flag-consuming insn storing into
   OP0.  */

void
visium_split_cstore (enum rtx_code op_code, rtx op0, rtx op1,
		     enum rtx_code code, rtx op2, rtx op3)
{
  machine_mode cc_mode = visium_select_cc_mode (code, op2, op3);

  /* If a FP cstore was reversed, then it was originally UNGE/UNLE.  */
  if (cc_mode == CCFPEmode && (op_code == NEG || op_code == MINUS))
    cc_mode = CCFPmode;

  /* Emit the comparison setting the flags register.  */
  rtx flags = gen_rtx_REG (cc_mode, FLAGS_REGNUM);
  rtx x = gen_rtx_COMPARE (cc_mode, op2, op3);
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  /* Build the flag-consuming expression: SET stores the flag itself,
     NEG stores its negation, PLUS/MINUS combines it with OP1.  */
  x = gen_rtx_fmt_ee (code, SImode, flags, const0_rtx);
  switch (op_code)
    {
    case SET:
      break;
    case NEG:
      x = gen_rtx_NEG (SImode, x);
      break;
    case PLUS:
    case MINUS:
      x = gen_rtx_fmt_ee (op_code, SImode, op1, x);
      break;
    default:
      gcc_unreachable ();
    }

  /* The store insn both uses and clobbers the flags register.  */
  rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
  XVECEXP (pat, 0, 0) = gen_rtx_SET (op0, x);
  flags = gen_rtx_REG (CCmode, FLAGS_REGNUM);
  XVECEXP (pat, 0, 1) = gen_rtx_CLOBBER (VOIDmode, flags);
  emit_insn (pat);

  /* Record that the flags register is used outside a compare/branch.  */
  visium_flags_exposed = true;
}
2410 1.1 mrg
/* Generate a call to a library function to move BYTES_RTX bytes from SRC with
   address SRC_REG to DST with address DST_REG in 4-byte chunks.

   BYTES_RTX must be a CONST_INT (its value is read with UINTVAL).  The
   residual 1-3 bytes after the last full word are copied inline.  */

static void
expand_block_move_4 (rtx dst, rtx dst_reg, rtx src, rtx src_reg, rtx bytes_rtx)
{
  unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
  unsigned int rem = bytes % 4;

  if (TARGET_BMI)
    {
      unsigned int i;
      rtx insn;

      /* The block-move instruction takes its operands in the fixed hard
	 registers 1 (destination), 2 (source) and 3 (count).  */
      emit_move_insn (regno_reg_rtx[1], dst_reg);
      emit_move_insn (regno_reg_rtx[2], src_reg);
      emit_move_insn (regno_reg_rtx[3], bytes_rtx);

      /* Build a parallel: the move itself, a use of the count register,
	 and clobbers of hard registers 1 through 6.  */
      insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (8));
      XVECEXP (insn, 0, 0)
	= gen_rtx_SET (replace_equiv_address_nv (dst, regno_reg_rtx[1]),
		       replace_equiv_address_nv (src, regno_reg_rtx[2]));
      XVECEXP (insn, 0, 1) = gen_rtx_USE (VOIDmode, regno_reg_rtx[3]);
      for (i = 1; i <= 6; i++)
	XVECEXP (insn, 0, 1 + i)
	  = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
      emit_insn (insn);
    }
  else
    /* Copy the full words with a library call taking a word count.  */
    emit_library_call (long_int_memcpy_libfunc, LCT_NORMAL, VOIDmode,
		       dst_reg, Pmode,
		       src_reg, Pmode,
		       convert_to_mode (TYPE_MODE (sizetype),
					GEN_INT (bytes >> 2),
					TYPE_UNSIGNED (sizetype)),
		       TYPE_MODE (sizetype));
  if (rem == 0)
    return;

  /* Copy the remaining 1-3 bytes inline; BYTES becomes the offset of
     the first leftover byte.  */
  dst = replace_equiv_address_nv (dst, dst_reg);
  src = replace_equiv_address_nv (src, src_reg);
  bytes -= rem;

  if (rem > 1)
    {
      emit_move_insn (adjust_address_nv (dst, HImode, bytes),
		      adjust_address_nv (src, HImode, bytes));
      bytes += 2;
      rem -= 2;
    }

  if (rem > 0)
    emit_move_insn (adjust_address_nv (dst, QImode, bytes),
		    adjust_address_nv (src, QImode, bytes));
}
2466 1.1 mrg
2467 1.1 mrg /* Generate a call to a library function to move BYTES_RTX bytes from SRC with
2468 1.1 mrg address SRC_REG to DST with address DST_REG in 2-bytes chunks. */
2469 1.1 mrg
static void
expand_block_move_2 (rtx dst, rtx dst_reg, rtx src, rtx src_reg, rtx bytes_rtx)
{
  /* BYTES_RTX is known to be a positive CONST_INT (see the caller).  */
  unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
  /* At most a single trailing byte is left over.  */
  unsigned int rem = bytes % 2;

  /* Call the 2-byte-chunk library routine with the number of halfwords
     (bytes / 2) to copy.  */
  emit_library_call (wrd_memcpy_libfunc, LCT_NORMAL, VOIDmode,
		     dst_reg, Pmode,
		     src_reg, Pmode,
		     convert_to_mode (TYPE_MODE (sizetype),
				      GEN_INT (bytes >> 1),
				      TYPE_UNSIGNED (sizetype)),
		     TYPE_MODE (sizetype));
  if (rem == 0)
    return;

  /* Copy the last byte inline.  */
  dst = replace_equiv_address_nv (dst, dst_reg);
  src = replace_equiv_address_nv (src, src_reg);
  bytes -= rem;

  emit_move_insn (adjust_address_nv (dst, QImode, bytes),
		  adjust_address_nv (src, QImode, bytes));
}
2493 1.1 mrg
2494 1.1 mrg /* Generate a call to a library function to move BYTES_RTX bytes from address
2495 1.1 mrg SRC_REG to address DST_REG in 1-byte chunks. */
2496 1.1 mrg
2497 1.1 mrg static void
2498 1.1 mrg expand_block_move_1 (rtx dst_reg, rtx src_reg, rtx bytes_rtx)
2499 1.1 mrg {
2500 1.1 mrg emit_library_call (byt_memcpy_libfunc, LCT_NORMAL, VOIDmode,
2501 1.1 mrg dst_reg, Pmode,
2502 1.1 mrg src_reg, Pmode,
2503 1.1 mrg convert_to_mode (TYPE_MODE (sizetype),
2504 1.1 mrg bytes_rtx,
2505 1.1 mrg TYPE_UNSIGNED (sizetype)),
2506 1.1 mrg TYPE_MODE (sizetype));
2507 1.1 mrg }
2508 1.1 mrg
2509 1.1 mrg /* Generate a call to a library function to set BYTES_RTX bytes of DST with
2510 1.1 mrg address DST_REG to VALUE_RTX in 4-byte chunks. */
2511 1.1 mrg
static void
expand_block_set_4 (rtx dst, rtx dst_reg, rtx value_rtx, rtx bytes_rtx)
{
  /* BYTES_RTX is known to be a positive CONST_INT (see the caller).  */
  unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
  /* Number of trailing bytes that cannot be set in 4-byte chunks.  */
  unsigned int rem = bytes % 4;

  /* The value is passed to the library routine as a word; the routine is
     called with the number of words (bytes / 4) to set.  */
  value_rtx = convert_to_mode (Pmode, value_rtx, 1);
  emit_library_call (long_int_memset_libfunc, LCT_NORMAL, VOIDmode,
		     dst_reg, Pmode,
		     value_rtx, Pmode,
		     convert_to_mode (TYPE_MODE (sizetype),
				      GEN_INT (bytes >> 2),
				      TYPE_UNSIGNED (sizetype)),
		     TYPE_MODE (sizetype));
  if (rem == 0)
    return;

  /* Set the remaining 1 to 3 bytes inline; BYTES is first rounded down
     to the 4-byte boundary.  */
  dst = replace_equiv_address_nv (dst, dst_reg);
  bytes -= rem;

  if (rem > 1)
    {
      if (CONST_INT_P (value_rtx))
	{
	  /* Replicate the low byte of the value into both bytes of a
	     halfword constant and store it in one go.  */
	  const unsigned HOST_WIDE_INT value = UINTVAL (value_rtx) & 0xff;
	  emit_move_insn (adjust_address_nv (dst, HImode, bytes),
			  gen_int_mode ((value << 8) | value, HImode));
	}
      else
	{
	  /* Otherwise store the low byte of the value twice.  */
	  rtx temp = convert_to_mode (QImode, value_rtx, 1);
	  emit_move_insn (adjust_address_nv (dst, QImode, bytes), temp);
	  emit_move_insn (adjust_address_nv (dst, QImode, bytes + 1), temp);
	}
      bytes += 2;
      rem -= 2;
    }

  if (rem > 0)
    /* Finally the last byte, if any.  */
    emit_move_insn (adjust_address_nv (dst, QImode, bytes),
		    convert_to_mode (QImode, value_rtx, 1));
}
2554 1.1 mrg
2555 1.1 mrg /* Generate a call to a library function to set BYTES_RTX bytes of DST with
2556 1.1 mrg address DST_REG to VALUE_RTX in 2-byte chunks. */
2557 1.1 mrg
static void
expand_block_set_2 (rtx dst, rtx dst_reg, rtx value_rtx, rtx bytes_rtx)
{
  /* BYTES_RTX is known to be a positive CONST_INT (see the caller).  */
  unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
  /* At most a single trailing byte is left over.  */
  unsigned int rem = bytes % 2;

  /* The value is passed to the library routine as a word; the routine is
     called with the number of halfwords (bytes / 2) to set.  */
  value_rtx = convert_to_mode (Pmode, value_rtx, 1);
  emit_library_call (wrd_memset_libfunc, LCT_NORMAL, VOIDmode,
		     dst_reg, Pmode,
		     value_rtx, Pmode,
		     convert_to_mode (TYPE_MODE (sizetype),
				      GEN_INT (bytes >> 1),
				      TYPE_UNSIGNED (sizetype)),
		     TYPE_MODE (sizetype));
  if (rem == 0)
    return;

  /* Set the last byte inline.  */
  dst = replace_equiv_address_nv (dst, dst_reg);
  bytes -= rem;

  emit_move_insn (adjust_address_nv (dst, QImode, bytes),
		  convert_to_mode (QImode, value_rtx, 1));
}
2581 1.1 mrg
2582 1.1 mrg /* Generate a call to a library function to set BYTES_RTX bytes at address
2583 1.1 mrg DST_REG to VALUE_RTX in 1-byte chunks. */
2584 1.1 mrg
2585 1.1 mrg static void
2586 1.1 mrg expand_block_set_1 (rtx dst_reg, rtx value_rtx, rtx bytes_rtx)
2587 1.1 mrg {
2588 1.1 mrg value_rtx = convert_to_mode (Pmode, value_rtx, 1);
2589 1.1 mrg emit_library_call (byt_memset_libfunc, LCT_NORMAL, VOIDmode,
2590 1.1 mrg dst_reg, Pmode,
2591 1.1 mrg value_rtx, Pmode,
2592 1.1 mrg convert_to_mode (TYPE_MODE (sizetype),
2593 1.1 mrg bytes_rtx,
2594 1.1 mrg TYPE_UNSIGNED (sizetype)),
2595 1.1 mrg TYPE_MODE (sizetype));
2596 1.1 mrg }
2597 1.1 mrg
2598 1.1 mrg /* Expand string/block move operations.
2599 1.1 mrg
2600 1.1 mrg operands[0] is the pointer to the destination.
2601 1.1 mrg operands[1] is the pointer to the source.
2602 1.1 mrg operands[2] is the number of bytes to move.
2603 1.1 mrg operands[3] is the alignment.
2604 1.1 mrg
2605 1.1 mrg Return 1 upon success, 0 otherwise. */
2606 1.1 mrg
2607 1.1 mrg int
2608 1.1 mrg visium_expand_block_move (rtx *operands)
2609 1.1 mrg {
2610 1.1 mrg rtx dst = operands[0];
2611 1.1 mrg rtx src = operands[1];
2612 1.1 mrg rtx bytes_rtx = operands[2];
2613 1.1 mrg rtx align_rtx = operands[3];
2614 1.1 mrg const int align = INTVAL (align_rtx);
2615 1.1 mrg rtx dst_reg, src_reg;
2616 1.1 mrg tree dst_expr, src_expr;
2617 1.1 mrg
2618 1.1 mrg /* We only handle a fixed number of bytes for now. */
2619 1.1 mrg if (!CONST_INT_P (bytes_rtx) || INTVAL (bytes_rtx) <= 0)
2620 1.1 mrg return 0;
2621 1.1 mrg
2622 1.1 mrg /* Copy the addresses into scratch registers. */
2623 1.1 mrg dst_reg = copy_addr_to_reg (XEXP (dst, 0));
2624 1.1 mrg src_reg = copy_addr_to_reg (XEXP (src, 0));
2625 1.1 mrg
2626 1.1 mrg /* Move the data with the appropriate granularity. */
2627 1.1 mrg if (align >= 4)
2628 1.1 mrg expand_block_move_4 (dst, dst_reg, src, src_reg, bytes_rtx);
2629 1.1 mrg else if (align >= 2)
2630 1.1 mrg expand_block_move_2 (dst, dst_reg, src, src_reg, bytes_rtx);
2631 1.1 mrg else
2632 1.1 mrg expand_block_move_1 (dst_reg, src_reg, bytes_rtx);
2633 1.1 mrg
2634 1.1 mrg /* Since DST and SRC are passed to a libcall, mark the corresponding
2635 1.1 mrg tree EXPR as addressable. */
2636 1.1 mrg dst_expr = MEM_EXPR (dst);
2637 1.1 mrg src_expr = MEM_EXPR (src);
2638 1.1 mrg if (dst_expr)
2639 1.1 mrg mark_addressable (dst_expr);
2640 1.1 mrg if (src_expr)
2641 1.1 mrg mark_addressable (src_expr);
2642 1.1 mrg
2643 1.1 mrg return 1;
2644 1.1 mrg }
2645 1.1 mrg
2646 1.1 mrg /* Expand string/block set operations.
2647 1.1 mrg
2648 1.1 mrg operands[0] is the pointer to the destination.
2649 1.1 mrg operands[1] is the number of bytes to set.
2650 1.1 mrg operands[2] is the source value.
2651 1.1 mrg operands[3] is the alignment.
2652 1.1 mrg
2653 1.1 mrg Return 1 upon success, 0 otherwise. */
2654 1.1 mrg
2655 1.1 mrg int
2656 1.1 mrg visium_expand_block_set (rtx *operands)
2657 1.1 mrg {
2658 1.1 mrg rtx dst = operands[0];
2659 1.1 mrg rtx bytes_rtx = operands[1];
2660 1.1 mrg rtx value_rtx = operands[2];
2661 1.1 mrg rtx align_rtx = operands[3];
2662 1.1 mrg const int align = INTVAL (align_rtx);
2663 1.1 mrg rtx dst_reg;
2664 1.1 mrg tree dst_expr;
2665 1.1 mrg
2666 1.1 mrg /* We only handle a fixed number of bytes for now. */
2667 1.1 mrg if (!CONST_INT_P (bytes_rtx) || INTVAL (bytes_rtx) <= 0)
2668 1.1 mrg return 0;
2669 1.1 mrg
2670 1.1 mrg /* Copy the address into a scratch register. */
2671 1.1 mrg dst_reg = copy_addr_to_reg (XEXP (dst, 0));
2672 1.1 mrg
2673 1.1 mrg /* Set the data with the appropriate granularity. */
2674 1.1 mrg if (align >= 4)
2675 1.1 mrg expand_block_set_4 (dst, dst_reg, value_rtx, bytes_rtx);
2676 1.1 mrg else if (align >= 2)
2677 1.1 mrg expand_block_set_2 (dst, dst_reg, value_rtx, bytes_rtx);
2678 1.1 mrg else
2679 1.1 mrg expand_block_set_1 (dst_reg, value_rtx, bytes_rtx);
2680 1.1 mrg
2681 1.1 mrg /* Since DST is passed to a libcall, mark the corresponding
2682 1.1 mrg tree EXPR as addressable. */
2683 1.1 mrg dst_expr = MEM_EXPR (dst);
2684 1.1 mrg if (dst_expr)
2685 1.1 mrg mark_addressable (dst_expr);
2686 1.1 mrg
2687 1.1 mrg return 1;
2688 1.1 mrg }
2689 1.1 mrg
2690 1.1 mrg /* Initialize a trampoline. M_TRAMP is an RTX for the memory block for the
2691 1.1 mrg trampoline, FNDECL is the FUNCTION_DECL for the nested function and
2692 1.1 mrg STATIC_CHAIN is an RTX for the static chain value that should be passed
2693 1.1 mrg to the function when it is called. */
2694 1.1 mrg
static void
visium_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
{
  /* FNADDR is the address of the nested function, ADDR the address of
     the trampoline memory block.  */
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx addr = XEXP (m_tramp, 0);

  /* The trampoline initialization sequence is:

	moviu r9,%u FUNCTION
	movil r9,%l FUNCTION
	[nop]
	moviu r20,%u STATIC
	bra tr,r9,r9
	 movil r20,%l STATIC

     We don't use r0 as the destination register of the branch because we want
     the Branch Pre-decode Logic of the GR6 to use the Address Load Array to
     predict the branch target.  */

  /* Word 0: moviu r9,%u FUNCTION, i.e. 0x04a90000 | high 16 bits.  */
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 0)),
		  plus_constant (SImode,
				 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
					       16, NULL_RTX, 1),
				 0x04a90000));

  /* Word 1: movil r9,%l FUNCTION, i.e. 0x04890000 | low 16 bits.  */
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 4)),
		  plus_constant (SImode,
				 expand_and (SImode, fnaddr, GEN_INT (0xffff),
					     NULL_RTX),
				 0x04890000));

  if (visium_cpu == PROCESSOR_GR6)
    {
      /* For the GR6, the BRA insn must be aligned on a 64-bit boundary.  */
      gcc_assert (TRAMPOLINE_ALIGNMENT >= 64);
      /* NOTE(review): this zero (nop) word is stored at offset 12, but the
	 BRA word is unconditionally stored at the same offset below and the
	 other stores are not shifted to make room for the nop, so this
	 store appears to be dead — confirm against upstream GCC whether
	 the GR6 alignment adjustment is effective.  */
      emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 12)),
		      gen_int_mode (0, SImode));
    }

  /* Word 2: moviu r20,%u STATIC, i.e. 0x04b40000 | high 16 bits.  */
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 8)),
		  plus_constant (SImode,
				 expand_shift (RSHIFT_EXPR, SImode,
					       static_chain,
					       16, NULL_RTX, 1),
				 0x04b40000));

  /* Word 3: bra tr,r9,r9, i.e. the fixed encoding 0xff892404.  */
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 12)),
		  gen_int_mode (0xff892404, SImode));

  /* Word 4: movil r20,%l STATIC, i.e. 0x04940000 | low 16 bits (executed
     in the delay slot of the branch).  */
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (Pmode, addr, 16)),
		  plus_constant (SImode,
				 expand_and (SImode, static_chain,
					     GEN_INT (0xffff), NULL_RTX),
				 0x04940000));

  /* Finally call the run-time routine that finalizes the trampoline;
     judging by its name, it presumably sets the parity bits expected by
     the instruction fetcher — see libgcc for the details.  */
  emit_library_call (set_trampoline_parity_libfunc, LCT_NORMAL, VOIDmode,
		     addr, SImode);
}
2753 1.1 mrg
2754 1.1 mrg /* Return true if the current function must have and use a frame pointer. */
2755 1.1 mrg
2756 1.1 mrg static bool
2757 1.1 mrg visium_frame_pointer_required (void)
2758 1.1 mrg {
2759 1.1 mrg /* The frame pointer is required if the function isn't leaf to be able to
2760 1.1 mrg do manual stack unwinding. */
2761 1.1 mrg if (!crtl->is_leaf)
2762 1.1 mrg return true;
2763 1.1 mrg
2764 1.1 mrg /* If the stack pointer is dynamically modified in the function, it cannot
2765 1.1 mrg serve as the frame pointer. */
2766 1.1 mrg if (!crtl->sp_is_unchanging)
2767 1.1 mrg return true;
2768 1.1 mrg
2769 1.1 mrg /* If the function receives nonlocal gotos, it needs to save the frame
2770 1.1 mrg pointer in the nonlocal_goto_save_area object. */
2771 1.1 mrg if (cfun->has_nonlocal_label)
2772 1.1 mrg return true;
2773 1.1 mrg
2774 1.1 mrg /* The frame also needs to be established in some special cases. */
2775 1.1 mrg if (visium_frame_needed)
2776 1.1 mrg return true;
2777 1.1 mrg
2778 1.1 mrg return false;
2779 1.1 mrg }
2780 1.1 mrg
2781 1.1 mrg /* Profiling support. Just a call to MCOUNT is needed. No labelled counter
2782 1.1 mrg location is involved. Proper support for __builtin_return_address is also
2783 1.1 mrg required, which is fairly straightforward provided a frame gets created. */
2784 1.1 mrg
void
visium_profile_hook (void)
{
  /* Force the creation of the frame, as stated in the comment above,
     so that __builtin_return_address keeps working.  */
  visium_frame_needed = true;

  /* Just call mcount, with no argument: no labelled counter location
     is involved.  */
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "mcount"), LCT_NORMAL,
		     VOIDmode);
}
2792 1.1 mrg
2793 1.1 mrg /* A C expression whose value is RTL representing the address in a stack frame
2794 1.1 mrg where the pointer to the caller's frame is stored. Assume that FRAMEADDR is
2795 1.1 mrg an RTL expression for the address of the stack frame itself.
2796 1.1 mrg
2797 1.1 mrg If you don't define this macro, the default is to return the value of
2798 1.1 mrg FRAMEADDR--that is, the stack frame address is also the address of the stack
2799 1.1 mrg word that points to the previous frame. */
2800 1.1 mrg
rtx
visium_dynamic_chain_address (rtx frame)
{
  /* This is the default (the pointer to the caller's frame is stored at
     the frame address itself), but we need to make sure the frame gets
     created so that this word actually exists in memory.  */
  visium_frame_needed = true;
  return frame;
}
2808 1.1 mrg
2809 1.1 mrg /* A C expression whose value is RTL representing the value of the return
2810 1.1 mrg address for the frame COUNT steps up from the current frame, after the
2811 1.1 mrg prologue. FRAMEADDR is the frame pointer of the COUNT frame, or the frame
2812 1.1 mrg pointer of the COUNT - 1 frame if `RETURN_ADDR_IN_PREVIOUS_FRAME' is
2813 1.1 mrg defined.
2814 1.1 mrg
2815 1.1 mrg The value of the expression must always be the correct address when COUNT is
2816 1.1 mrg zero, but may be `NULL_RTX' if there is not way to determine the return
2817 1.1 mrg address of other frames. */
2818 1.1 mrg
2819 1.1 mrg rtx
2820 1.1 mrg visium_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
2821 1.1 mrg {
2822 1.1 mrg /* Dont try to compute anything other than frame zero. */
2823 1.1 mrg if (count != 0)
2824 1.1 mrg return NULL_RTX;
2825 1.1 mrg
2826 1.1 mrg visium_frame_needed = true;
2827 1.1 mrg return
2828 1.1 mrg gen_frame_mem (Pmode, plus_constant (Pmode, hard_frame_pointer_rtx, 4));
2829 1.1 mrg }
2830 1.1 mrg
2831 1.1 mrg /* Helper function for EH_RETURN_HANDLER_RTX. Return the RTX representing a
2832 1.1 mrg location in which to store the address of an exception handler to which we
2833 1.1 mrg should return. */
2834 1.1 mrg
2835 1.1 mrg rtx
2836 1.1 mrg visium_eh_return_handler_rtx (void)
2837 1.1 mrg {
2838 1.1 mrg rtx mem
2839 1.1 mrg = gen_frame_mem (SImode, plus_constant (Pmode, hard_frame_pointer_rtx, 4));
2840 1.1 mrg MEM_VOLATILE_P (mem) = 1;
2841 1.1 mrg return mem;
2842 1.1 mrg }
2843 1.1 mrg
/* Allocate a fresh, zeroed machine_function structure; installed as the
   init_machine_status hook by visium_init_expanders.  */

static struct machine_function *
visium_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
2849 1.1 mrg
2850 1.1 mrg /* The per-function data machinery is needed to indicate when a frame
2851 1.1 mrg is required. */
2852 1.1 mrg
void
visium_init_expanders (void)
{
  /* Install the allocator of the per-function data.  */
  init_machine_status = visium_init_machine_status;
}
2858 1.1 mrg
2859 1.1 mrg /* Given a comparison code (EQ, NE, etc.) and the operands of a COMPARE,
2860 1.1 mrg return the mode to be used for the comparison. */
2861 1.1 mrg
machine_mode
visium_select_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
  /* Floating-point comparisons.  Per the GCC convention, CCFPEmode is
     used for the comparisons that must signal on unordered operands and
     CCFPmode for the others.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      switch (code)
	{
	case EQ:
	case NE:
	case ORDERED:
	case UNORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	/* These 2 comparison codes are not supported.  */
	case UNEQ:
	case LTGT:
	default:
	  gcc_unreachable ();
	}
    }

  /* This is for the cmp<mode>_sne pattern.  */
  if (op1 == constm1_rtx)
    return CCCmode;

  /* This is for the add<mode>3_insn_set_carry pattern.  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (XEXP (op0, 0), op1))
    return CCCmode;

  /* This is for the {add,sub,neg}<mode>3_insn_set_overflow pattern.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op1) == UNSPEC
      && (XINT (op1, 1) == UNSPEC_ADDV
	  || XINT (op1, 1) == UNSPEC_SUBV
	  || XINT (op1, 1) == UNSPEC_NEGV))
    return CCVmode;

  /* Comparisons against a nonzero value always need a full COMPARE.  */
  if (op1 != const0_rtx)
    return CCmode;

  /* For comparisons with zero, the mode depends on which insn computed
     OP0, i.e. on which flags that insn sets the same way as a COMPARE
     with zero would.  */
  switch (GET_CODE (op0))
    {
    case PLUS:
    case MINUS:
    case NEG:
    case ASHIFT:
    case LTU:
    case LT:
      /* The C and V flags may be set differently from a COMPARE with zero.
	 The consequence is that a comparison operator testing C or V must
	 be turned into another operator not testing C or V and yielding
	 the same result for a comparison with zero.  That's possible for
	 GE/LT which become NC/NS respectively, but not for GT/LE for which
	 the altered operator doesn't exist on the Visium.  */
      return CCNZmode;

    case ZERO_EXTRACT:
      /* This is a btst, the result is in C instead of Z.  */
      return CCCmode;

    case REG:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case ASHIFTRT:
    case LSHIFTRT:
    case TRUNCATE:
    case SIGN_EXTEND:
      /* Pretend that the flags are set as for a COMPARE with zero.
	 That's mostly true, except for the 2 right shift insns that
	 will set the C flag.  But the C flag is relevant only for
	 the unsigned comparison operators and they are eliminated
	 when applied to a comparison with zero.  */
      return CCmode;

    /* ??? Cater to the junk RTXes sent by try_merge_compare.  */
    case ASM_OPERANDS:
    case CALL:
    case CONST_INT:
    case LO_SUM:
    case HIGH:
    case MEM:
    case UNSPEC:
    case ZERO_EXTEND:
      return CCmode;

    default:
      gcc_unreachable ();
    }
}
2965 1.1 mrg
2966 1.1 mrg /* Split a compare-and-branch with CODE, operands OP0 and OP1, and LABEL. */
2967 1.1 mrg
2968 1.1 mrg void
2969 1.1 mrg visium_split_cbranch (enum rtx_code code, rtx op0, rtx op1, rtx label)
2970 1.1 mrg {
2971 1.1 mrg machine_mode cc_mode = visium_select_cc_mode (code, op0, op1);
2972 1.1 mrg rtx flags = gen_rtx_REG (cc_mode, FLAGS_REGNUM);
2973 1.1 mrg
2974 1.1 mrg rtx x = gen_rtx_COMPARE (cc_mode, op0, op1);
2975 1.1 mrg x = gen_rtx_SET (flags, x);
2976 1.1 mrg emit_insn (x);
2977 1.1 mrg
2978 1.1 mrg x = gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
2979 1.1 mrg x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, gen_rtx_LABEL_REF (Pmode, label),
2980 1.1 mrg pc_rtx);
2981 1.1 mrg x = gen_rtx_SET (pc_rtx, x);
2982 1.1 mrg emit_jump_insn (x);
2983 1.1 mrg
2984 1.1 mrg visium_flags_exposed = true;
2985 1.1 mrg }
2986 1.1 mrg
2987 1.1 mrg /* Branch instructions on the Visium.
2988 1.1 mrg
2989 1.1 mrg Setting aside the interrupt-handling specific instructions, the ISA has
2990 1.1 mrg two branch instructions: BRR and BRA. The former is used to implement
2991 1.1 mrg short branches (+/- 2^17) within functions and its target is encoded in
2992 1.1 mrg the instruction. The latter is used to implement all the other types
2993 1.1 mrg of control flow changes and its target might not be statically known
2994 1.1 mrg or even easily predictable at run time. Here's a complete summary of
2995 1.1 mrg the patterns that generate a BRA instruction:
2996 1.1 mrg
2997 1.1 mrg 1. Indirect jump
2998 1.1 mrg 2. Table jump
2999 1.1 mrg 3. Call
3000 1.1 mrg 4. Sibling call
3001 1.1 mrg 5. Return
3002 1.1 mrg 6. Long branch
3003 1.1 mrg 7. Trampoline
3004 1.1 mrg
3005 1.1 mrg Among these patterns, only the return (5) and the long branch (6) can be
3006 1.1 mrg conditional; all the other patterns are always unconditional.
3007 1.1 mrg
3008 1.1 mrg The following algorithm can be used to identify the pattern for which
3009 1.1 mrg the BRA instruction was generated and work out its target:
3010 1.1 mrg
3011 1.1 mrg A. If the source is r21 and the destination is r0, this is a return (5)
3012 1.1 mrg and the target is the caller (i.e. the value of r21 on function's
3013 1.1 mrg entry).
3014 1.1 mrg
3015 1.1 mrg B. If the source is rN, N != 21 and the destination is r0, this is either
3016 1.1 mrg an indirect jump or a table jump (1, 2) and the target is not easily
3017 1.1 mrg predictable.
3018 1.1 mrg
3019 1.1 mrg C. If the source is rN, N != 21 and the destination is r21, this is a call
3020 1.1 mrg (3) and the target is given by the preceding MOVIL/MOVIU pair for rN,
3021 1.1 mrg unless this is an indirect call in which case the target is not easily
3022 1.1 mrg predictable.
3023 1.1 mrg
3024 1.1 mrg D. If the source is rN, N != 21 and the destination is also rN, this is
3025 1.1 mrg either a sibling call or a trampoline (4, 7) and the target is given
3026 1.1 mrg by the preceding MOVIL/MOVIU pair for rN.
3027 1.1 mrg
3028 1.1 mrg E. If the source is r21 and the destination is also r21, this is a long
3029 1.1 mrg branch (6) and the target is given by the preceding MOVIL/MOVIU pair
3030 1.1 mrg for r21.
3031 1.1 mrg
3032 1.1 mrg The other combinations are not used. This implementation has been devised
3033 1.1 mrg to accommodate the branch predictor of the GR6 but is used unconditionally
3034 1.1 mrg by the compiler, i.e. including for earlier processors. */
3035 1.1 mrg
3036 1.1 mrg /* Output a conditional/unconditional branch to LABEL. COND is the string
3037 1.1 mrg condition. INSN is the instruction. */
3038 1.1 mrg
static const char *
output_branch (rtx label, const char *cond, rtx_insn *insn)
{
  /* Scratch buffer in which the final mnemonic is assembled; large
     enough for the longest template built below.  */
  char str[64];
  rtx operands[2];

  gcc_assert (cond);
  operands[0] = label;

  /* If the length of the instruction is greater than 12, then this is a
     long branch and we need to work harder to emit it properly.  */
  if (get_attr_length (insn) > 12)
    {
      bool spilled;

      /* If the link register has been saved, then we use it.  */
      if (current_function_saves_lr ())
	{
	  operands[1] = regno_reg_rtx [LINK_REGNUM];
	  spilled = false;
	}

      /* Or else, if the long-branch register isn't live, we use it.  */
      else if (!df_regs_ever_live_p (long_branch_regnum))
	{
	  operands[1] = regno_reg_rtx [long_branch_regnum];
	  spilled = false;
	}

      /* Otherwise, we will use the long-branch register but we need to
	 spill it to the stack and reload it at the end.  We should have
	 reserved the LR slot for this purpose.  */
      else
	{
	  operands[1] = regno_reg_rtx [long_branch_regnum];
	  spilled = true;
	  gcc_assert (current_function_has_lr_slot ());
	}

      /* First emit the spill to the stack:

	   insn_in_delay_slot
	   write.l [1](sp),reg  */
      if (spilled)
	{
	  if (final_sequence)
	    {
	      /* Output the delay-slot insn by hand, then neutralize it
		 so the final pass does not output it a second time.  */
	      rtx_insn *delay = NEXT_INSN (insn);
	      gcc_assert (delay);

	      final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
	      PATTERN (delay) = gen_blockage ();
	      INSN_CODE (delay) = -1;
	    }

	  /* The slot is at 1(sp) if the frame pointer is saved below it.  */
	  if (current_function_saves_fp ())
	    output_asm_insn ("write.l 1(sp),%1", operands);
	  else
	    output_asm_insn ("write.l (sp),%1", operands);
	}

      /* Then emit the core sequence:

	   moviu reg,%u label
	   movil reg,%l label
	   bra tr,reg,reg

	 We don't use r0 as the destination register of the branch because we
	 want the Branch Pre-decode Logic of the GR6 to use the Address Load
	 Array to predict the branch target.  */
      output_asm_insn ("moviu %1,%%u %0", operands);
      output_asm_insn ("movil %1,%%l %0", operands);
      strcpy (str, "bra ");
      strcat (str, cond);
      strcat (str, ",%1,%1");
      /* %# outputs the delay-slot insn, or a nop, after the branch (see
	 visium_print_operand); it cannot be used if the delay slot was
	 already consumed by the spill above.  */
      if (!spilled)
	strcat (str, "%#");
      strcat (str, "\t\t;long branch");
      output_asm_insn (str, operands);

      /* Finally emit the reload:

	   read.l reg,[1](sp)  */
      if (spilled)
	{
	  if (current_function_saves_fp ())
	    output_asm_insn (" read.l %1,1(sp)", operands);
	  else
	    output_asm_insn (" read.l %1,(sp)", operands);
	}
    }

  /* Or else, if the label is PC, then this is a return.  */
  else if (label == pc_rtx)
    {
      strcpy (str, "bra ");
      strcat (str, cond);
      strcat (str, ",r21,r0%#\t\t;return");
      output_asm_insn (str, operands);
    }

  /* Otherwise, this is a short branch.  */
  else
    {
      strcpy (str, "brr ");
      strcat (str, cond);
      strcat (str, ",%0%#");
      output_asm_insn (str, operands);
    }

  /* Everything was output by hand above.  */
  return "";
}
3151 1.1 mrg
3152 1.1 mrg /* Output an unconditional branch to LABEL. INSN is the instruction. */
3153 1.1 mrg
const char *
output_ubranch (rtx label, rtx_insn *insn)
{
  /* An unconditional branch is a branch on the "tr" (true) condition.  */
  return output_branch (label, "tr", insn);
}
3159 1.1 mrg
3160 1.1 mrg /* Output a conditional branch to LABEL. CODE is the comparison code.
3161 1.1 mrg CC_MODE is the mode of the CC register. REVERSED is non-zero if we
3162 1.1 mrg should reverse the sense of the comparison. INSN is the instruction. */
3163 1.1 mrg
const char *
output_cbranch (rtx label, enum rtx_code code, machine_mode cc_mode,
		int reversed, rtx_insn *insn)
{
  const char *cond;

  if (reversed)
    {
      /* For the FP modes, the reversal must keep the unordered cases on
	 the correct side.  */
      if (cc_mode == CCFPmode || cc_mode == CCFPEmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  /* Map the comparison code to the assembly condition string, taking
     CC_MODE into account when the relevant flag differs.  */
  switch (code)
    {
    case NE:
      /* In CCCmode the result is in the C flag, in CCVmode in the V
	 flag (see visium_select_cc_mode).  */
      if (cc_mode == CCCmode)
	cond = "cs";
      else if (cc_mode == CCVmode)
	cond = "os";
      else
	cond = "ne";
      break;

    case EQ:
      if (cc_mode == CCCmode)
	cond = "cc";
      else if (cc_mode == CCVmode)
	cond = "oc";
      else
	cond = "eq";
      break;

    case GE:
      /* In CCNZmode, testing V is not possible so use "no carry".  */
      if (cc_mode == CCNZmode)
	cond = "nc";
      else
	cond = "ge";
      break;

    case GT:
      cond = "gt";
      break;

    case LE:
      if (cc_mode == CCFPmode || cc_mode == CCFPEmode)
	cond = "ls";
      else
	cond = "le";
      break;

    case LT:
      if (cc_mode == CCFPmode || cc_mode == CCFPEmode)
	cond = "cs"; /* or "ns" */
      else if (cc_mode == CCNZmode)
	cond = "ns";
      else
	cond = "lt";
      break;

    case GEU:
      cond = "cc";
      break;

    case GTU:
      cond = "hi";
      break;

    case LEU:
      cond = "ls";
      break;

    case LTU:
      cond = "cs";
      break;

    case UNORDERED:
      cond = "os";
      break;

    case ORDERED:
      cond = "oc";
      break;

    case UNGE:
      cond = "cc"; /* or "nc" */
      break;

    case UNGT:
      cond = "hi";
      break;

    case UNLE:
      cond = "le";
      break;

    case UNLT:
      cond = "lt";
      break;

    /* These 2 comparison codes are not supported.  */
    case UNEQ:
    case LTGT:
    default:
      gcc_unreachable ();
    }

  return output_branch (label, cond, insn);
}
3274 1.1 mrg
3275 1.1 mrg /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
3276 1.1 mrg
static bool
visium_print_operand_punct_valid_p (unsigned char code)
{
  /* '#' is the only punctuation character used by this port: it requests
     the output of an insn in a delay slot (see visium_print_operand).  */
  switch (code)
    {
    case '#':
      return true;
    default:
      return false;
    }
}
3282 1.1 mrg
3283 1.1 mrg /* Implement TARGET_PRINT_OPERAND. Output to stdio stream FILE the assembler
3284 1.1 mrg syntax for an instruction operand OP subject to the modifier LETTER. */
3285 1.1 mrg
3286 1.1 mrg static void
3287 1.1 mrg visium_print_operand (FILE *file, rtx op, int letter)
3288 1.1 mrg {
3289 1.1 mrg switch (letter)
3290 1.1 mrg {
3291 1.1 mrg case '#':
3292 1.1 mrg /* Output an insn in a delay slot. */
3293 1.1 mrg if (final_sequence)
3294 1.1 mrg visium_indent_opcode = 1;
3295 1.1 mrg else
3296 1.1 mrg fputs ("\n\t nop", file);
3297 1.1 mrg return;
3298 1.1 mrg
3299 1.1 mrg case 'b':
3300 1.1 mrg /* Print LS 8 bits of operand. */
3301 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, UINTVAL (op) & 0xff);
3302 1.1 mrg return;
3303 1.1 mrg
3304 1.1 mrg case 'w':
3305 1.1 mrg /* Print LS 16 bits of operand. */
3306 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, UINTVAL (op) & 0xffff);
3307 1.1 mrg return;
3308 1.1 mrg
3309 1.1 mrg case 'u':
3310 1.1 mrg /* Print MS 16 bits of operand. */
3311 1.1 mrg fprintf (file,
3312 1.1 mrg HOST_WIDE_INT_PRINT_UNSIGNED, (UINTVAL (op) >> 16) & 0xffff);
3313 1.1 mrg return;
3314 1.1 mrg
3315 1.1 mrg case 'r':
3316 1.1 mrg /* It's either a register or zero. */
3317 1.1 mrg if (GET_CODE (op) == REG)
3318 1.1 mrg fputs (reg_names[REGNO (op)], file);
3319 1.1 mrg else
3320 1.1 mrg fputs (reg_names[0], file);
3321 1.1 mrg return;
3322 1.1 mrg
3323 1.1 mrg case 'f':
3324 1.1 mrg /* It's either a FP register or zero. */
3325 1.1 mrg if (GET_CODE (op) == REG)
3326 1.1 mrg fputs (reg_names[REGNO (op)], file);
3327 1.1 mrg else
3328 1.1 mrg fputs (reg_names[FP_FIRST_REGNUM], file);
3329 1.1 mrg return;
3330 1.1 mrg }
3331 1.1 mrg
3332 1.1 mrg switch (GET_CODE (op))
3333 1.1 mrg {
3334 1.1 mrg case REG:
3335 1.1 mrg if (letter == 'd')
3336 1.1 mrg fputs (reg_names[REGNO (op) + 1], file);
3337 1.1 mrg else
3338 1.1 mrg fputs (reg_names[REGNO (op)], file);
3339 1.1 mrg break;
3340 1.1 mrg
3341 1.1 mrg case SYMBOL_REF:
3342 1.1 mrg case LABEL_REF:
3343 1.1 mrg case CONST:
3344 1.1 mrg output_addr_const (file, op);
3345 1.1 mrg break;
3346 1.1 mrg
3347 1.1 mrg case MEM:
3348 1.1 mrg visium_print_operand_address (file, GET_MODE (op), XEXP (op, 0));
3349 1.1 mrg break;
3350 1.1 mrg
3351 1.1 mrg case CONST_INT:
3352 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
3353 1.1 mrg break;
3354 1.1 mrg
3355 1.1 mrg case CODE_LABEL:
3356 1.1 mrg asm_fprintf (file, "%LL%d", CODE_LABEL_NUMBER (op));
3357 1.1 mrg break;
3358 1.1 mrg
3359 1.1 mrg case HIGH:
3360 1.1 mrg visium_print_operand (file, XEXP (op, 1), letter);
3361 1.1 mrg break;
3362 1.1 mrg
3363 1.1 mrg default:
3364 1.1 mrg fatal_insn ("illegal operand ", op);
3365 1.1 mrg }
3366 1.1 mrg }
3367 1.1 mrg
3368 1.1 mrg /* Implement TARGET_PRINT_OPERAND_ADDRESS. Output to stdio stream FILE the
3369 1.1 mrg assembler syntax for an instruction operand that is a memory reference
3370 1.1 mrg whose address is ADDR. */
3371 1.1 mrg
3372 1.1 mrg static void
3373 1.1 mrg visium_print_operand_address (FILE *file, machine_mode mode, rtx addr)
3374 1.1 mrg {
3375 1.1 mrg switch (GET_CODE (addr))
3376 1.1 mrg {
3377 1.1 mrg case REG:
3378 1.1 mrg case SUBREG:
3379 1.1 mrg fprintf (file, "(%s)", reg_names[true_regnum (addr)]);
3380 1.1 mrg break;
3381 1.1 mrg
3382 1.1 mrg case PLUS:
3383 1.1 mrg {
3384 1.1 mrg rtx x = XEXP (addr, 0), y = XEXP (addr, 1);
3385 1.1 mrg
3386 1.1 mrg switch (GET_CODE (x))
3387 1.1 mrg {
3388 1.1 mrg case REG:
3389 1.1 mrg case SUBREG:
3390 1.1 mrg if (CONST_INT_P (y))
3391 1.1 mrg {
3392 1.1 mrg unsigned int regno = true_regnum (x);
3393 1.1 mrg HOST_WIDE_INT val = INTVAL (y);
3394 1.1 mrg switch (mode)
3395 1.1 mrg {
3396 1.1 mrg case E_SImode:
3397 1.1 mrg case E_DImode:
3398 1.1 mrg case E_SFmode:
3399 1.1 mrg case E_DFmode:
3400 1.1 mrg val >>= 2;
3401 1.1 mrg break;
3402 1.1 mrg
3403 1.1 mrg case E_HImode:
3404 1.1 mrg val >>= 1;
3405 1.1 mrg break;
3406 1.1 mrg
3407 1.1 mrg case E_QImode:
3408 1.1 mrg default:
3409 1.1 mrg break;
3410 1.1 mrg }
3411 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_DEC"(%s)", val,
3412 1.1 mrg reg_names[regno]);
3413 1.1 mrg }
3414 1.1 mrg else
3415 1.1 mrg fatal_insn ("illegal operand address (1)", addr);
3416 1.1 mrg break;
3417 1.1 mrg
3418 1.1 mrg default:
3419 1.1 mrg if (CONSTANT_P (x) && CONSTANT_P (y))
3420 1.1 mrg output_addr_const (file, addr);
3421 1.1 mrg else
3422 1.1 mrg fatal_insn ("illegal operand address (2)", addr);
3423 1.1 mrg break;
3424 1.1 mrg }
3425 1.1 mrg }
3426 1.1 mrg break;
3427 1.1 mrg
3428 1.1 mrg case LABEL_REF:
3429 1.1 mrg case SYMBOL_REF:
3430 1.1 mrg case CONST_INT:
3431 1.1 mrg case CONST:
3432 1.1 mrg output_addr_const (file, addr);
3433 1.1 mrg break;
3434 1.1 mrg
3435 1.1 mrg case NOTE:
3436 1.1 mrg if (NOTE_KIND (addr) != NOTE_INSN_DELETED_LABEL)
3437 1.1 mrg fatal_insn ("illegal operand address (3)", addr);
3438 1.1 mrg break;
3439 1.1 mrg
3440 1.1 mrg case CODE_LABEL:
3441 1.1 mrg asm_fprintf (file, "%LL%d", CODE_LABEL_NUMBER (addr));
3442 1.1 mrg break;
3443 1.1 mrg
3444 1.1 mrg default:
3445 1.1 mrg fatal_insn ("illegal operand address (4)", addr);
3446 1.1 mrg break;
3447 1.1 mrg }
3448 1.1 mrg }
3449 1.1 mrg
3450 1.1 mrg /* The Visium stack frames look like:
3451 1.1 mrg
3452 1.1 mrg Before call After call
3453 1.1 mrg +-----------------------+ +-----------------------+
3454 1.1 mrg | | | |
3455 1.1 mrg high | previous | | previous |
3456 1.1 mrg mem | frame | | frame |
3457 1.1 mrg | | | |
3458 1.1 mrg +-----------------------+ +-----------------------+
3459 1.1 mrg | | | |
3460 1.1 mrg | arguments on stack | | arguments on stack |
3461 1.1 mrg | | | |
3462 1.1 mrg SP+0->+-----------------------+ +-----------------------+
3463 1.1 mrg | reg parm save area, |
3464 1.1 mrg | only created for |
3465 1.1 mrg | variable argument |
3466 1.1 mrg | functions |
3467 1.1 mrg +-----------------------+
3468 1.1 mrg | |
3469 1.1 mrg | register save area |
3470 1.1 mrg | |
3471 1.1 mrg +-----------------------+
3472 1.1 mrg | |
3473 1.1 mrg | local variables |
3474 1.1 mrg | |
3475 1.1 mrg FP+8->+-----------------------+
3476 1.1 mrg | return address |
3477 1.1 mrg FP+4->+-----------------------+
3478 1.1 mrg | previous FP |
3479 1.1 mrg FP+0->+-----------------------+
3480 1.1 mrg | |
3481 1.1 mrg | alloca allocations |
3482 1.1 mrg | |
3483 1.1 mrg +-----------------------+
3484 1.1 mrg | |
3485 1.1 mrg low | arguments on stack |
3486 1.1 mrg mem | |
3487 1.1 mrg SP+0->+-----------------------+
3488 1.1 mrg
3489 1.1 mrg Notes:
3490 1.1 mrg 1) The "reg parm save area" does not exist for non variable argument fns.
3491 1.1 mrg 2) The FP register is not saved if `frame_pointer_needed' is zero and it
3492 1.1 mrg is not altered in the current function.
3493 1.1 mrg 3) The return address is not saved if there is no frame pointer and the
3494 1.1 mrg current function is leaf.
3495 1.1 mrg 4) If the return address is not saved and the static chain register is
3496 1.1 mrg live in the function, we allocate the return address slot to be able
3497 1.1 mrg to spill the register for a long branch. */
3498 1.1 mrg
3499 1.1 mrg /* Define the register classes for local purposes. */
3500 1.1 mrg enum reg_type { general, mdb, mdc, floating, last_type};
3501 1.1 mrg
3502 1.1 mrg #define GET_REG_TYPE(regno) \
3503 1.1 mrg (GP_REGISTER_P (regno) ? general : \
3504 1.1 mrg (regno) == MDB_REGNUM ? mdb : \
3505 1.1 mrg (regno) == MDC_REGNUM ? mdc : \
3506 1.1 mrg floating)
3507 1.1 mrg
3508 1.1 mrg /* First regno of each register type. */
3509 1.1 mrg const int first_regno[last_type] = {0, MDB_REGNUM, MDC_REGNUM, FP_FIRST_REGNUM};
3510 1.1 mrg
3511 1.1 mrg /* Size in bytes of each register type. */
3512 1.1 mrg const int reg_type_size[last_type] = {4, 8, 4, 4};
3513 1.1 mrg
3514 1.1 mrg /* Structure to be filled in by visium_compute_frame_size. */
3515 1.1 mrg struct visium_frame_info
3516 1.1 mrg {
3517 1.1 mrg unsigned int save_area_size; /* # bytes in the reg parm save area. */
3518 1.1 mrg unsigned int reg_size1; /* # bytes to store first block of regs. */
3519 1.1 mrg unsigned int reg_size2; /* # bytes to store second block of regs. */
3520 1.1 mrg unsigned int max_reg1; /* max. regno in first block */
3521 1.1 mrg unsigned int var_size; /* # bytes that variables take up. */
3522 1.1 mrg unsigned int save_fp; /* Nonzero if fp must be saved. */
3523 1.1 mrg unsigned int save_lr; /* Nonzero if lr must be saved. */
3524 1.1 mrg unsigned int lr_slot; /* Nonzero if the lr slot is needed. */
3525 1.1 mrg unsigned int combine; /* Nonzero if we can combine the allocation of
3526 1.1 mrg variables and regs. */
3527 1.1 mrg unsigned int interrupt; /* Nonzero if the function is an interrupt
3528 1.1 mrg handler. */
3529 1.1 mrg unsigned int mask[last_type]; /* Masks of saved regs: gp, mdb, mdc, fp */
3530 1.1 mrg };
3531 1.1 mrg
3532 1.1 mrg /* Current frame information calculated by visium_compute_frame_size. */
3533 1.1 mrg static struct visium_frame_info current_frame_info;
3534 1.1 mrg
3535 1.1 mrg /* Accessor for current_frame_info.save_fp. */
3536 1.1 mrg
3537 1.1 mrg static inline bool
3538 1.1 mrg current_function_saves_fp (void)
3539 1.1 mrg {
3540 1.1 mrg return current_frame_info.save_fp != 0;
3541 1.1 mrg }
3542 1.1 mrg
3543 1.1 mrg /* Accessor for current_frame_info.save_lr. */
3544 1.1 mrg
3545 1.1 mrg static inline bool
3546 1.1 mrg current_function_saves_lr (void)
3547 1.1 mrg {
3548 1.1 mrg return current_frame_info.save_lr != 0;
3549 1.1 mrg }
3550 1.1 mrg
3551 1.1 mrg /* Accessor for current_frame_info.lr_slot. */
3552 1.1 mrg
3553 1.1 mrg static inline bool
3554 1.1 mrg current_function_has_lr_slot (void)
3555 1.1 mrg {
3556 1.1 mrg return current_frame_info.lr_slot != 0;
3557 1.1 mrg }
3558 1.1 mrg
3559 1.1 mrg /* Return non-zero if register REGNO needs to be saved in the frame. */
3560 1.1 mrg
3561 1.1 mrg static int
3562 1.1 mrg visium_save_reg_p (int interrupt, int regno)
3563 1.1 mrg {
3564 1.1 mrg switch (regno)
3565 1.1 mrg {
3566 1.1 mrg case HARD_FRAME_POINTER_REGNUM:
3567 1.1 mrg /* This register is call-saved but handled specially. */
3568 1.1 mrg return 0;
3569 1.1 mrg
3570 1.1 mrg case MDC_REGNUM:
3571 1.1 mrg /* This register is fixed but can be modified. */
3572 1.1 mrg break;
3573 1.1 mrg
3574 1.1 mrg case 29:
3575 1.1 mrg case 30:
3576 1.1 mrg /* These registers are fixed and hold the interrupt context. */
3577 1.1 mrg return (interrupt != 0);
3578 1.1 mrg
3579 1.1 mrg default:
3580 1.1 mrg /* The other fixed registers are either immutable or special. */
3581 1.1 mrg if (fixed_regs[regno])
3582 1.1 mrg return 0;
3583 1.1 mrg break;
3584 1.1 mrg }
3585 1.1 mrg
3586 1.1 mrg if (interrupt)
3587 1.1 mrg {
3588 1.1 mrg if (crtl->is_leaf)
3589 1.1 mrg {
3590 1.1 mrg if (df_regs_ever_live_p (regno))
3591 1.1 mrg return 1;
3592 1.1 mrg }
3593 1.1 mrg else if (call_used_or_fixed_reg_p (regno))
3594 1.1 mrg return 1;
3595 1.1 mrg
3596 1.1 mrg /* To save mdb requires two temporary registers. To save mdc or
3597 1.1 mrg any of the floating registers requires one temporary
3598 1.1 mrg register. If this is an interrupt routine, the temporary
3599 1.1 mrg registers need to be saved as well. These temporary registers
3600 1.1 mrg are call used, so we only need deal with the case of leaf
3601 1.1 mrg functions here. */
3602 1.1 mrg if (regno == PROLOGUE_TMP_REGNUM)
3603 1.1 mrg {
3604 1.1 mrg if (df_regs_ever_live_p (MDB_REGNUM)
3605 1.1 mrg || df_regs_ever_live_p (MDC_REGNUM))
3606 1.1 mrg return 1;
3607 1.1 mrg
3608 1.1 mrg for (int i = FP_FIRST_REGNUM; i <= FP_LAST_REGNUM; i++)
3609 1.1 mrg if (df_regs_ever_live_p (i))
3610 1.1 mrg return 1;
3611 1.1 mrg }
3612 1.1 mrg
3613 1.1 mrg else if (regno == PROLOGUE_TMP_REGNUM + 1)
3614 1.1 mrg {
3615 1.1 mrg if (df_regs_ever_live_p (MDB_REGNUM))
3616 1.1 mrg return 1;
3617 1.1 mrg }
3618 1.1 mrg }
3619 1.1 mrg
3620 1.1 mrg return df_regs_ever_live_p (regno) && !call_used_or_fixed_reg_p (regno);
3621 1.1 mrg }
3622 1.1 mrg
3623 1.1 mrg /* Compute the frame size required by the function. This function is called
3624 1.1 mrg during the reload pass and also by visium_expand_prologue. */
3625 1.1 mrg
3626 1.1 mrg static int
3627 1.1 mrg visium_compute_frame_size (int size)
3628 1.1 mrg {
3629 1.1 mrg const int save_area_size = visium_reg_parm_save_area_size;
3630 1.1 mrg const int var_size = VISIUM_STACK_ALIGN (size);
3631 1.1 mrg const int save_fp
3632 1.1 mrg = frame_pointer_needed || df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM);
3633 1.1 mrg const int save_lr = frame_pointer_needed || !crtl->is_leaf;
3634 1.1 mrg const int lr_slot = !save_lr && df_regs_ever_live_p (long_branch_regnum);
3635 1.1 mrg const int local_frame_offset
3636 1.1 mrg = (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
3637 1.1 mrg const int interrupt = visium_interrupt_function_p ();
3638 1.1 mrg unsigned int mask[last_type];
3639 1.1 mrg int reg_size1 = 0;
3640 1.1 mrg int max_reg1 = 0;
3641 1.1 mrg int reg_size2 = 0;
3642 1.1 mrg int reg_size;
3643 1.1 mrg int combine;
3644 1.1 mrg int frame_size;
3645 1.1 mrg int regno;
3646 1.1 mrg
3647 1.1 mrg memset (mask, 0, last_type * sizeof (unsigned int));
3648 1.1 mrg
3649 1.1 mrg /* The registers may need stacking in 2 blocks since only 32 32-bit words
3650 1.1 mrg can be indexed from a given base address. */
3651 1.1 mrg for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
3652 1.1 mrg {
3653 1.1 mrg if (visium_save_reg_p (interrupt, regno))
3654 1.1 mrg {
3655 1.1 mrg enum reg_type reg_type = GET_REG_TYPE (regno);
3656 1.1 mrg int mask_bit = 1 << (regno - first_regno[reg_type]);
3657 1.1 mrg int nbytes = reg_type_size[reg_type];
3658 1.1 mrg
3659 1.1 mrg if (reg_size1 + nbytes > 32 * UNITS_PER_WORD)
3660 1.1 mrg break;
3661 1.1 mrg
3662 1.1 mrg reg_size1 += nbytes;
3663 1.1 mrg max_reg1 = regno;
3664 1.1 mrg mask[reg_type] |= mask_bit;
3665 1.1 mrg }
3666 1.1 mrg }
3667 1.1 mrg
3668 1.1 mrg for (regno = max_reg1 + 1; regno < FIRST_PSEUDO_REGISTER; regno++)
3669 1.1 mrg {
3670 1.1 mrg if (visium_save_reg_p (interrupt, regno))
3671 1.1 mrg {
3672 1.1 mrg enum reg_type reg_type = GET_REG_TYPE (regno);
3673 1.1 mrg int mask_bit = 1 << (regno - first_regno[reg_type]);
3674 1.1 mrg int nbytes = reg_type_size[reg_type];
3675 1.1 mrg
3676 1.1 mrg reg_size2 += nbytes;
3677 1.1 mrg mask[reg_type] |= mask_bit;
3678 1.1 mrg }
3679 1.1 mrg }
3680 1.1 mrg
3681 1.1 mrg reg_size = reg_size2 ? reg_size2 : reg_size1;
3682 1.1 mrg combine = (local_frame_offset + var_size + reg_size) <= 32 * UNITS_PER_WORD;
3683 1.1 mrg frame_size
3684 1.1 mrg = local_frame_offset + var_size + reg_size2 + reg_size1 + save_area_size;
3685 1.1 mrg
3686 1.1 mrg current_frame_info.save_area_size = save_area_size;
3687 1.1 mrg current_frame_info.reg_size1 = reg_size1;
3688 1.1 mrg current_frame_info.max_reg1 = max_reg1;
3689 1.1 mrg current_frame_info.reg_size2 = reg_size2;
3690 1.1 mrg current_frame_info.var_size = var_size;
3691 1.1 mrg current_frame_info.save_fp = save_fp;
3692 1.1 mrg current_frame_info.save_lr = save_lr;
3693 1.1 mrg current_frame_info.lr_slot = lr_slot;
3694 1.1 mrg current_frame_info.combine = combine;
3695 1.1 mrg current_frame_info.interrupt = interrupt;
3696 1.1 mrg
3697 1.1 mrg memcpy (current_frame_info.mask, mask, last_type * sizeof (unsigned int));
3698 1.1 mrg
3699 1.1 mrg return frame_size;
3700 1.1 mrg }
3701 1.1 mrg
3702 1.1 mrg /* Helper function for INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET). Define
3703 1.1 mrg the offset between two registers, one to be eliminated, and the other its
3704 1.1 mrg replacement, at the start of a routine. */
3705 1.1 mrg
3706 1.1 mrg int
3707 1.1 mrg visium_initial_elimination_offset (int from, int to ATTRIBUTE_UNUSED)
3708 1.1 mrg {
3709 1.1 mrg const int save_fp = current_frame_info.save_fp;
3710 1.1 mrg const int save_lr = current_frame_info.save_lr;
3711 1.1 mrg const int lr_slot = current_frame_info.lr_slot;
3712 1.1 mrg int offset;
3713 1.1 mrg
3714 1.1 mrg if (from == FRAME_POINTER_REGNUM)
3715 1.1 mrg offset = (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
3716 1.1 mrg else if (from == ARG_POINTER_REGNUM)
3717 1.1 mrg offset = visium_compute_frame_size (get_frame_size ());
3718 1.1 mrg else
3719 1.1 mrg gcc_unreachable ();
3720 1.1 mrg
3721 1.1 mrg return offset;
3722 1.1 mrg }
3723 1.1 mrg
3724 1.1 mrg /* For an interrupt handler, we may be saving call-clobbered registers.
3725 1.1 mrg Say the epilogue uses these in addition to the link register. */
3726 1.1 mrg
3727 1.1 mrg int
3728 1.1 mrg visium_epilogue_uses (int regno)
3729 1.1 mrg {
3730 1.1 mrg if (regno == LINK_REGNUM)
3731 1.1 mrg return 1;
3732 1.1 mrg
3733 1.1 mrg if (reload_completed)
3734 1.1 mrg {
3735 1.1 mrg enum reg_type reg_type = GET_REG_TYPE (regno);
3736 1.1 mrg int mask_bit = 1 << (regno - first_regno[reg_type]);
3737 1.1 mrg
3738 1.1 mrg return (current_frame_info.mask[reg_type] & mask_bit) != 0;
3739 1.1 mrg }
3740 1.1 mrg
3741 1.1 mrg return 0;
3742 1.1 mrg }
3743 1.1 mrg
3744 1.1 mrg /* Wrapper around emit_insn that sets RTX_FRAME_RELATED_P on the insn. */
3745 1.1 mrg
3746 1.1 mrg static rtx
3747 1.1 mrg emit_frame_insn (rtx x)
3748 1.1 mrg {
3749 1.1 mrg x = emit_insn (x);
3750 1.1 mrg RTX_FRAME_RELATED_P (x) = 1;
3751 1.1 mrg return x;
3752 1.1 mrg }
3753 1.1 mrg
3754 1.1 mrg /* Allocate ALLOC bytes on the stack and save the registers LOW_REGNO to
3755 1.1 mrg HIGH_REGNO at OFFSET from the stack pointer. */
3756 1.1 mrg
3757 1.1 mrg static void
3758 1.1 mrg visium_save_regs (int alloc, int offset, int low_regno, int high_regno)
3759 1.1 mrg {
3760 1.1 mrg /* If this is an interrupt handler function, then mark the register
3761 1.1 mrg stores as volatile. This will prevent the instruction scheduler
3762 1.1 mrg from scrambling the order of register saves. */
3763 1.1 mrg const int volatile_p = current_frame_info.interrupt;
3764 1.1 mrg int regno;
3765 1.1 mrg
3766 1.1 mrg /* Allocate the stack space. */
3767 1.1 mrg emit_frame_insn (gen_addsi3_flags (stack_pointer_rtx, stack_pointer_rtx,
3768 1.1 mrg GEN_INT (-alloc)));
3769 1.1 mrg
3770 1.1 mrg for (regno = low_regno; regno <= high_regno; regno++)
3771 1.1 mrg {
3772 1.1 mrg enum reg_type reg_type = GET_REG_TYPE (regno);
3773 1.1 mrg int mask_bit = 1 << (regno - first_regno[reg_type]);
3774 1.1 mrg rtx insn;
3775 1.1 mrg
3776 1.1 mrg if (current_frame_info.mask[reg_type] & mask_bit)
3777 1.1 mrg {
3778 1.1 mrg offset -= reg_type_size[reg_type];
3779 1.1 mrg switch (reg_type)
3780 1.1 mrg {
3781 1.1 mrg case general:
3782 1.1 mrg {
3783 1.1 mrg rtx mem
3784 1.1 mrg = gen_frame_mem (SImode,
3785 1.1 mrg plus_constant (Pmode,
3786 1.1 mrg stack_pointer_rtx, offset));
3787 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
3788 1.1 mrg emit_frame_insn (gen_movsi (mem, gen_rtx_REG (SImode, regno)));
3789 1.1 mrg }
3790 1.1 mrg break;
3791 1.1 mrg
3792 1.1 mrg case mdb:
3793 1.1 mrg {
3794 1.1 mrg rtx tmp = gen_rtx_REG (DImode, PROLOGUE_TMP_REGNUM);
3795 1.1 mrg rtx mem
3796 1.1 mrg = gen_frame_mem (DImode,
3797 1.1 mrg plus_constant (Pmode,
3798 1.1 mrg stack_pointer_rtx, offset));
3799 1.1 mrg rtx reg = gen_rtx_REG (DImode, regno);
3800 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
3801 1.1 mrg emit_insn (gen_movdi (tmp, reg));
3802 1.1 mrg /* Do not generate CFI if in interrupt handler. */
3803 1.1 mrg if (volatile_p)
3804 1.1 mrg emit_insn (gen_movdi (mem, tmp));
3805 1.1 mrg else
3806 1.1 mrg {
3807 1.1 mrg insn = emit_frame_insn (gen_movdi (mem, tmp));
3808 1.1 mrg add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3809 1.1 mrg gen_rtx_SET (mem, reg));
3810 1.1 mrg }
3811 1.1 mrg }
3812 1.1 mrg break;
3813 1.1 mrg
3814 1.1 mrg case mdc:
3815 1.1 mrg {
3816 1.1 mrg rtx tmp = gen_rtx_REG (SImode, PROLOGUE_TMP_REGNUM);
3817 1.1 mrg rtx mem
3818 1.1 mrg = gen_frame_mem (SImode,
3819 1.1 mrg plus_constant (Pmode,
3820 1.1 mrg stack_pointer_rtx, offset));
3821 1.1 mrg rtx reg = gen_rtx_REG (SImode, regno);
3822 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
3823 1.1 mrg emit_insn (gen_movsi (tmp, reg));
3824 1.1 mrg insn = emit_frame_insn (gen_movsi (mem, tmp));
3825 1.1 mrg add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3826 1.1 mrg gen_rtx_SET (mem, reg));
3827 1.1 mrg }
3828 1.1 mrg break;
3829 1.1 mrg
3830 1.1 mrg case floating:
3831 1.1 mrg {
3832 1.1 mrg rtx tmp = gen_rtx_REG (SFmode, PROLOGUE_TMP_REGNUM);
3833 1.1 mrg rtx mem
3834 1.1 mrg = gen_frame_mem (SFmode,
3835 1.1 mrg plus_constant (Pmode,
3836 1.1 mrg stack_pointer_rtx, offset));
3837 1.1 mrg rtx reg = gen_rtx_REG (SFmode, regno);
3838 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
3839 1.1 mrg emit_insn (gen_movsf (tmp, reg));
3840 1.1 mrg insn = emit_frame_insn (gen_movsf (mem, tmp));
3841 1.1 mrg add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3842 1.1 mrg gen_rtx_SET (mem, reg));
3843 1.1 mrg }
3844 1.1 mrg break;
3845 1.1 mrg
3846 1.1 mrg default:
3847 1.1 mrg break;
3848 1.1 mrg }
3849 1.1 mrg }
3850 1.1 mrg }
3851 1.1 mrg }
3852 1.1 mrg
3853 1.1 mrg /* This function generates the code for function entry. */
3854 1.1 mrg
3855 1.1 mrg void
3856 1.1 mrg visium_expand_prologue (void)
3857 1.1 mrg {
3858 1.1 mrg const int frame_size = visium_compute_frame_size (get_frame_size ());
3859 1.1 mrg const int save_area_size = current_frame_info.save_area_size;
3860 1.1 mrg const int reg_size1 = current_frame_info.reg_size1;
3861 1.1 mrg const int max_reg1 = current_frame_info.max_reg1;
3862 1.1 mrg const int reg_size2 = current_frame_info.reg_size2;
3863 1.1 mrg const int var_size = current_frame_info.var_size;
3864 1.1 mrg const int save_fp = current_frame_info.save_fp;
3865 1.1 mrg const int save_lr = current_frame_info.save_lr;
3866 1.1 mrg const int lr_slot = current_frame_info.lr_slot;
3867 1.1 mrg const int local_frame_offset
3868 1.1 mrg = (save_fp + save_lr + lr_slot) * UNITS_PER_WORD;
3869 1.1 mrg const int combine = current_frame_info.combine;
3870 1.1 mrg int reg_size;
3871 1.1 mrg int first_reg;
3872 1.1 mrg int fsize;
3873 1.1 mrg
3874 1.1 mrg /* Save the frame size for future references. */
3875 1.1 mrg visium_frame_size = frame_size;
3876 1.1 mrg
3877 1.1 mrg if (flag_stack_usage_info)
3878 1.1 mrg current_function_static_stack_size = frame_size;
3879 1.1 mrg
3880 1.1 mrg /* If the registers have to be stacked in 2 blocks, stack the first one. */
3881 1.1 mrg if (reg_size2)
3882 1.1 mrg {
3883 1.1 mrg visium_save_regs (reg_size1 + save_area_size, reg_size1, 0, max_reg1);
3884 1.1 mrg reg_size = reg_size2;
3885 1.1 mrg first_reg = max_reg1 + 1;
3886 1.1 mrg fsize = local_frame_offset + var_size + reg_size2;
3887 1.1 mrg }
3888 1.1 mrg else
3889 1.1 mrg {
3890 1.1 mrg reg_size = reg_size1;
3891 1.1 mrg first_reg = 0;
3892 1.1 mrg fsize = local_frame_offset + var_size + reg_size1 + save_area_size;
3893 1.1 mrg }
3894 1.1 mrg
3895 1.1 mrg /* If we can't combine register stacking with variable allocation, partially
3896 1.1 mrg allocate and stack the (remaining) registers now. */
3897 1.1 mrg if (reg_size && !combine)
3898 1.1 mrg visium_save_regs (fsize - local_frame_offset - var_size, reg_size,
3899 1.1 mrg first_reg, FIRST_PSEUDO_REGISTER - 1);
3900 1.1 mrg
3901 1.1 mrg /* If we can combine register stacking with variable allocation, fully
3902 1.1 mrg allocate and stack the (remaining) registers now. */
3903 1.1 mrg if (reg_size && combine)
3904 1.1 mrg visium_save_regs (fsize, local_frame_offset + var_size + reg_size,
3905 1.1 mrg first_reg, FIRST_PSEUDO_REGISTER - 1);
3906 1.1 mrg
3907 1.1 mrg /* Otherwise space may still need to be allocated for the variables. */
3908 1.1 mrg else if (fsize)
3909 1.1 mrg {
3910 1.1 mrg const int alloc_size = reg_size ? local_frame_offset + var_size : fsize;
3911 1.1 mrg
3912 1.1 mrg if (alloc_size > 65535)
3913 1.1 mrg {
3914 1.1 mrg rtx tmp = gen_rtx_REG (SImode, PROLOGUE_TMP_REGNUM), insn;
3915 1.1 mrg emit_insn (gen_movsi (tmp, GEN_INT (alloc_size)));
3916 1.1 mrg insn = emit_frame_insn (gen_subsi3_flags (stack_pointer_rtx,
3917 1.1 mrg stack_pointer_rtx,
3918 1.1 mrg tmp));
3919 1.1 mrg add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3920 1.1 mrg gen_rtx_SET (stack_pointer_rtx,
3921 1.1 mrg gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3922 1.1 mrg GEN_INT (-alloc_size))));
3923 1.1 mrg }
3924 1.1 mrg else
3925 1.1 mrg emit_frame_insn (gen_addsi3_flags (stack_pointer_rtx,
3926 1.1 mrg stack_pointer_rtx,
3927 1.1 mrg GEN_INT (-alloc_size)));
3928 1.1 mrg }
3929 1.1 mrg
3930 1.1 mrg if (save_fp)
3931 1.1 mrg emit_frame_insn (gen_movsi (gen_frame_mem (SImode, stack_pointer_rtx),
3932 1.1 mrg hard_frame_pointer_rtx));
3933 1.1 mrg
3934 1.1 mrg if (frame_pointer_needed)
3935 1.1 mrg emit_frame_insn (gen_stack_save ());
3936 1.1 mrg
3937 1.1 mrg if (save_lr)
3938 1.1 mrg {
3939 1.1 mrg rtx base_rtx, mem;
3940 1.1 mrg
3941 1.1 mrg /* Normally the frame pointer and link register get saved via
3942 1.1 mrg write.l (sp),fp
3943 1.1 mrg move.l fp,sp
3944 1.1 mrg write.l 1(sp),r21
3945 1.1 mrg
3946 1.1 mrg Indexing off sp rather than fp to store the link register
3947 1.1 mrg avoids presenting the instruction scheduler with an initial
3948 1.1 mrg pipeline hazard. If however the frame is needed for eg.
3949 1.1 mrg __builtin_return_address which needs to retrieve the saved
3950 1.1 mrg value of the link register from the stack at fp + 4 then
3951 1.1 mrg indexing from sp can confuse the dataflow, causing the link
3952 1.1 mrg register to be retrieved before it has been saved. */
3953 1.1 mrg if (cfun->machine->frame_needed)
3954 1.1 mrg base_rtx = hard_frame_pointer_rtx;
3955 1.1 mrg else
3956 1.1 mrg base_rtx = stack_pointer_rtx;
3957 1.1 mrg
3958 1.1 mrg mem = gen_frame_mem (SImode,
3959 1.1 mrg plus_constant (Pmode,
3960 1.1 mrg base_rtx, save_fp * UNITS_PER_WORD));
3961 1.1 mrg emit_frame_insn (gen_movsi (mem, gen_rtx_REG (SImode, LINK_REGNUM)));
3962 1.1 mrg }
3963 1.1 mrg }
3964 1.1 mrg
3965 1.1 mrg static GTY(()) rtx cfa_restores;
3966 1.1 mrg
3967 1.1 mrg /* Queue a REG_CFA_RESTORE note until next stack manipulation insn. */
3968 1.1 mrg
3969 1.1 mrg static void
3970 1.1 mrg visium_add_cfa_restore_note (rtx reg)
3971 1.1 mrg {
3972 1.1 mrg cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
3973 1.1 mrg }
3974 1.1 mrg
3975 1.1 mrg /* Add queued REG_CFA_RESTORE notes to INSN, if any. */
3976 1.1 mrg
3977 1.1 mrg static void
3978 1.1 mrg visium_add_queued_cfa_restore_notes (rtx insn)
3979 1.1 mrg {
3980 1.1 mrg rtx last;
3981 1.1 mrg if (!cfa_restores)
3982 1.1 mrg return;
3983 1.1 mrg for (last = cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
3984 1.1 mrg ;
3985 1.1 mrg XEXP (last, 1) = REG_NOTES (insn);
3986 1.1 mrg REG_NOTES (insn) = cfa_restores;
3987 1.1 mrg cfa_restores = NULL_RTX;
3988 1.1 mrg }
3989 1.1 mrg
3990 1.1 mrg /* Restore the registers LOW_REGNO to HIGH_REGNO from the save area at OFFSET
3991 1.1 mrg from the stack pointer and pop DEALLOC bytes off the stack. */
3992 1.1 mrg
3993 1.1 mrg static void
3994 1.1 mrg visium_restore_regs (int dealloc, int offset, int high_regno, int low_regno)
3995 1.1 mrg {
3996 1.1 mrg /* If this is an interrupt handler function, then mark the register
3997 1.1 mrg restores as volatile. This will prevent the instruction scheduler
3998 1.1 mrg from scrambling the order of register restores. */
3999 1.1 mrg const int volatile_p = current_frame_info.interrupt;
4000 1.1 mrg int r30_offset = -1;
4001 1.1 mrg int regno;
4002 1.1 mrg
4003 1.1 mrg for (regno = high_regno; regno >= low_regno; --regno)
4004 1.1 mrg {
4005 1.1 mrg enum reg_type reg_type = GET_REG_TYPE (regno);
4006 1.1 mrg int mask_bit = 1 << (regno - first_regno[reg_type]);
4007 1.1 mrg
4008 1.1 mrg if (current_frame_info.mask[reg_type] & mask_bit)
4009 1.1 mrg {
4010 1.1 mrg switch (reg_type)
4011 1.1 mrg {
4012 1.1 mrg case general:
4013 1.1 mrg /* Postpone restoring the interrupted context registers
4014 1.1 mrg until last, since they need to be preceded by a dsi. */
4015 1.1 mrg if (regno == 29)
4016 1.1 mrg ;
4017 1.1 mrg else if (regno == 30)
4018 1.1 mrg r30_offset = offset;
4019 1.1 mrg else
4020 1.1 mrg {
4021 1.1 mrg rtx mem
4022 1.1 mrg = gen_frame_mem (SImode,
4023 1.1 mrg plus_constant (Pmode,
4024 1.1 mrg stack_pointer_rtx,
4025 1.1 mrg offset));
4026 1.1 mrg rtx reg = gen_rtx_REG (SImode, regno);
4027 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
4028 1.1 mrg emit_insn (gen_movsi (reg, mem));
4029 1.1 mrg visium_add_cfa_restore_note (reg);
4030 1.1 mrg }
4031 1.1 mrg break;
4032 1.1 mrg
4033 1.1 mrg case mdb:
4034 1.1 mrg {
4035 1.1 mrg rtx tmp = gen_rtx_REG (DImode, PROLOGUE_TMP_REGNUM);
4036 1.1 mrg rtx mem
4037 1.1 mrg = gen_frame_mem (DImode,
4038 1.1 mrg plus_constant (Pmode,
4039 1.1 mrg stack_pointer_rtx, offset));
4040 1.1 mrg rtx reg = gen_rtx_REG (DImode, regno);
4041 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
4042 1.1 mrg emit_insn (gen_movdi (tmp, mem));
4043 1.1 mrg emit_insn (gen_movdi (reg, tmp));
4044 1.1 mrg /* Do not generate CFI if in interrupt handler. */
4045 1.1 mrg if (!volatile_p)
4046 1.1 mrg visium_add_cfa_restore_note (reg);
4047 1.1 mrg }
4048 1.1 mrg break;
4049 1.1 mrg
4050 1.1 mrg case mdc:
4051 1.1 mrg {
4052 1.1 mrg rtx tmp = gen_rtx_REG (SImode, PROLOGUE_TMP_REGNUM);
4053 1.1 mrg rtx mem
4054 1.1 mrg = gen_frame_mem (SImode,
4055 1.1 mrg plus_constant (Pmode,
4056 1.1 mrg stack_pointer_rtx, offset));
4057 1.1 mrg rtx reg = gen_rtx_REG (SImode, regno);
4058 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
4059 1.1 mrg emit_insn (gen_movsi (tmp, mem));
4060 1.1 mrg emit_insn (gen_movsi (reg, tmp));
4061 1.1 mrg visium_add_cfa_restore_note (reg);
4062 1.1 mrg }
4063 1.1 mrg break;
4064 1.1 mrg
4065 1.1 mrg case floating:
4066 1.1 mrg {
4067 1.1 mrg rtx tmp = gen_rtx_REG (SFmode, PROLOGUE_TMP_REGNUM);
4068 1.1 mrg rtx mem
4069 1.1 mrg = gen_frame_mem (SFmode,
4070 1.1 mrg plus_constant (Pmode,
4071 1.1 mrg stack_pointer_rtx, offset));
4072 1.1 mrg rtx reg = gen_rtx_REG (SFmode, regno);
4073 1.1 mrg MEM_VOLATILE_P (mem) = volatile_p;
4074 1.1 mrg emit_insn (gen_movsf (tmp, mem));
4075 1.1 mrg emit_insn (gen_movsf (reg, tmp));
4076 1.1 mrg visium_add_cfa_restore_note (reg);
4077 1.1 mrg }
4078 1.1 mrg break;
4079 1.1 mrg
4080 1.1 mrg default:
4081 1.1 mrg break;
4082 1.1 mrg }
4083 1.1 mrg
4084 1.1 mrg offset += reg_type_size[reg_type];
4085 1.1 mrg }
4086 1.1 mrg }
4087 1.1 mrg
4088 1.1 mrg /* If the interrupted context needs to be restored, precede the
4089 1.1 mrg restores of r29 and r30 by a dsi. */
4090 1.1 mrg if (r30_offset >= 0)
4091 1.1 mrg {
4092 1.1 mrg emit_insn (gen_dsi ());
4093 1.1 mrg emit_move_insn (gen_rtx_REG (SImode, 30),
4094 1.1 mrg gen_frame_mem (SImode,
4095 1.1 mrg plus_constant (Pmode,
4096 1.1 mrg stack_pointer_rtx,
4097 1.1 mrg r30_offset)));
4098 1.1 mrg emit_move_insn (gen_rtx_REG (SImode, 29),
4099 1.1 mrg gen_frame_mem (SImode,
4100 1.1 mrg plus_constant (Pmode,
4101 1.1 mrg stack_pointer_rtx,
4102 1.1 mrg r30_offset + 4)));
4103 1.1 mrg }
4104 1.1 mrg
4105 1.1 mrg /* Deallocate the stack space. */
4106 1.1 mrg rtx insn = emit_frame_insn (gen_stack_pop (GEN_INT (dealloc)));
4107 1.1 mrg add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4108 1.1 mrg gen_rtx_SET (stack_pointer_rtx,
4109 1.1 mrg gen_rtx_PLUS (Pmode, stack_pointer_rtx,
4110 1.1 mrg GEN_INT (dealloc))));
4111 1.1 mrg visium_add_queued_cfa_restore_notes (insn);
4112 1.1 mrg }
4113 1.1 mrg
/* This function generates the code for function exit (the epilogue).  It
   undoes the effects of visium_expand_prologue: restore of the frame
   pointer, the link register and the saved registers, then deallocation
   of the frame.  The emission order matters: the CFA notes attached to
   the insns must describe a consistent unwind state at every point.  */

void
visium_expand_epilogue (void)
{
  /* Snapshot the frame layout computed for the prologue.  */
  const int save_area_size = current_frame_info.save_area_size;
  const int reg_size1 = current_frame_info.reg_size1;
  const int max_reg1 = current_frame_info.max_reg1;
  const int reg_size2 = current_frame_info.reg_size2;
  const int var_size = current_frame_info.var_size;
  const int restore_fp = current_frame_info.save_fp;
  const int restore_lr = current_frame_info.save_lr;
  const int lr_slot = current_frame_info.lr_slot;
  /* Size in bytes of the FP slot, LR slot and LR alignment slot.  */
  const int local_frame_offset
    = (restore_fp + restore_lr + lr_slot) * UNITS_PER_WORD;
  const int combine = current_frame_info.combine;
  int reg_size;
  int last_reg;
  int fsize;

  /* Do not bother restoring the stack pointer if it hasn't been changed in
     the function since it was saved _after_ the allocation of the frame.  */
  if (!crtl->sp_is_unchanging)
    emit_insn (gen_stack_restore ());

  /* Restore the frame pointer if necessary.  The usual code would be:

       move.l  sp,fp
       read.l  fp,(sp)

     but for the MCM this constitutes a stall/hazard so it is changed to:

       move.l  sp,fp
       read.l  fp,(fp)

     if the stack pointer has actually been restored.  */
  if (restore_fp)
    {
      rtx src;

      if (TARGET_MCM && !crtl->sp_is_unchanging)
	src = gen_frame_mem (SImode, hard_frame_pointer_rtx);
      else
	src = gen_frame_mem (SImode, stack_pointer_rtx);

      /* Tell the unwinder that, from here on, the CFA is computed off the
	 stack pointer again rather than off the frame pointer.  */
      rtx insn = emit_frame_insn (gen_movsi (hard_frame_pointer_rtx, src));
      add_reg_note (insn, REG_CFA_ADJUST_CFA,
		    gen_rtx_SET (stack_pointer_rtx,
				 hard_frame_pointer_rtx));
      visium_add_cfa_restore_note (hard_frame_pointer_rtx);
    }

  /* Restore the link register if necessary.  Its slot sits just above
     the FP slot, if any.  */
  if (restore_lr)
    {
      rtx mem = gen_frame_mem (SImode,
			       plus_constant (Pmode,
					      stack_pointer_rtx,
					      restore_fp * UNITS_PER_WORD));
      rtx reg = gen_rtx_REG (SImode, LINK_REGNUM);
      emit_insn (gen_movsi (reg, mem));
      visium_add_cfa_restore_note (reg);
    }

  /* If we have two blocks of registers, deal with the second one first.  */
  if (reg_size2)
    {
      reg_size = reg_size2;
      last_reg = max_reg1 + 1;
      fsize = local_frame_offset + var_size + reg_size2;
    }
  else
    {
      reg_size = reg_size1;
      last_reg = 0;
      fsize = local_frame_offset + var_size + reg_size1 + save_area_size;
    }

  /* If the variable allocation could be combined with register stacking,
     restore the (remaining) registers and fully deallocate now.  */
  if (reg_size && combine)
    visium_restore_regs (fsize, local_frame_offset + var_size,
			 FIRST_PSEUDO_REGISTER - 1, last_reg);

  /* Otherwise deallocate the variables first.  */
  else if (fsize)
    {
      const int pop_size = reg_size ? local_frame_offset + var_size : fsize;
      rtx insn;

      /* Adjustments above 65535 go through a scratch register, presumably
	 because the stack_pop pattern only takes a 16-bit unsigned
	 immediate -- NOTE(review): confirm against visium.md.  */
      if (pop_size > 65535)
	{
	  rtx tmp = gen_rtx_REG (SImode, PROLOGUE_TMP_REGNUM);
	  emit_move_insn (tmp, GEN_INT (pop_size));
	  insn = emit_frame_insn (gen_stack_pop (tmp));
	}
      else
	insn = emit_frame_insn (gen_stack_pop (GEN_INT (pop_size)));
      /* Spell out the SP adjustment for the unwinder.  */
      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx,
				 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					       GEN_INT (pop_size))));
      visium_add_queued_cfa_restore_notes (insn);
    }

  /* If the variable allocation couldn't be combined with register stacking,
     restore the (remaining) registers now and partially deallocate.  */
  if (reg_size && !combine)
    visium_restore_regs (fsize - local_frame_offset - var_size, 0,
			 FIRST_PSEUDO_REGISTER - 1, last_reg);

  /* If the first block of registers has yet to be restored, do it now.  */
  if (reg_size2)
    visium_restore_regs (reg_size1 + save_area_size, 0, max_reg1, 0);

  /* If this is an exception return, make the necessary stack adjustment.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_stack_pop (EH_RETURN_STACKADJ_RTX));
}
4233 1.1 mrg
4234 1.1 mrg /* Return true if it is appropriate to emit `return' instructions in the
4235 1.1 mrg body of a function. */
4236 1.1 mrg
4237 1.1 mrg bool
4238 1.1 mrg visium_can_use_return_insn_p (void)
4239 1.1 mrg {
4240 1.1 mrg return reload_completed
4241 1.1 mrg && visium_frame_size == 0
4242 1.1 mrg && !visium_interrupt_function_p ();
4243 1.1 mrg }
4244 1.1 mrg
4245 1.1 mrg /* Return the register class required for an intermediate register used to
4246 1.1 mrg copy a register of RCLASS from/to X. If no such intermediate register is
4247 1.1 mrg required, return NO_REGS. If more than one such intermediate register is
4248 1.1 mrg required, describe the one that is closest in the copy chain to the reload
4249 1.1 mrg register. */
4250 1.1 mrg
4251 1.1 mrg static reg_class_t
4252 1.1 mrg visium_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
4253 1.1 mrg reg_class_t rclass,
4254 1.1 mrg machine_mode mode ATTRIBUTE_UNUSED,
4255 1.1 mrg secondary_reload_info *sri ATTRIBUTE_UNUSED)
4256 1.1 mrg {
4257 1.1 mrg int regno = true_regnum (x);
4258 1.1 mrg
4259 1.1 mrg /* For MDB, MDC and FP_REGS, a general register is needed for a move to
4260 1.1 mrg or from memory. */
4261 1.1 mrg if (regno == -1 && (rclass == MDB || rclass == MDC || rclass == FP_REGS))
4262 1.1 mrg return GENERAL_REGS;
4263 1.1 mrg
4264 1.1 mrg /* Moves between MDB, MDC and FP_REGS also require a general register. */
4265 1.1 mrg else if (((regno == R_MDB || regno == R_MDC) && rclass == FP_REGS)
4266 1.1 mrg || (FP_REGISTER_P (regno) && (rclass == MDB || rclass == MDC)))
4267 1.1 mrg return GENERAL_REGS;
4268 1.1 mrg
4269 1.1 mrg /* Finally an (unlikely ?) move between MDB and MDC needs a general reg. */
4270 1.1 mrg else if ((regno == R_MDB && rclass == MDC)
4271 1.1 mrg || (rclass == MDB && regno == R_MDC))
4272 1.1 mrg return GENERAL_REGS;
4273 1.1 mrg
4274 1.1 mrg return NO_REGS;
4275 1.1 mrg }
4276 1.1 mrg
4277 1.1 mrg /* Return true if pseudos that have been assigned to registers of RCLASS
4278 1.1 mrg would likely be spilled because registers of RCLASS are needed for
4279 1.1 mrg spill registers. */
4280 1.1 mrg
4281 1.1 mrg static bool
4282 1.1 mrg visium_class_likely_spilled_p (reg_class_t rclass ATTRIBUTE_UNUSED)
4283 1.1 mrg {
4284 1.1 mrg /* Return false for classes R1, R2 and R3, which are intended to be used
4285 1.1 mrg only in the source code in conjunction with block move instructions. */
4286 1.1 mrg return false;
4287 1.1 mrg }
4288 1.1 mrg
4289 1.1 mrg /* Return the register number if OP is a REG or a SUBREG of a REG, and
4290 1.1 mrg INVALID_REGNUM in all the other cases. */
4291 1.1 mrg
4292 1.1 mrg unsigned int
4293 1.1 mrg reg_or_subreg_regno (rtx op)
4294 1.1 mrg {
4295 1.1 mrg unsigned int regno;
4296 1.1 mrg
4297 1.1 mrg if (GET_CODE (op) == REG)
4298 1.1 mrg regno = REGNO (op);
4299 1.1 mrg else if (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
4300 1.1 mrg {
4301 1.1 mrg if (REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
4302 1.1 mrg regno = subreg_regno (op);
4303 1.1 mrg else
4304 1.1 mrg regno = REGNO (SUBREG_REG (op));
4305 1.1 mrg }
4306 1.1 mrg else
4307 1.1 mrg regno = INVALID_REGNUM;
4308 1.1 mrg
4309 1.1 mrg return regno;
4310 1.1 mrg }
4311 1.1 mrg
4312 1.1 mrg /* Implement TARGET_CAN_CHANGE_MODE_CLASS.
4313 1.1 mrg
4314 1.1 mrg It's not obvious from the documentation of the hook that MDB cannot
4315 1.1 mrg change mode. However difficulties arise from expressions of the form
4316 1.1 mrg
4317 1.1 mrg (subreg:SI (reg:DI R_MDB) 0)
4318 1.1 mrg
4319 1.1 mrg There is no way to convert that reference to a single machine
4320 1.1 mrg register and, without the following definition, reload will quietly
4321 1.1 mrg convert it to
4322 1.1 mrg
4323 1.1 mrg (reg:SI R_MDB). */
4324 1.1 mrg
4325 1.1 mrg static bool
4326 1.1 mrg visium_can_change_mode_class (machine_mode from, machine_mode to,
4327 1.1 mrg reg_class_t rclass)
4328 1.1 mrg {
4329 1.1 mrg return (rclass != MDB || GET_MODE_SIZE (from) == GET_MODE_SIZE (to));
4330 1.1 mrg }
4331 1.1 mrg
4332 1.1 mrg #include "gt-visium.h"
4333