/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
20 1.1 mrg
21 1.1 mrg #define IN_TARGET_CODE 1
22 1.1 mrg
23 1.1 mrg #include "config.h"
24 1.1 mrg #include "system.h"
25 1.1 mrg #include "coretypes.h"
26 1.1 mrg #include "backend.h"
27 1.1 mrg #include "target.h"
28 1.1 mrg #include "rtl.h"
29 1.1 mrg #include "tree.h"
30 1.1 mrg #include "stringpool.h"
31 1.1 mrg #include "attribs.h"
32 1.1 mrg #include "df.h"
33 1.1 mrg #include "memmodel.h"
34 1.1 mrg #include "tm_p.h"
35 1.1 mrg #include "optabs.h"
36 1.1 mrg #include "regs.h"
37 1.1 mrg #include "emit-rtl.h"
38 1.1 mrg #include "recog.h"
39 1.1 mrg #include "diagnostic-core.h"
40 1.1 mrg #include "output.h"
41 1.1 mrg #include "insn-attr.h"
42 1.1 mrg #include "flags.h"
43 1.1 mrg #include "reload.h"
44 1.1 mrg #include "stor-layout.h"
45 1.1 mrg #include "varasm.h"
46 1.1 mrg #include "calls.h"
47 1.1 mrg #include "explow.h"
48 1.1 mrg #include "expr.h"
49 1.1 mrg #include "tm-constrs.h"
50 1.1 mrg #include "builtins.h"
51 1.1 mrg #include "opts.h"
52 1.1 mrg
53 1.1 mrg /* This file should be included last. */
54 1.1 mrg #include "target-def.h"
55 1.1 mrg
/* Prototypes */

/* Used by m32c_pushm_popm to select whether to emit a PUSHM insn, a
   POPM insn, or merely compute the size of the save area.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;
65 1.1 mrg
66 1.1 mrg static bool m32c_function_needs_enter (void);
67 1.1 mrg static tree interrupt_handler (tree *, tree, tree, int, bool *);
68 1.1 mrg static tree function_vector_handler (tree *, tree, tree, int, bool *);
69 1.1 mrg static int interrupt_p (tree node);
70 1.1 mrg static int bank_switch_p (tree node);
71 1.1 mrg static int fast_interrupt_p (tree node);
72 1.1 mrg static int interrupt_p (tree node);
73 1.1 mrg static bool m32c_asm_integer (rtx, unsigned int, int);
74 1.1 mrg static int m32c_comp_type_attributes (const_tree, const_tree);
75 1.1 mrg static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
76 1.1 mrg static struct machine_function *m32c_init_machine_status (void);
77 1.1 mrg static void m32c_insert_attributes (tree, tree *);
78 1.1 mrg static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
79 1.1 mrg static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
80 1.1 mrg static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
81 1.1 mrg static bool m32c_pass_by_reference (cumulative_args_t,
82 1.1 mrg const function_arg_info &);
83 1.1 mrg static void m32c_function_arg_advance (cumulative_args_t,
84 1.1 mrg const function_arg_info &);
85 1.1 mrg static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
86 1.1 mrg static int m32c_pushm_popm (Push_Pop_Type);
87 1.1 mrg static bool m32c_strict_argument_naming (cumulative_args_t);
88 1.1 mrg static rtx m32c_struct_value_rtx (tree, int);
89 1.1 mrg static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
90 1.1 mrg static int need_to_save (int);
91 1.1 mrg static rtx m32c_function_value (const_tree, const_tree, bool);
92 1.1 mrg static rtx m32c_libcall_value (machine_mode, const_rtx);
93 1.1 mrg
94 1.1 mrg /* Returns true if an address is specified, else false. */
95 1.1 mrg static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
96 1.1 mrg
97 1.1 mrg static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
98 1.1 mrg
99 1.1 mrg #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
100 1.1 mrg
101 1.1 mrg #define streq(a,b) (strcmp ((a), (b)) == 0)
102 1.1 mrg
103 1.1 mrg /* Internal support routines */
104 1.1 mrg
105 1.1 mrg /* Debugging statements are tagged with DEBUG0 only so that they can
106 1.1 mrg be easily enabled individually, by replacing the '0' with '1' as
107 1.1 mrg needed. */
108 1.1 mrg #define DEBUG0 0
109 1.1 mrg #define DEBUG1 1
110 1.1 mrg
111 1.1 mrg #if DEBUG0
112 1.1 mrg #include "print-tree.h"
113 1.1 mrg /* This is needed by some of the commented-out debug statements
114 1.1 mrg below. */
115 1.1 mrg static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
116 1.1 mrg #endif
117 1.1 mrg static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
118 1.1 mrg
119 1.1 mrg /* These are all to support encode_pattern(). */
120 1.1 mrg static char pattern[30], *patternp;
121 1.1 mrg static GTY(()) rtx patternr[30];
122 1.1 mrg #define RTX_IS(x) (streq (pattern, x))
123 1.1 mrg
124 1.1 mrg /* Some macros to simplify the logic throughout this file. */
125 1.1 mrg #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
126 1.1 mrg #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
127 1.1 mrg
128 1.1 mrg #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
129 1.1 mrg #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
130 1.1 mrg
131 1.1 mrg static int
132 1.1 mrg far_addr_space_p (rtx x)
133 1.1 mrg {
134 1.1 mrg if (GET_CODE (x) != MEM)
135 1.1 mrg return 0;
136 1.1 mrg #if DEBUG0
137 1.1 mrg fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
138 1.1 mrg fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
139 1.1 mrg #endif
140 1.1 mrg return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
141 1.1 mrg }
142 1.1 mrg
143 1.1 mrg /* We do most RTX matching by converting the RTX into a string, and
144 1.1 mrg using string compares. This vastly simplifies the logic in many of
145 1.1 mrg the functions in this file.
146 1.1 mrg
147 1.1 mrg On exit, pattern[] has the encoded string (use RTX_IS("...") to
148 1.1 mrg compare it) and patternr[] has pointers to the nodes in the RTX
149 1.1 mrg corresponding to each character in the encoded string. The latter
150 1.1 mrg is mostly used by print_operand().
151 1.1 mrg
152 1.1 mrg Unrecognized patterns have '?' in them; this shows up when the
153 1.1 mrg assembler complains about syntax errors.
154 1.1 mrg */
155 1.1 mrg
156 1.1 mrg static void
157 1.1 mrg encode_pattern_1 (rtx x)
158 1.1 mrg {
159 1.1 mrg int i;
160 1.1 mrg
161 1.1 mrg if (patternp == pattern + sizeof (pattern) - 2)
162 1.1 mrg {
163 1.1 mrg patternp[-1] = '?';
164 1.1 mrg return;
165 1.1 mrg }
166 1.1 mrg
167 1.1 mrg patternr[patternp - pattern] = x;
168 1.1 mrg
169 1.1 mrg switch (GET_CODE (x))
170 1.1 mrg {
171 1.1 mrg case REG:
172 1.1 mrg *patternp++ = 'r';
173 1.1 mrg break;
174 1.1 mrg case SUBREG:
175 1.1 mrg if (GET_MODE_SIZE (GET_MODE (x)) !=
176 1.1 mrg GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
177 1.1 mrg *patternp++ = 'S';
178 1.1 mrg if (GET_MODE (x) == PSImode
179 1.1 mrg && GET_CODE (XEXP (x, 0)) == REG)
180 1.1 mrg *patternp++ = 'S';
181 1.1 mrg encode_pattern_1 (XEXP (x, 0));
182 1.1 mrg break;
183 1.1 mrg case MEM:
184 1.1 mrg *patternp++ = 'm';
185 1.1 mrg /* FALLTHRU */
186 1.1 mrg case CONST:
187 1.1 mrg encode_pattern_1 (XEXP (x, 0));
188 1.1 mrg break;
189 1.1 mrg case SIGN_EXTEND:
190 1.1 mrg *patternp++ = '^';
191 1.1 mrg *patternp++ = 'S';
192 1.1 mrg encode_pattern_1 (XEXP (x, 0));
193 1.1 mrg break;
194 1.1 mrg case ZERO_EXTEND:
195 1.1 mrg *patternp++ = '^';
196 1.1 mrg *patternp++ = 'Z';
197 1.1 mrg encode_pattern_1 (XEXP (x, 0));
198 1.1 mrg break;
199 1.1 mrg case PLUS:
200 1.1 mrg *patternp++ = '+';
201 1.1 mrg encode_pattern_1 (XEXP (x, 0));
202 1.1 mrg encode_pattern_1 (XEXP (x, 1));
203 1.1 mrg break;
204 1.1 mrg case PRE_DEC:
205 1.1 mrg *patternp++ = '>';
206 1.1 mrg encode_pattern_1 (XEXP (x, 0));
207 1.1 mrg break;
208 1.1 mrg case POST_INC:
209 1.1 mrg *patternp++ = '<';
210 1.1 mrg encode_pattern_1 (XEXP (x, 0));
211 1.1 mrg break;
212 1.1 mrg case LO_SUM:
213 1.1 mrg *patternp++ = 'L';
214 1.1 mrg encode_pattern_1 (XEXP (x, 0));
215 1.1 mrg encode_pattern_1 (XEXP (x, 1));
216 1.1 mrg break;
217 1.1 mrg case HIGH:
218 1.1 mrg *patternp++ = 'H';
219 1.1 mrg encode_pattern_1 (XEXP (x, 0));
220 1.1 mrg break;
221 1.1 mrg case SYMBOL_REF:
222 1.1 mrg *patternp++ = 's';
223 1.1 mrg break;
224 1.1 mrg case LABEL_REF:
225 1.1 mrg *patternp++ = 'l';
226 1.1 mrg break;
227 1.1 mrg case CODE_LABEL:
228 1.1 mrg *patternp++ = 'c';
229 1.1 mrg break;
230 1.1 mrg case CONST_INT:
231 1.1 mrg case CONST_DOUBLE:
232 1.1 mrg *patternp++ = 'i';
233 1.1 mrg break;
234 1.1 mrg case UNSPEC:
235 1.1 mrg *patternp++ = 'u';
236 1.1 mrg *patternp++ = '0' + XCINT (x, 1, UNSPEC);
237 1.1 mrg for (i = 0; i < XVECLEN (x, 0); i++)
238 1.1 mrg encode_pattern_1 (XVECEXP (x, 0, i));
239 1.1 mrg break;
240 1.1 mrg case USE:
241 1.1 mrg *patternp++ = 'U';
242 1.1 mrg break;
243 1.1 mrg case PARALLEL:
244 1.1 mrg *patternp++ = '|';
245 1.1 mrg for (i = 0; i < XVECLEN (x, 0); i++)
246 1.1 mrg encode_pattern_1 (XVECEXP (x, 0, i));
247 1.1 mrg break;
248 1.1 mrg case EXPR_LIST:
249 1.1 mrg *patternp++ = 'E';
250 1.1 mrg encode_pattern_1 (XEXP (x, 0));
251 1.1 mrg if (XEXP (x, 1))
252 1.1 mrg encode_pattern_1 (XEXP (x, 1));
253 1.1 mrg break;
254 1.1 mrg default:
255 1.1 mrg *patternp++ = '?';
256 1.1 mrg #if DEBUG0
257 1.1 mrg fprintf (stderr, "can't encode pattern %s\n",
258 1.1 mrg GET_RTX_NAME (GET_CODE (x)));
259 1.1 mrg debug_rtx (x);
260 1.1 mrg #endif
261 1.1 mrg break;
262 1.1 mrg }
263 1.1 mrg }
264 1.1 mrg
265 1.1 mrg static void
266 1.1 mrg encode_pattern (rtx x)
267 1.1 mrg {
268 1.1 mrg patternp = pattern;
269 1.1 mrg encode_pattern_1 (x);
270 1.1 mrg *patternp = 0;
271 1.1 mrg }
272 1.1 mrg
273 1.1 mrg /* Since register names indicate the mode they're used in, we need a
274 1.1 mrg way to determine which name to refer to the register with. Called
275 1.1 mrg by print_operand(). */
276 1.1 mrg
277 1.1 mrg static const char *
278 1.1 mrg reg_name_with_mode (int regno, machine_mode mode)
279 1.1 mrg {
280 1.1 mrg int mlen = GET_MODE_SIZE (mode);
281 1.1 mrg if (regno == R0_REGNO && mlen == 1)
282 1.1 mrg return "r0l";
283 1.1 mrg if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
284 1.1 mrg return "r2r0";
285 1.1 mrg if (regno == R0_REGNO && mlen == 6)
286 1.1 mrg return "r2r1r0";
287 1.1 mrg if (regno == R0_REGNO && mlen == 8)
288 1.1 mrg return "r3r1r2r0";
289 1.1 mrg if (regno == R1_REGNO && mlen == 1)
290 1.1 mrg return "r1l";
291 1.1 mrg if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
292 1.1 mrg return "r3r1";
293 1.1 mrg if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
294 1.1 mrg return "a1a0";
295 1.1 mrg return reg_names[regno];
296 1.1 mrg }
297 1.1 mrg
298 1.1 mrg /* How many bytes a register uses on stack when it's pushed. We need
299 1.1 mrg to know this because the push opcode needs to explicitly indicate
300 1.1 mrg the size of the register, even though the name of the register
301 1.1 mrg already tells it that. Used by m32c_output_reg_{push,pop}, which
302 1.1 mrg is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
303 1.1 mrg
304 1.1 mrg static int
305 1.1 mrg reg_push_size (int regno)
306 1.1 mrg {
307 1.1 mrg switch (regno)
308 1.1 mrg {
309 1.1 mrg case R0_REGNO:
310 1.1 mrg case R1_REGNO:
311 1.1 mrg return 2;
312 1.1 mrg case R2_REGNO:
313 1.1 mrg case R3_REGNO:
314 1.1 mrg case FLG_REGNO:
315 1.1 mrg return 2;
316 1.1 mrg case A0_REGNO:
317 1.1 mrg case A1_REGNO:
318 1.1 mrg case SB_REGNO:
319 1.1 mrg case FB_REGNO:
320 1.1 mrg case SP_REGNO:
321 1.1 mrg if (TARGET_A16)
322 1.1 mrg return 2;
323 1.1 mrg else
324 1.1 mrg return 3;
325 1.1 mrg default:
326 1.1 mrg gcc_unreachable ();
327 1.1 mrg }
328 1.1 mrg }
329 1.1 mrg
330 1.1 mrg /* Given two register classes, find the largest intersection between
331 1.1 mrg them. If there is no intersection, return RETURNED_IF_EMPTY
332 1.1 mrg instead. */
333 1.1 mrg static reg_class_t
334 1.1 mrg reduce_class (reg_class_t original_class, reg_class_t limiting_class,
335 1.1 mrg reg_class_t returned_if_empty)
336 1.1 mrg {
337 1.1 mrg HARD_REG_SET cc;
338 1.1 mrg int i;
339 1.1 mrg reg_class_t best = NO_REGS;
340 1.1 mrg unsigned int best_size = 0;
341 1.1 mrg
342 1.1 mrg if (original_class == limiting_class)
343 1.1 mrg return original_class;
344 1.1 mrg
345 1.1 mrg cc = reg_class_contents[original_class] & reg_class_contents[limiting_class];
346 1.1 mrg
347 1.1 mrg for (i = 0; i < LIM_REG_CLASSES; i++)
348 1.1 mrg {
349 1.1 mrg if (hard_reg_set_subset_p (reg_class_contents[i], cc))
350 1.1 mrg if (best_size < reg_class_size[i])
351 1.1 mrg {
352 1.1 mrg best = (reg_class_t) i;
353 1.1 mrg best_size = reg_class_size[i];
354 1.1 mrg }
355 1.1 mrg
356 1.1 mrg }
357 1.1 mrg if (best == NO_REGS)
358 1.1 mrg return returned_if_empty;
359 1.1 mrg return best;
360 1.1 mrg }
361 1.1 mrg
362 1.1 mrg /* Used by m32c_register_move_cost to determine if a move is
363 1.1 mrg impossibly expensive. */
364 1.1 mrg static bool
365 1.1 mrg class_can_hold_mode (reg_class_t rclass, machine_mode mode)
366 1.1 mrg {
367 1.1 mrg /* Cache the results: 0=untested 1=no 2=yes */
368 1.1 mrg static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
369 1.1 mrg
370 1.1 mrg if (results[(int) rclass][mode] == 0)
371 1.1 mrg {
372 1.1 mrg int r;
373 1.1 mrg results[rclass][mode] = 1;
374 1.1 mrg for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
375 1.1 mrg if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
376 1.1 mrg && m32c_hard_regno_mode_ok (r, mode))
377 1.1 mrg {
378 1.1 mrg results[rclass][mode] = 2;
379 1.1 mrg break;
380 1.1 mrg }
381 1.1 mrg }
382 1.1 mrg
383 1.1 mrg #if DEBUG0
384 1.1 mrg fprintf (stderr, "class %s can hold %s? %s\n",
385 1.1 mrg class_names[(int) rclass], mode_name[mode],
386 1.1 mrg (results[rclass][mode] == 2) ? "yes" : "no");
387 1.1 mrg #endif
388 1.1 mrg return results[(int) rclass][mode] == 2;
389 1.1 mrg }
390 1.1 mrg
391 1.1 mrg /* Run-time Target Specification. */
392 1.1 mrg
393 1.1 mrg /* Memregs are memory locations that gcc treats like general
394 1.1 mrg registers, as there are a limited number of true registers and the
395 1.1 mrg m32c families can use memory in most places that registers can be
396 1.1 mrg used.
397 1.1 mrg
398 1.1 mrg However, since memory accesses are more expensive than registers,
399 1.1 mrg we allow the user to limit the number of memregs available, in
400 1.1 mrg order to try to persuade gcc to try harder to use real registers.
401 1.1 mrg
402 1.1 mrg Memregs are provided by lib1funcs.S.
403 1.1 mrg */
404 1.1 mrg
405 1.1 mrg int ok_to_change_target_memregs = TRUE;
406 1.1 mrg
407 1.1 mrg /* Implements TARGET_OPTION_OVERRIDE. */
408 1.1 mrg
409 1.1 mrg #undef TARGET_OPTION_OVERRIDE
410 1.1 mrg #define TARGET_OPTION_OVERRIDE m32c_option_override
411 1.1 mrg
412 1.1 mrg static void
413 1.1 mrg m32c_option_override (void)
414 1.1 mrg {
415 1.1 mrg /* We limit memregs to 0..16, and provide a default. */
416 1.1 mrg if (OPTION_SET_P (target_memregs))
417 1.1 mrg {
418 1.1 mrg if (target_memregs < 0 || target_memregs > 16)
419 1.1 mrg error ("invalid target memregs value %<%d%>", target_memregs);
420 1.1 mrg }
421 1.1 mrg else
422 1.1 mrg target_memregs = 16;
423 1.1 mrg
424 1.1 mrg if (TARGET_A24)
425 1.1 mrg flag_ivopts = 0;
426 1.1 mrg
427 1.1 mrg /* This target defaults to strict volatile bitfields. */
428 1.1 mrg if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
429 1.1 mrg flag_strict_volatile_bitfields = 1;
430 1.1 mrg
431 1.1 mrg /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
432 1.1 mrg This is always worse than an absolute call. */
433 1.1 mrg if (TARGET_A16)
434 1.1 mrg flag_no_function_cse = 1;
435 1.1 mrg
436 1.1 mrg /* This wants to put insns between compares and their jumps. */
437 1.1 mrg /* FIXME: The right solution is to properly trace the flags register
438 1.1 mrg values, but that is too much work for stage 4. */
439 1.1 mrg flag_combine_stack_adjustments = 0;
440 1.1 mrg }
441 1.1 mrg
442 1.1 mrg #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
443 1.1 mrg #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
444 1.1 mrg
445 1.1 mrg static void
446 1.1 mrg m32c_override_options_after_change (void)
447 1.1 mrg {
448 1.1 mrg if (TARGET_A16)
449 1.1 mrg flag_no_function_cse = 1;
450 1.1 mrg }
451 1.1 mrg
452 1.1 mrg /* Defining data structures for per-function information */
453 1.1 mrg
454 1.1 mrg /* The usual; we set up our machine_function data. */
455 1.1 mrg static struct machine_function *
456 1.1 mrg m32c_init_machine_status (void)
457 1.1 mrg {
458 1.1 mrg return ggc_cleared_alloc<machine_function> ();
459 1.1 mrg }
460 1.1 mrg
461 1.1 mrg /* Implements INIT_EXPANDERS. We just set up to call the above
462 1.1 mrg function. */
463 1.1 mrg void
464 1.1 mrg m32c_init_expanders (void)
465 1.1 mrg {
466 1.1 mrg init_machine_status = m32c_init_machine_status;
467 1.1 mrg }
468 1.1 mrg
469 1.1 mrg /* Storage Layout */
470 1.1 mrg
471 1.1 mrg /* Register Basics */
472 1.1 mrg
473 1.1 mrg /* Basic Characteristics of Registers */
474 1.1 mrg
475 1.1 mrg /* Whether a mode fits in a register is complex enough to warrant a
476 1.1 mrg table. */
477 1.1 mrg static struct
478 1.1 mrg {
479 1.1 mrg char qi_regs;
480 1.1 mrg char hi_regs;
481 1.1 mrg char pi_regs;
482 1.1 mrg char si_regs;
483 1.1 mrg char di_regs;
484 1.1 mrg } nregs_table[FIRST_PSEUDO_REGISTER] =
485 1.1 mrg {
486 1.1 mrg { 1, 1, 2, 2, 4 }, /* r0 */
487 1.1 mrg { 0, 1, 0, 0, 0 }, /* r2 */
488 1.1 mrg { 1, 1, 2, 2, 0 }, /* r1 */
489 1.1 mrg { 0, 1, 0, 0, 0 }, /* r3 */
490 1.1 mrg { 0, 1, 1, 0, 0 }, /* a0 */
491 1.1 mrg { 0, 1, 1, 0, 0 }, /* a1 */
492 1.1 mrg { 0, 1, 1, 0, 0 }, /* sb */
493 1.1 mrg { 0, 1, 1, 0, 0 }, /* fb */
494 1.1 mrg { 0, 1, 1, 0, 0 }, /* sp */
495 1.1 mrg { 1, 1, 1, 0, 0 }, /* pc */
496 1.1 mrg { 0, 0, 0, 0, 0 }, /* fl */
497 1.1 mrg { 1, 1, 1, 0, 0 }, /* ap */
498 1.1 mrg { 1, 1, 2, 2, 4 }, /* mem0 */
499 1.1 mrg { 1, 1, 2, 2, 4 }, /* mem1 */
500 1.1 mrg { 1, 1, 2, 2, 4 }, /* mem2 */
501 1.1 mrg { 1, 1, 2, 2, 4 }, /* mem3 */
502 1.1 mrg { 1, 1, 2, 2, 4 }, /* mem4 */
503 1.1 mrg { 1, 1, 2, 2, 0 }, /* mem5 */
504 1.1 mrg { 1, 1, 2, 2, 0 }, /* mem6 */
505 1.1 mrg { 1, 1, 0, 0, 0 }, /* mem7 */
506 1.1 mrg };
507 1.1 mrg
508 1.1 mrg /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
509 1.1 mrg of available memregs, and select which registers need to be preserved
510 1.1 mrg across calls based on the chip family. */
511 1.1 mrg
512 1.1 mrg #undef TARGET_CONDITIONAL_REGISTER_USAGE
513 1.1 mrg #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
514 1.1 mrg void
515 1.1 mrg m32c_conditional_register_usage (void)
516 1.1 mrg {
517 1.1 mrg int i;
518 1.1 mrg
519 1.1 mrg if (target_memregs >= 0 && target_memregs <= 16)
520 1.1 mrg {
521 1.1 mrg /* The command line option is bytes, but our "registers" are
522 1.1 mrg 16-bit words. */
523 1.1 mrg for (i = (target_memregs+1)/2; i < 8; i++)
524 1.1 mrg {
525 1.1 mrg fixed_regs[MEM0_REGNO + i] = 1;
526 1.1 mrg CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
527 1.1 mrg }
528 1.1 mrg }
529 1.1 mrg
530 1.1 mrg /* M32CM and M32C preserve more registers across function calls. */
531 1.1 mrg if (TARGET_A24)
532 1.1 mrg {
533 1.1 mrg call_used_regs[R1_REGNO] = 0;
534 1.1 mrg call_used_regs[R2_REGNO] = 0;
535 1.1 mrg call_used_regs[R3_REGNO] = 0;
536 1.1 mrg call_used_regs[A0_REGNO] = 0;
537 1.1 mrg call_used_regs[A1_REGNO] = 0;
538 1.1 mrg }
539 1.1 mrg }
540 1.1 mrg
541 1.1 mrg /* How Values Fit in Registers */
542 1.1 mrg
543 1.1 mrg /* Implements TARGET_HARD_REGNO_NREGS. This is complicated by the fact that
544 1.1 mrg different registers are different sizes from each other, *and* may
545 1.1 mrg be different sizes in different chip families. */
546 1.1 mrg static unsigned int
547 1.1 mrg m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
548 1.1 mrg {
549 1.1 mrg if (regno == FLG_REGNO && mode == CCmode)
550 1.1 mrg return 1;
551 1.1 mrg if (regno >= FIRST_PSEUDO_REGISTER)
552 1.1 mrg return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
553 1.1 mrg
554 1.1 mrg if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
555 1.1 mrg return (GET_MODE_SIZE (mode) + 1) / 2;
556 1.1 mrg
557 1.1 mrg if (GET_MODE_SIZE (mode) <= 1)
558 1.1 mrg return nregs_table[regno].qi_regs;
559 1.1 mrg if (GET_MODE_SIZE (mode) <= 2)
560 1.1 mrg return nregs_table[regno].hi_regs;
561 1.1 mrg if (regno == A0_REGNO && mode == SImode && TARGET_A16)
562 1.1 mrg return 2;
563 1.1 mrg if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
564 1.1 mrg return nregs_table[regno].pi_regs;
565 1.1 mrg if (GET_MODE_SIZE (mode) <= 4)
566 1.1 mrg return nregs_table[regno].si_regs;
567 1.1 mrg if (GET_MODE_SIZE (mode) <= 8)
568 1.1 mrg return nregs_table[regno].di_regs;
569 1.1 mrg return 0;
570 1.1 mrg }
571 1.1 mrg
572 1.1 mrg static unsigned int
573 1.1 mrg m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
574 1.1 mrg {
575 1.1 mrg unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
576 1.1 mrg return rv ? rv : 1;
577 1.1 mrg }
578 1.1 mrg
579 1.1 mrg /* Implement TARGET_HARD_REGNO_MODE_OK. The above function does the work
580 1.1 mrg already; just test its return value. */
581 1.1 mrg static bool
582 1.1 mrg m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
583 1.1 mrg {
584 1.1 mrg return m32c_hard_regno_nregs_1 (regno, mode) != 0;
585 1.1 mrg }
586 1.1 mrg
587 1.1 mrg /* Implement TARGET_MODES_TIEABLE_P. In general, modes aren't tieable since
588 1.1 mrg registers are all different sizes. However, since most modes are
589 1.1 mrg bigger than our registers anyway, it's easier to implement this
590 1.1 mrg function that way, leaving QImode as the only unique case. */
591 1.1 mrg static bool
592 1.1 mrg m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
593 1.1 mrg {
594 1.1 mrg if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
595 1.1 mrg return 1;
596 1.1 mrg
597 1.1 mrg #if 0
598 1.1 mrg if (m1 == QImode || m2 == QImode)
599 1.1 mrg return 0;
600 1.1 mrg #endif
601 1.1 mrg
602 1.1 mrg return 1;
603 1.1 mrg }
604 1.1 mrg
605 1.1 mrg /* Register Classes */
606 1.1 mrg
607 1.1 mrg /* Implements REGNO_REG_CLASS. */
608 1.1 mrg enum reg_class
609 1.1 mrg m32c_regno_reg_class (int regno)
610 1.1 mrg {
611 1.1 mrg switch (regno)
612 1.1 mrg {
613 1.1 mrg case R0_REGNO:
614 1.1 mrg return R0_REGS;
615 1.1 mrg case R1_REGNO:
616 1.1 mrg return R1_REGS;
617 1.1 mrg case R2_REGNO:
618 1.1 mrg return R2_REGS;
619 1.1 mrg case R3_REGNO:
620 1.1 mrg return R3_REGS;
621 1.1 mrg case A0_REGNO:
622 1.1 mrg return A0_REGS;
623 1.1 mrg case A1_REGNO:
624 1.1 mrg return A1_REGS;
625 1.1 mrg case SB_REGNO:
626 1.1 mrg return SB_REGS;
627 1.1 mrg case FB_REGNO:
628 1.1 mrg return FB_REGS;
629 1.1 mrg case SP_REGNO:
630 1.1 mrg return SP_REGS;
631 1.1 mrg case FLG_REGNO:
632 1.1 mrg return FLG_REGS;
633 1.1 mrg default:
634 1.1 mrg if (IS_MEM_REGNO (regno))
635 1.1 mrg return MEM_REGS;
636 1.1 mrg return ALL_REGS;
637 1.1 mrg }
638 1.1 mrg }
639 1.1 mrg
640 1.1 mrg /* Implements REGNO_OK_FOR_BASE_P. */
641 1.1 mrg int
642 1.1 mrg m32c_regno_ok_for_base_p (int regno)
643 1.1 mrg {
644 1.1 mrg if (regno == A0_REGNO
645 1.1 mrg || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
646 1.1 mrg return 1;
647 1.1 mrg return 0;
648 1.1 mrg }
649 1.1 mrg
650 1.1 mrg /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
651 1.1 mrg registers of the appropriate size. */
652 1.1 mrg
653 1.1 mrg #undef TARGET_PREFERRED_RELOAD_CLASS
654 1.1 mrg #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
655 1.1 mrg
656 1.1 mrg static reg_class_t
657 1.1 mrg m32c_preferred_reload_class (rtx x, reg_class_t rclass)
658 1.1 mrg {
659 1.1 mrg reg_class_t newclass = rclass;
660 1.1 mrg
661 1.1 mrg #if DEBUG0
662 1.1 mrg fprintf (stderr, "\npreferred_reload_class for %s is ",
663 1.1 mrg class_names[rclass]);
664 1.1 mrg #endif
665 1.1 mrg if (rclass == NO_REGS)
666 1.1 mrg rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
667 1.1 mrg
668 1.1 mrg if (reg_classes_intersect_p (rclass, CR_REGS))
669 1.1 mrg {
670 1.1 mrg switch (GET_MODE (x))
671 1.1 mrg {
672 1.1 mrg case E_QImode:
673 1.1 mrg newclass = HL_REGS;
674 1.1 mrg break;
675 1.1 mrg default:
676 1.1 mrg /* newclass = HI_REGS; */
677 1.1 mrg break;
678 1.1 mrg }
679 1.1 mrg }
680 1.1 mrg
681 1.1 mrg else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
682 1.1 mrg newclass = SI_REGS;
683 1.1 mrg else if (GET_MODE_SIZE (GET_MODE (x)) > 4
684 1.1 mrg && ! reg_class_subset_p (R03_REGS, rclass))
685 1.1 mrg newclass = DI_REGS;
686 1.1 mrg
687 1.1 mrg rclass = reduce_class (rclass, newclass, rclass);
688 1.1 mrg
689 1.1 mrg if (GET_MODE (x) == QImode)
690 1.1 mrg rclass = reduce_class (rclass, HL_REGS, rclass);
691 1.1 mrg
692 1.1 mrg #if DEBUG0
693 1.1 mrg fprintf (stderr, "%s\n", class_names[rclass]);
694 1.1 mrg debug_rtx (x);
695 1.1 mrg
696 1.1 mrg if (GET_CODE (x) == MEM
697 1.1 mrg && GET_CODE (XEXP (x, 0)) == PLUS
698 1.1 mrg && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
699 1.1 mrg fprintf (stderr, "Glorm!\n");
700 1.1 mrg #endif
701 1.1 mrg return rclass;
702 1.1 mrg }
703 1.1 mrg
704 1.1 mrg /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
705 1.1 mrg
706 1.1 mrg #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
707 1.1 mrg #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
708 1.1 mrg
709 1.1 mrg static reg_class_t
710 1.1 mrg m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
711 1.1 mrg {
712 1.1 mrg return m32c_preferred_reload_class (x, rclass);
713 1.1 mrg }
714 1.1 mrg
715 1.1 mrg /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
716 1.1 mrg address registers for reloads since they're needed for address
717 1.1 mrg reloads. */
718 1.1 mrg int
719 1.1 mrg m32c_limit_reload_class (machine_mode mode, int rclass)
720 1.1 mrg {
721 1.1 mrg #if DEBUG0
722 1.1 mrg fprintf (stderr, "limit_reload_class for %s: %s ->",
723 1.1 mrg mode_name[mode], class_names[rclass]);
724 1.1 mrg #endif
725 1.1 mrg
726 1.1 mrg if (mode == QImode)
727 1.1 mrg rclass = reduce_class (rclass, HL_REGS, rclass);
728 1.1 mrg else if (mode == HImode)
729 1.1 mrg rclass = reduce_class (rclass, HI_REGS, rclass);
730 1.1 mrg else if (mode == SImode)
731 1.1 mrg rclass = reduce_class (rclass, SI_REGS, rclass);
732 1.1 mrg
733 1.1 mrg if (rclass != A_REGS)
734 1.1 mrg rclass = reduce_class (rclass, DI_REGS, rclass);
735 1.1 mrg
736 1.1 mrg #if DEBUG0
737 1.1 mrg fprintf (stderr, " %s\n", class_names[rclass]);
738 1.1 mrg #endif
739 1.1 mrg return rclass;
740 1.1 mrg }
741 1.1 mrg
742 1.1 mrg /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
743 1.1 mrg r0 or r1, as those are the only real QImode registers. CR regs get
744 1.1 mrg reloaded through appropriately sized general or address
745 1.1 mrg registers. */
746 1.1 mrg int
747 1.1 mrg m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
748 1.1 mrg {
749 1.1 mrg int cc = class_contents[rclass][0];
750 1.1 mrg #if DEBUG0
751 1.1 mrg fprintf (stderr, "\nsecondary reload class %s %s\n",
752 1.1 mrg class_names[rclass], mode_name[mode]);
753 1.1 mrg debug_rtx (x);
754 1.1 mrg #endif
755 1.1 mrg if (mode == QImode
756 1.1 mrg && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
757 1.1 mrg return QI_REGS;
758 1.1 mrg if (reg_classes_intersect_p (rclass, CR_REGS)
759 1.1 mrg && GET_CODE (x) == REG
760 1.1 mrg && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
761 1.1 mrg return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
762 1.1 mrg return NO_REGS;
763 1.1 mrg }
764 1.1 mrg
765 1.1 mrg /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
766 1.1 mrg reloads. */
767 1.1 mrg
768 1.1 mrg #undef TARGET_CLASS_LIKELY_SPILLED_P
769 1.1 mrg #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
770 1.1 mrg
771 1.1 mrg static bool
772 1.1 mrg m32c_class_likely_spilled_p (reg_class_t regclass)
773 1.1 mrg {
774 1.1 mrg if (regclass == A_REGS)
775 1.1 mrg return true;
776 1.1 mrg
777 1.1 mrg return (reg_class_size[(int) regclass] == 1);
778 1.1 mrg }
779 1.1 mrg
780 1.1 mrg /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
781 1.1 mrg documented meaning, to avoid potential inconsistencies with actual
782 1.1 mrg class definitions. */
783 1.1 mrg
784 1.1 mrg #undef TARGET_CLASS_MAX_NREGS
785 1.1 mrg #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
786 1.1 mrg
787 1.1 mrg static unsigned char
788 1.1 mrg m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
789 1.1 mrg {
790 1.1 mrg int rn;
791 1.1 mrg unsigned char max = 0;
792 1.1 mrg
793 1.1 mrg for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
794 1.1 mrg if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
795 1.1 mrg {
796 1.1 mrg unsigned char n = m32c_hard_regno_nregs (rn, mode);
797 1.1 mrg if (max < n)
798 1.1 mrg max = n;
799 1.1 mrg }
800 1.1 mrg return max;
801 1.1 mrg }
802 1.1 mrg
803 1.1 mrg /* Implements TARGET_CAN_CHANGE_MODE_CLASS. Only r0 and r1 can change to
804 1.1 mrg QI (r0l, r1l) because the chip doesn't support QI ops on other
805 1.1 mrg registers (well, it does on a0/a1 but if we let gcc do that, reload
806 1.1 mrg suffers). Otherwise, we allow changes to larger modes. */
807 1.1 mrg static bool
808 1.1 mrg m32c_can_change_mode_class (machine_mode from,
809 1.1 mrg machine_mode to, reg_class_t rclass)
810 1.1 mrg {
811 1.1 mrg int rn;
812 1.1 mrg #if DEBUG0
813 1.1 mrg fprintf (stderr, "can change from %s to %s in %s\n",
814 1.1 mrg mode_name[from], mode_name[to], class_names[rclass]);
815 1.1 mrg #endif
816 1.1 mrg
817 1.1 mrg /* If the larger mode isn't allowed in any of these registers, we
818 1.1 mrg can't allow the change. */
819 1.1 mrg for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
820 1.1 mrg if (class_contents[rclass][0] & (1 << rn))
821 1.1 mrg if (! m32c_hard_regno_mode_ok (rn, to))
822 1.1 mrg return false;
823 1.1 mrg
824 1.1 mrg if (to == QImode)
825 1.1 mrg return (class_contents[rclass][0] & 0x1ffa) == 0;
826 1.1 mrg
827 1.1 mrg if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
828 1.1 mrg && GET_MODE_SIZE (from) > 1)
829 1.1 mrg return true;
830 1.1 mrg if (GET_MODE_SIZE (from) > 2) /* all other regs */
831 1.1 mrg return true;
832 1.1 mrg
833 1.1 mrg return false;
834 1.1 mrg }
835 1.1 mrg
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if the rtx is a0 or a not-yet-allocated pseudo.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
848 1.1 mrg
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  VALUE is first flattened by encode_pattern,
   which fills the global patternr[] array with the sub-rtxes and
   records a shape string that RTX_IS compares against.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-address-space memory: a plain (mem (reg)) through $a0 or a
       pseudo in SImode, one of the base-plus-offset shapes with an
       HImode base, or an "ms"-shaped address.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  /* Only a $fb base with a zero inner offset qualifies.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* $sp-relative addresses are explicitly excluded from Sd.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      /* Anything else must also be a legitimate (strict) address.  */
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with or without a displacement.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant / symbolic absolute addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* $sp-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* $fb-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* $sb-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
934 1.1 mrg
935 1.1 mrg /* STACK AND CALLING */
936 1.1 mrg
937 1.1 mrg /* Frame Layout */
938 1.1 mrg
939 1.1 mrg /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
940 1.1 mrg (yes, THREE bytes) onto the stack for the return address, but we
941 1.1 mrg don't support pointers bigger than 16 bits on those chips. This
942 1.1 mrg will likely wreak havoc with exception unwinding. FIXME. */
943 1.1 mrg rtx
944 1.1 mrg m32c_return_addr_rtx (int count)
945 1.1 mrg {
946 1.1 mrg machine_mode mode;
947 1.1 mrg int offset;
948 1.1 mrg rtx ra_mem;
949 1.1 mrg
950 1.1 mrg if (count)
951 1.1 mrg return NULL_RTX;
952 1.1 mrg /* we want 2[$fb] */
953 1.1 mrg
954 1.1 mrg if (TARGET_A24)
955 1.1 mrg {
956 1.1 mrg /* It's four bytes */
957 1.1 mrg mode = PSImode;
958 1.1 mrg offset = 4;
959 1.1 mrg }
960 1.1 mrg else
961 1.1 mrg {
962 1.1 mrg /* FIXME: it's really 3 bytes */
963 1.1 mrg mode = HImode;
964 1.1 mrg offset = 2;
965 1.1 mrg }
966 1.1 mrg
967 1.1 mrg ra_mem =
968 1.1 mrg gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
969 1.1 mrg offset));
970 1.1 mrg return copy_to_mode_reg (mode, ra_mem);
971 1.1 mrg }
972 1.1 mrg
973 1.1 mrg /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
974 1.1 mrg rtx
975 1.1 mrg m32c_incoming_return_addr_rtx (void)
976 1.1 mrg {
977 1.1 mrg /* we want [sp] */
978 1.1 mrg return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
979 1.1 mrg }
980 1.1 mrg
981 1.1 mrg /* Exception Handling Support */
982 1.1 mrg
983 1.1 mrg /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
984 1.1 mrg pointers. */
985 1.1 mrg int
986 1.1 mrg m32c_eh_return_data_regno (int n)
987 1.1 mrg {
988 1.1 mrg switch (n)
989 1.1 mrg {
990 1.1 mrg case 0:
991 1.1 mrg return MEM0_REGNO;
992 1.1 mrg case 1:
993 1.1 mrg return MEM0_REGNO+4;
994 1.1 mrg default:
995 1.1 mrg return INVALID_REGNUM;
996 1.1 mrg }
997 1.1 mrg }
998 1.1 mrg
999 1.1 mrg /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1000 1.1 mrg m32c_emit_eh_epilogue. */
1001 1.1 mrg rtx
1002 1.1 mrg m32c_eh_return_stackadj_rtx (void)
1003 1.1 mrg {
1004 1.1 mrg if (!cfun->machine->eh_stack_adjust)
1005 1.1 mrg {
1006 1.1 mrg rtx sa;
1007 1.1 mrg
1008 1.1 mrg sa = gen_rtx_REG (Pmode, R0_REGNO);
1009 1.1 mrg cfun->machine->eh_stack_adjust = sa;
1010 1.1 mrg }
1011 1.1 mrg return cfun->machine->eh_stack_adjust;
1012 1.1 mrg }
1013 1.1 mrg
1014 1.1 mrg /* Registers That Address the Stack Frame */
1015 1.1 mrg
1016 1.1 mrg /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1017 1.1 mrg the original spec called for dwarf numbers to vary with register
1018 1.1 mrg width as well, for example, r0l, r0, and r2r0 would each have
1019 1.1 mrg different dwarf numbers. GCC doesn't support this, and we don't do
1020 1.1 mrg it, and gdb seems to like it this way anyway. */
1021 1.1 mrg unsigned int
1022 1.1 mrg m32c_dwarf_frame_regnum (int n)
1023 1.1 mrg {
1024 1.1 mrg switch (n)
1025 1.1 mrg {
1026 1.1 mrg case R0_REGNO:
1027 1.1 mrg return 5;
1028 1.1 mrg case R1_REGNO:
1029 1.1 mrg return 6;
1030 1.1 mrg case R2_REGNO:
1031 1.1 mrg return 7;
1032 1.1 mrg case R3_REGNO:
1033 1.1 mrg return 8;
1034 1.1 mrg case A0_REGNO:
1035 1.1 mrg return 9;
1036 1.1 mrg case A1_REGNO:
1037 1.1 mrg return 10;
1038 1.1 mrg case FB_REGNO:
1039 1.1 mrg return 11;
1040 1.1 mrg case SB_REGNO:
1041 1.1 mrg return 19;
1042 1.1 mrg
1043 1.1 mrg case SP_REGNO:
1044 1.1 mrg return 12;
1045 1.1 mrg case PC_REGNO:
1046 1.1 mrg return 13;
1047 1.1 mrg default:
1048 1.1 mrg return DWARF_FRAME_REGISTERS + 1;
1049 1.1 mrg }
1050 1.1 mrg }
1051 1.1 mrg
1052 1.1 mrg /* The frame looks like this:
1053 1.1 mrg
1054 1.1 mrg ap -> +------------------------------
1055 1.1 mrg | Return address (3 or 4 bytes)
1056 1.1 mrg | Saved FB (2 or 4 bytes)
1057 1.1 mrg fb -> +------------------------------
1058 1.1 mrg | local vars
1059 1.1 mrg | register saves fb
1060 1.1 mrg | through r0 as needed
1061 1.1 mrg sp -> +------------------------------
1062 1.1 mrg */
1063 1.1 mrg
/* We use this to wrap all emitted insns in the prologue.  Marks X as
   frame-related (so the unwind-info machinery picks it up) and
   returns X for call chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1071 1.1 mrg
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* Hard register number.  */
  int bit;		/* Corresponding bit in the PUSHM/POPM operand.  */
  int a16_bytes;	/* Bytes pushed on A16 (R8C/M16C) parts.  */
  int a24_bytes;	/* Bytes pushed on A24 (M32C) parts.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in the pushm_info table.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1094 1.1 mrg
1095 1.1 mrg /* Returns TRUE if we need to save/restore the given register. We
1096 1.1 mrg save everything for exception handlers, so that any register can be
1097 1.1 mrg unwound. For interrupt handlers, we save everything if the handler
1098 1.1 mrg calls something else (because we don't know what *that* function
1099 1.1 mrg might do), but try to be a bit smarter if the handler is a leaf
1100 1.1 mrg function. We always save $a0, though, because we use that in the
1101 1.1 mrg epilogue to copy $fb to $sp. */
1102 1.1 mrg static int
1103 1.1 mrg need_to_save (int regno)
1104 1.1 mrg {
1105 1.1 mrg if (fixed_regs[regno])
1106 1.1 mrg return 0;
1107 1.1 mrg if (crtl->calls_eh_return)
1108 1.1 mrg return 1;
1109 1.1 mrg if (regno == FP_REGNO)
1110 1.1 mrg return 0;
1111 1.1 mrg if (cfun->machine->is_interrupt
1112 1.1 mrg && (!cfun->machine->is_leaf
1113 1.1 mrg || (regno == A0_REGNO
1114 1.1 mrg && m32c_function_needs_enter ())
1115 1.1 mrg ))
1116 1.1 mrg return 1;
1117 1.1 mrg if (df_regs_ever_live_p (regno)
1118 1.1 mrg && (!call_used_or_fixed_reg_p (regno) || cfun->machine->is_interrupt))
1119 1.1 mrg return 1;
1120 1.1 mrg return 0;
1121 1.1 mrg }
1122 1.1 mrg
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  Returns the number of bytes the stack pointer moves.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM operand bits accumulated so far.  */
  int byte_count = 0, bytes;	/* Total stack adjustment, per-reg size.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* Per-register frame-related SETs for unwind info.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Bits for registers we must NOT touch.  */

  /* If the function returns a multi-register value (a PARALLEL),
     popping those registers in the epilogue would clobber the return
     value, so exclude them from the save set.  (EH and interrupt
     returns still save everything.)  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      /* NOTE(review): the bit values here index pushm_info's .bit
	 field (0x80=r0 ... 0x10=r3); the exact mask choices below are
	 taken on faith from the original inline comments.  */
      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the pushable registers in push order, building the mask and
     byte count, and (for PP_pushm) the DWARF annotations.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record where this register lands so the unwinder can find
	     it; the SETs are attached to the pushm insn below.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save via their own mechanism (intr_pushm /
     explicit pushes of the memregs), so reset the ordinary set.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers also save any live memregs, two bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Wrap the whole save in one REG_FRAME_RELATED_EXPR note: the
	 SP adjustment plus each register store.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Memregs are pushed one at a time, after the pushm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in reverse order: memregs first, then the popm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1256 1.1 mrg
1257 1.1 mrg /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1258 1.1 mrg diagrams our call frame. */
1259 1.1 mrg int
1260 1.1 mrg m32c_initial_elimination_offset (int from, int to)
1261 1.1 mrg {
1262 1.1 mrg int ofs = 0;
1263 1.1 mrg
1264 1.1 mrg if (from == AP_REGNO)
1265 1.1 mrg {
1266 1.1 mrg if (TARGET_A16)
1267 1.1 mrg ofs += 5;
1268 1.1 mrg else
1269 1.1 mrg ofs += 8;
1270 1.1 mrg }
1271 1.1 mrg
1272 1.1 mrg if (to == SP_REGNO)
1273 1.1 mrg {
1274 1.1 mrg ofs += m32c_pushm_popm (PP_justcount);
1275 1.1 mrg ofs += get_frame_size ();
1276 1.1 mrg }
1277 1.1 mrg
1278 1.1 mrg /* Account for push rounding. */
1279 1.1 mrg if (TARGET_A24)
1280 1.1 mrg ofs = (ofs + 1) & ~1;
1281 1.1 mrg #if DEBUG0
1282 1.1 mrg fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1283 1.1 mrg to, ofs);
1284 1.1 mrg #endif
1285 1.1 mrg return ofs;
1286 1.1 mrg }
1287 1.1 mrg
1288 1.1 mrg /* Passing Function Arguments on the Stack */
1289 1.1 mrg
1290 1.1 mrg /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1291 1.1 mrg M32C has word stacks. */
1292 1.1 mrg poly_int64
1293 1.1 mrg m32c_push_rounding (poly_int64 n)
1294 1.1 mrg {
1295 1.1 mrg if (TARGET_R8C || TARGET_M16C)
1296 1.1 mrg return n;
1297 1.1 mrg return (n + 1) & ~1;
1298 1.1 mrg }
1299 1.1 mrg
1300 1.1 mrg #undef TARGET_PUSH_ARGUMENT
1301 1.1 mrg #define TARGET_PUSH_ARGUMENT hook_bool_uint_true
1302 1.1 mrg
1303 1.1 mrg /* Passing Arguments in Registers */
1304 1.1 mrg
/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[arg.mode], arg.named);
  debug_tree (arg.type);
#endif

  if (arg.end_marker_p ())
    return GEN_INT (0);

  /* After the hidden return-buffer pointer, and for unnamed
     (variadic) arguments, everything goes on the stack.  */
  if (ca->force_mem || !arg.named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       arg.named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): this condition can never be true — a type cannot be
     both INTEGRAL_TYPE_P and POINTER_TYPE_P at once — so the check
     never fires.  The comment above says only integer and pointer
     types are passed in registers, so "!INTEGRAL_TYPE_P (arg.type)
     && !POINTER_TYPE_P (arg.type)" was probably intended.  Changing
     it would alter the calling convention for other small scalar
     types, so confirm against the documented ABI before fixing.  */
  if (arg.type && INTEGRAL_TYPE_P (arg.type) && POINTER_TYPE_P (arg.type))
    return NULL_RTX;

  if (arg.aggregate_type_p ())
    return NULL_RTX;

  /* First argument goes in r1 (A16) or r0 (A24) when one or two
     bytes; second goes in r2 on A16 only, when two bytes.  */
  switch (ca->parm_num)
    {
    case 1:
      if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1373 1.1 mrg
1374 1.1 mrg #undef TARGET_PASS_BY_REFERENCE
1375 1.1 mrg #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1376 1.1 mrg static bool
1377 1.1 mrg m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
1378 1.1 mrg {
1379 1.1 mrg return 0;
1380 1.1 mrg }
1381 1.1 mrg
1382 1.1 mrg /* Implements INIT_CUMULATIVE_ARGS. */
1383 1.1 mrg void
1384 1.1 mrg m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1385 1.1 mrg tree fntype,
1386 1.1 mrg rtx libname ATTRIBUTE_UNUSED,
1387 1.1 mrg tree fndecl,
1388 1.1 mrg int n_named_args ATTRIBUTE_UNUSED)
1389 1.1 mrg {
1390 1.1 mrg if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1391 1.1 mrg ca->force_mem = 1;
1392 1.1 mrg else
1393 1.1 mrg ca->force_mem = 0;
1394 1.1 mrg ca->parm_num = 1;
1395 1.1 mrg }
1396 1.1 mrg
1397 1.1 mrg /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1398 1.1 mrg functions returning structures, so we always reset that. Otherwise,
1399 1.1 mrg we only need to know the sequence number of the argument to know what
1400 1.1 mrg to do with it. */
1401 1.1 mrg #undef TARGET_FUNCTION_ARG_ADVANCE
1402 1.1 mrg #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1403 1.1 mrg static void
1404 1.1 mrg m32c_function_arg_advance (cumulative_args_t ca_v,
1405 1.1 mrg const function_arg_info &)
1406 1.1 mrg {
1407 1.1 mrg CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1408 1.1 mrg
1409 1.1 mrg if (ca->force_mem)
1410 1.1 mrg ca->force_mem = 0;
1411 1.1 mrg else
1412 1.1 mrg ca->parm_num++;
1413 1.1 mrg }
1414 1.1 mrg
1415 1.1 mrg /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1416 1.1 mrg #undef TARGET_FUNCTION_ARG_BOUNDARY
1417 1.1 mrg #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1418 1.1 mrg static unsigned int
1419 1.1 mrg m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1420 1.1 mrg const_tree type ATTRIBUTE_UNUSED)
1421 1.1 mrg {
1422 1.1 mrg return (TARGET_A16 ? 8 : 16);
1423 1.1 mrg }
1424 1.1 mrg
1425 1.1 mrg /* Implements FUNCTION_ARG_REGNO_P. */
1426 1.1 mrg int
1427 1.1 mrg m32c_function_arg_regno_p (int r)
1428 1.1 mrg {
1429 1.1 mrg if (TARGET_A24)
1430 1.1 mrg return (r == R0_REGNO);
1431 1.1 mrg return (r == R1_REGNO || r == R2_REGNO);
1432 1.1 mrg }
1433 1.1 mrg
1434 1.1 mrg /* HImode and PSImode are the two "native" modes as far as GCC is
1435 1.1 mrg concerned, but the chips also support a 32-bit mode which is used
1436 1.1 mrg for some opcodes in R8C/M16C and for reset vectors and such. */
1437 1.1 mrg #undef TARGET_VALID_POINTER_MODE
1438 1.1 mrg #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1439 1.1 mrg static bool
1440 1.1 mrg m32c_valid_pointer_mode (scalar_int_mode mode)
1441 1.1 mrg {
1442 1.1 mrg if (mode == HImode
1443 1.1 mrg || mode == PSImode
1444 1.1 mrg || mode == SImode
1445 1.1 mrg )
1446 1.1 mrg return 1;
1447 1.1 mrg return 0;
1448 1.1 mrg }
1449 1.1 mrg
/* How Scalar Function Values Are Returned */

/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* The live policy: anything wider than 16 bits comes back in mem0,
     everything else in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1513 1.1 mrg
1514 1.1 mrg /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1515 1.1 mrg conventions. */
1516 1.1 mrg
1517 1.1 mrg #undef TARGET_FUNCTION_VALUE
1518 1.1 mrg #define TARGET_FUNCTION_VALUE m32c_function_value
1519 1.1 mrg
1520 1.1 mrg static rtx
1521 1.1 mrg m32c_function_value (const_tree valtype,
1522 1.1 mrg const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1523 1.1 mrg bool outgoing ATTRIBUTE_UNUSED)
1524 1.1 mrg {
1525 1.1 mrg /* return reg or parallel */
1526 1.1 mrg const machine_mode mode = TYPE_MODE (valtype);
1527 1.1 mrg return m32c_libcall_value (mode, NULL_RTX);
1528 1.1 mrg }
1529 1.1 mrg
1530 1.1 mrg /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1531 1.1 mrg
1532 1.1 mrg #undef TARGET_FUNCTION_VALUE_REGNO_P
1533 1.1 mrg #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1534 1.1 mrg
1535 1.1 mrg static bool
1536 1.1 mrg m32c_function_value_regno_p (const unsigned int regno)
1537 1.1 mrg {
1538 1.1 mrg return (regno == R0_REGNO || regno == MEM0_REGNO);
1539 1.1 mrg }
1540 1.1 mrg
1541 1.1 mrg /* How Large Values Are Returned */
1542 1.1 mrg
1543 1.1 mrg /* We return structures by pushing the address on the stack, even if
1544 1.1 mrg we use registers for the first few "real" arguments. */
1545 1.1 mrg #undef TARGET_STRUCT_VALUE_RTX
1546 1.1 mrg #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1547 1.1 mrg static rtx
1548 1.1 mrg m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1549 1.1 mrg int incoming ATTRIBUTE_UNUSED)
1550 1.1 mrg {
1551 1.1 mrg return 0;
1552 1.1 mrg }
1553 1.1 mrg
1554 1.1 mrg /* Function Entry and Exit */
1555 1.1 mrg
1556 1.1 mrg /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1557 1.1 mrg int
1558 1.1 mrg m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1559 1.1 mrg {
1560 1.1 mrg if (cfun->machine->is_interrupt)
1561 1.1 mrg return 1;
1562 1.1 mrg return 0;
1563 1.1 mrg }
1564 1.1 mrg
1565 1.1 mrg /* Implementing the Varargs Macros */
1566 1.1 mrg
1567 1.1 mrg #undef TARGET_STRICT_ARGUMENT_NAMING
1568 1.1 mrg #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1569 1.1 mrg static bool
1570 1.1 mrg m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1571 1.1 mrg {
1572 1.1 mrg return 1;
1573 1.1 mrg }
1574 1.1 mrg
1575 1.1 mrg /* Trampolines for Nested Functions */
1576 1.1 mrg
1577 1.1 mrg /*
1578 1.1 mrg m16c:
1579 1.1 mrg 1 0000 75C43412 mov.w #0x1234,a0
1580 1.1 mrg 2 0004 FC000000 jmp.a label
1581 1.1 mrg
1582 1.1 mrg m32c:
1583 1.1 mrg 1 0000 BC563412 mov.l:s #0x123456,a0
1584 1.1 mrg 2 0004 CC000000 jmp.a label
1585 1.1 mrg */
1586 1.1 mrg
1587 1.1 mrg /* Implements TRAMPOLINE_SIZE. */
1588 1.1 mrg int
1589 1.1 mrg m32c_trampoline_size (void)
1590 1.1 mrg {
1591 1.1 mrg /* Allocate extra space so we can avoid the messy shifts when we
1592 1.1 mrg initialize the trampoline; we just write past the end of the
1593 1.1 mrg opcode. */
1594 1.1 mrg return TARGET_A16 ? 8 : 10;
1595 1.1 mrg }
1596 1.1 mrg
/* Implements TRAMPOLINE_ALIGNMENT.  NOTE(review): returns 2 — check
   the TRAMPOLINE_ALIGNMENT documentation for whether this is bits or
   bytes before relying on it.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1603 1.1 mrg
/* Implements TARGET_TRAMPOLINE_INIT.  Fills in the trampoline block
   M_TRAMP with a "load static chain into $a0, jump to FNDECL"
   sequence (the opcode bytes match the assembly listings in the
   comment preceding m32c_trampoline_size).  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand: byte offset I into the trampoline, accessed in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1638 1.1 mrg
1639 1.1 mrg #undef TARGET_LRA_P
1640 1.1 mrg #define TARGET_LRA_P hook_bool_void_false
1641 1.1 mrg
/* Addressing Modes */

/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  /* Constant (symbolic/absolute) addresses are always fine.  */
  if (CONSTANT_P (x))
    return 1;

  /* The address itself must be in a pointer-sized mode for the
     family: HI/SI on A16, PSI on A24.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify forms are only valid on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1778 1.1 mrg
1779 1.1 mrg /* Implements REG_OK_FOR_BASE_P. */
1780 1.1 mrg int
1781 1.1 mrg m32c_reg_ok_for_base_p (rtx x, int strict)
1782 1.1 mrg {
1783 1.1 mrg if (GET_CODE (x) != REG)
1784 1.1 mrg return 0;
1785 1.1 mrg switch (REGNO (x))
1786 1.1 mrg {
1787 1.1 mrg case A0_REGNO:
1788 1.1 mrg case A1_REGNO:
1789 1.1 mrg case SB_REGNO:
1790 1.1 mrg case FB_REGNO:
1791 1.1 mrg case SP_REGNO:
1792 1.1 mrg return 1;
1793 1.1 mrg default:
1794 1.1 mrg if (IS_PSEUDO (x, strict))
1795 1.1 mrg return 1;
1796 1.1 mrg return 0;
1797 1.1 mrg }
1798 1.1 mrg }
1799 1.1 mrg
1800 1.1 mrg /* We have three choices for choosing fb->aN offsets. If we choose -128,
1801 1.1 mrg we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1802 1.1 mrg like this:
1803 1.1 mrg EB 4B FF mova -128[$fb],$a0
1804 1.1 mrg D8 0C FF FF mov.w:Q #0,-1[$a0]
1805 1.1 mrg
1806 1.1 mrg Alternately, we subtract the frame size, and hopefully use 8-bit aN
1807 1.1 mrg displacements:
1808 1.1 mrg 7B F4 stc $fb,$a0
1809 1.1 mrg 77 54 00 01 sub #256,$a0
1810 1.1 mrg D8 08 01 mov.w:Q #0,1[$a0]
1811 1.1 mrg
1812 1.1 mrg If we don't offset (i.e. offset by zero), we end up with:
1813 1.1 mrg 7B F4 stc $fb,$a0
1814 1.1 mrg D8 0C 00 FF mov.w:Q #0,-256[$a0]
1815 1.1 mrg
1816 1.1 mrg We have to subtract *something* so that we have a PLUS rtx to mark
1817 1.1 mrg that we've done this reload. The -128 offset will never result in
1818 1.1 mrg an 8-bit aN offset, and the payoff for the second case is five
1819 1.1 mrg loads *if* those loads are within 256 bytes of the other end of the
1820 1.1 mrg frame, so the third case seems best. Note that we subtract the
1821 1.1 mrg zero, but detect that in the addhi3 pattern. */
1822 1.1 mrg
1823 1.1 mrg #define BIG_FB_ADJ 0
1824 1.1 mrg
1825 1.1 mrg /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1826 1.1 mrg worry about is frame base offsets, as $fb has a limited
1827 1.1 mrg displacement range. We deal with this by attempting to reload $fb
1828 1.1 mrg itself into an address register; that seems to result in the best
1829 1.1 mrg code. */
1830 1.1 mrg #undef TARGET_LEGITIMIZE_ADDRESS
1831 1.1 mrg #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1832 1.1 mrg static rtx
1833 1.1 mrg m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1834 1.1 mrg machine_mode mode)
1835 1.1 mrg {
1836 1.1 mrg #if DEBUG0
1837 1.1 mrg fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1838 1.1 mrg debug_rtx (x);
1839 1.1 mrg fprintf (stderr, "\n");
1840 1.1 mrg #endif
1841 1.1 mrg
1842 1.1 mrg if (GET_CODE (x) == PLUS
1843 1.1 mrg && GET_CODE (XEXP (x, 0)) == REG
1844 1.1 mrg && REGNO (XEXP (x, 0)) == FB_REGNO
1845 1.1 mrg && GET_CODE (XEXP (x, 1)) == CONST_INT
1846 1.1 mrg && (INTVAL (XEXP (x, 1)) < -128
1847 1.1 mrg || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1848 1.1 mrg {
1849 1.1 mrg /* reload FB to A_REGS */
1850 1.1 mrg rtx temp = gen_reg_rtx (Pmode);
1851 1.1 mrg x = copy_rtx (x);
1852 1.1 mrg emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1853 1.1 mrg XEXP (x, 0) = temp;
1854 1.1 mrg }
1855 1.1 mrg
1856 1.1 mrg return x;
1857 1.1 mrg }
1858 1.1 mrg
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero if we pushed a reload ourselves and the caller need do
   nothing more; zero to let the generic reload machinery handle the
   address.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: $fb plus a constant displacement outside $fb's signed
     8-bit range.  Split the address into ($fb + adjustment) plus the
     remainder, and reload the inner sum into an address register.
     BIG_FB_ADJ is currently 0; the PLUS-with-zero marks the address
     as already processed (detected in the addhi3 pattern).  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an address of the form (($fb + const) + const), i.e. one
     already split as above.  Reload the inner ($fb + const) sum into
     an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 3: on A24 parts, PSImode addresses are reloaded wholesale
     into an address register.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1929 1.1 mrg
1930 1.1 mrg /* Return the appropriate mode for a named address pointer. */
1931 1.1 mrg #undef TARGET_ADDR_SPACE_POINTER_MODE
1932 1.1 mrg #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1933 1.1 mrg static scalar_int_mode
1934 1.1 mrg m32c_addr_space_pointer_mode (addr_space_t addrspace)
1935 1.1 mrg {
1936 1.1 mrg switch (addrspace)
1937 1.1 mrg {
1938 1.1 mrg case ADDR_SPACE_GENERIC:
1939 1.1 mrg return TARGET_A24 ? PSImode : HImode;
1940 1.1 mrg case ADDR_SPACE_FAR:
1941 1.1 mrg return SImode;
1942 1.1 mrg default:
1943 1.1 mrg gcc_unreachable ();
1944 1.1 mrg }
1945 1.1 mrg }
1946 1.1 mrg
1947 1.1 mrg /* Return the appropriate mode for a named address address. */
1948 1.1 mrg #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1949 1.1 mrg #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1950 1.1 mrg static scalar_int_mode
1951 1.1 mrg m32c_addr_space_address_mode (addr_space_t addrspace)
1952 1.1 mrg {
1953 1.1 mrg switch (addrspace)
1954 1.1 mrg {
1955 1.1 mrg case ADDR_SPACE_GENERIC:
1956 1.1 mrg return TARGET_A24 ? PSImode : HImode;
1957 1.1 mrg case ADDR_SPACE_FAR:
1958 1.1 mrg return SImode;
1959 1.1 mrg default:
1960 1.1 mrg gcc_unreachable ();
1961 1.1 mrg }
1962 1.1 mrg }
1963 1.1 mrg
/* Like m32c_legitimate_address_p, except with named addresses.  For
   the far space (only meaningful on A16 parts), valid addresses are
   matched against encode_pattern strings; patternr[] holds the
   sub-rtxes captured by the most recent encode_pattern call.
   Pattern letters: 'r' = register, 'i' = const_int, 's' = symbol,
   '+' = PLUS; "^S" appears to mark a far-space conversion wrapper —
   NOTE(review): confirm against encode_pattern's definition, which is
   outside this view.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* A24 parts have no separate far space.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* A bare register: must be a 32-bit $a0, or a pseudo.  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant displacement: $a0 with a 20-bit
	 unsigned offset, or a pseudo with anything.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol: only $a0 (or a pseudo).  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus (const + symbol): only $a0 (or a pseudo).  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol is always addressable.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2053 1.1 mrg
2054 1.1 mrg /* Like m32c_legitimate_address, except with named address support. */
2055 1.1 mrg #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2056 1.1 mrg #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2057 1.1 mrg static rtx
2058 1.1 mrg m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2059 1.1 mrg addr_space_t as)
2060 1.1 mrg {
2061 1.1 mrg if (as != ADDR_SPACE_GENERIC)
2062 1.1 mrg {
2063 1.1 mrg #if DEBUG0
2064 1.1 mrg fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2065 1.1 mrg debug_rtx (x);
2066 1.1 mrg fprintf (stderr, "\n");
2067 1.1 mrg #endif
2068 1.1 mrg
2069 1.1 mrg if (GET_CODE (x) != REG)
2070 1.1 mrg {
2071 1.1 mrg x = force_reg (SImode, x);
2072 1.1 mrg }
2073 1.1 mrg return x;
2074 1.1 mrg }
2075 1.1 mrg
2076 1.1 mrg return m32c_legitimize_address (x, oldx, mode);
2077 1.1 mrg }
2078 1.1 mrg
2079 1.1 mrg /* Determine if one named address space is a subset of another. */
2080 1.1 mrg #undef TARGET_ADDR_SPACE_SUBSET_P
2081 1.1 mrg #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2082 1.1 mrg static bool
2083 1.1 mrg m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2084 1.1 mrg {
2085 1.1 mrg gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2086 1.1 mrg gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2087 1.1 mrg
2088 1.1 mrg if (subset == superset)
2089 1.1 mrg return true;
2090 1.1 mrg
2091 1.1 mrg else
2092 1.1 mrg return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2093 1.1 mrg }
2094 1.1 mrg
2095 1.1 mrg #undef TARGET_ADDR_SPACE_CONVERT
2096 1.1 mrg #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2097 1.1 mrg /* Convert from one address space to another. */
2098 1.1 mrg static rtx
2099 1.1 mrg m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2100 1.1 mrg {
2101 1.1 mrg addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2102 1.1 mrg addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2103 1.1 mrg rtx result;
2104 1.1 mrg
2105 1.1 mrg gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2106 1.1 mrg gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2107 1.1 mrg
2108 1.1 mrg if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2109 1.1 mrg {
2110 1.1 mrg /* This is unpredictable, as we're truncating off usable address
2111 1.1 mrg bits. */
2112 1.1 mrg
2113 1.1 mrg result = gen_reg_rtx (HImode);
2114 1.1 mrg emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2115 1.1 mrg return result;
2116 1.1 mrg }
2117 1.1 mrg else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2118 1.1 mrg {
2119 1.1 mrg /* This always works. */
2120 1.1 mrg result = gen_reg_rtx (SImode);
2121 1.1 mrg emit_insn (gen_zero_extendhisi2 (result, op));
2122 1.1 mrg return result;
2123 1.1 mrg }
2124 1.1 mrg else
2125 1.1 mrg gcc_unreachable ();
2126 1.1 mrg }
2127 1.1 mrg
2128 1.1 mrg /* Condition Code Status */
2129 1.1 mrg
2130 1.1 mrg #undef TARGET_FIXED_CONDITION_CODE_REGS
2131 1.1 mrg #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2132 1.1 mrg static bool
2133 1.1 mrg m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2134 1.1 mrg {
2135 1.1 mrg *p1 = FLG_REGNO;
2136 1.1 mrg *p2 = INVALID_REGNUM;
2137 1.1 mrg return true;
2138 1.1 mrg }
2139 1.1 mrg
2140 1.1 mrg /* Describing Relative Costs of Operations */
2141 1.1 mrg
2142 1.1 mrg /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2143 1.1 mrg prohibitively expensive, like trying to put QIs in r2/r3 (there are
2144 1.1 mrg no opcodes to do that). We also discourage use of mem* registers
2145 1.1 mrg since they're really memory. */
2146 1.1 mrg
2147 1.1 mrg #undef TARGET_REGISTER_MOVE_COST
2148 1.1 mrg #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2149 1.1 mrg
2150 1.1 mrg static int
2151 1.1 mrg m32c_register_move_cost (machine_mode mode, reg_class_t from,
2152 1.1 mrg reg_class_t to)
2153 1.1 mrg {
2154 1.1 mrg int cost = COSTS_N_INSNS (3);
2155 1.1 mrg HARD_REG_SET cc;
2156 1.1 mrg
2157 1.1 mrg /* FIXME: pick real values, but not 2 for now. */
2158 1.1 mrg cc = reg_class_contents[from] | reg_class_contents[(int) to];
2159 1.1 mrg
2160 1.1 mrg if (mode == QImode
2161 1.1 mrg && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2162 1.1 mrg {
2163 1.1 mrg if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2164 1.1 mrg cost = COSTS_N_INSNS (1000);
2165 1.1 mrg else
2166 1.1 mrg cost = COSTS_N_INSNS (80);
2167 1.1 mrg }
2168 1.1 mrg
2169 1.1 mrg if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2170 1.1 mrg cost = COSTS_N_INSNS (1000);
2171 1.1 mrg
2172 1.1 mrg if (reg_classes_intersect_p (from, CR_REGS))
2173 1.1 mrg cost += COSTS_N_INSNS (5);
2174 1.1 mrg
2175 1.1 mrg if (reg_classes_intersect_p (to, CR_REGS))
2176 1.1 mrg cost += COSTS_N_INSNS (5);
2177 1.1 mrg
2178 1.1 mrg if (from == MEM_REGS || to == MEM_REGS)
2179 1.1 mrg cost += COSTS_N_INSNS (50);
2180 1.1 mrg else if (reg_classes_intersect_p (from, MEM_REGS)
2181 1.1 mrg || reg_classes_intersect_p (to, MEM_REGS))
2182 1.1 mrg cost += COSTS_N_INSNS (10);
2183 1.1 mrg
2184 1.1 mrg #if DEBUG0
2185 1.1 mrg fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2186 1.1 mrg mode_name[mode], class_names[(int) from], class_names[(int) to],
2187 1.1 mrg cost);
2188 1.1 mrg #endif
2189 1.1 mrg return cost;
2190 1.1 mrg }
2191 1.1 mrg
2192 1.1 mrg /* Implements TARGET_MEMORY_MOVE_COST. */
2193 1.1 mrg
2194 1.1 mrg #undef TARGET_MEMORY_MOVE_COST
2195 1.1 mrg #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2196 1.1 mrg
2197 1.1 mrg static int
2198 1.1 mrg m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2199 1.1 mrg reg_class_t rclass ATTRIBUTE_UNUSED,
2200 1.1 mrg bool in ATTRIBUTE_UNUSED)
2201 1.1 mrg {
2202 1.1 mrg /* FIXME: pick real values. */
2203 1.1 mrg return COSTS_N_INSNS (10);
2204 1.1 mrg }
2205 1.1 mrg
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  Returns true when *total is the
   final cost for X (stop recursing), false to let the generic code
   add costs for X's operands.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);
  switch (code)
    {
    case REG:
      /* mem0..mem7 are memory-backed pseudo-registers; strongly
	 discourage their use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count: the count must be staged through
	     r1h first.  */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Counts beyond +/-8 don't fit the immediate shift form and
	     need two extra moves.  */
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shift: no extra cost beyond the operands.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a condition into a register costs extra; bare
	 comparisons (e.g. in a branch) fall through to the default
	 costing.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit-field extraction; cost depends on the addressing mode
	   of the operand.  NOTE(review): XEXP (dest, 0) assumes the
	   extract operand is a MEM — confirm the insn patterns only
	   produce this form.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  SImode operations on A16 parts take
	 roughly two HImode operations.  */
      if (TARGET_A16 && mode == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2289 1.1 mrg
2290 1.1 mrg #undef TARGET_ADDRESS_COST
2291 1.1 mrg #define TARGET_ADDRESS_COST m32c_address_cost
2292 1.1 mrg static int
2293 1.1 mrg m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2294 1.1 mrg addr_space_t as ATTRIBUTE_UNUSED,
2295 1.1 mrg bool speed ATTRIBUTE_UNUSED)
2296 1.1 mrg {
2297 1.1 mrg int i;
2298 1.1 mrg /* fprintf(stderr, "\naddress_cost\n");
2299 1.1 mrg debug_rtx(addr);*/
2300 1.1 mrg switch (GET_CODE (addr))
2301 1.1 mrg {
2302 1.1 mrg case CONST_INT:
2303 1.1 mrg i = INTVAL (addr);
2304 1.1 mrg if (i == 0)
2305 1.1 mrg return COSTS_N_INSNS(1);
2306 1.1 mrg if (i > 0 && i <= 255)
2307 1.1 mrg return COSTS_N_INSNS(2);
2308 1.1 mrg if (i > 0 && i <= 65535)
2309 1.1 mrg return COSTS_N_INSNS(3);
2310 1.1 mrg return COSTS_N_INSNS(4);
2311 1.1 mrg case SYMBOL_REF:
2312 1.1 mrg return COSTS_N_INSNS(4);
2313 1.1 mrg case REG:
2314 1.1 mrg return COSTS_N_INSNS(1);
2315 1.1 mrg case PLUS:
2316 1.1 mrg if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2317 1.1 mrg {
2318 1.1 mrg i = INTVAL (XEXP (addr, 1));
2319 1.1 mrg if (i == 0)
2320 1.1 mrg return COSTS_N_INSNS(1);
2321 1.1 mrg if (i > 0 && i <= 255)
2322 1.1 mrg return COSTS_N_INSNS(2);
2323 1.1 mrg if (i > 0 && i <= 65535)
2324 1.1 mrg return COSTS_N_INSNS(3);
2325 1.1 mrg }
2326 1.1 mrg return COSTS_N_INSNS(4);
2327 1.1 mrg default:
2328 1.1 mrg return 0;
2329 1.1 mrg }
2330 1.1 mrg }
2331 1.1 mrg
2332 1.1 mrg /* Defining the Output Assembler Language */
2333 1.1 mrg
2334 1.1 mrg /* Output of Data */
2335 1.1 mrg
2336 1.1 mrg /* We may have 24 bit sizes, which is the native address size.
2337 1.1 mrg Currently unused, but provided for completeness. */
2338 1.1 mrg #undef TARGET_ASM_INTEGER
2339 1.1 mrg #define TARGET_ASM_INTEGER m32c_asm_integer
2340 1.1 mrg static bool
2341 1.1 mrg m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2342 1.1 mrg {
2343 1.1 mrg switch (size)
2344 1.1 mrg {
2345 1.1 mrg case 3:
2346 1.1 mrg fprintf (asm_out_file, "\t.3byte\t");
2347 1.1 mrg output_addr_const (asm_out_file, x);
2348 1.1 mrg fputc ('\n', asm_out_file);
2349 1.1 mrg return true;
2350 1.1 mrg case 4:
2351 1.1 mrg if (GET_CODE (x) == SYMBOL_REF)
2352 1.1 mrg {
2353 1.1 mrg fprintf (asm_out_file, "\t.long\t");
2354 1.1 mrg output_addr_const (asm_out_file, x);
2355 1.1 mrg fputc ('\n', asm_out_file);
2356 1.1 mrg return true;
2357 1.1 mrg }
2358 1.1 mrg break;
2359 1.1 mrg }
2360 1.1 mrg return default_assemble_integer (x, size, aligned_p);
2361 1.1 mrg }
2362 1.1 mrg
2363 1.1 mrg /* Output of Assembler Instructions */
2364 1.1 mrg
/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps a print_operand request to an output template:
   CODE is the %-letter from the insn template (0 matches any letter
   not otherwise handled); PATTERN is the encode_pattern string of the
   operand; FORMAT is the text to emit, where a digit N means "print
   patternr[N]" and backslash quotes the next character.  NOTE(review):
   the 'z' in some formats is handled by print_operand code outside
   this view — presumably a register-name variant; confirm there.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2419 1.1 mrg
/* This is in order according to the bitfield that pushm/popm use.
   Index is the bit number: bit 0 = fb ... bit 7 = r0.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2424 1.1 mrg
2425 1.1 mrg /* Implements TARGET_PRINT_OPERAND. */
2426 1.1 mrg
2427 1.1 mrg #undef TARGET_PRINT_OPERAND
2428 1.1 mrg #define TARGET_PRINT_OPERAND m32c_print_operand
2429 1.1 mrg
2430 1.1 mrg static void
2431 1.1 mrg m32c_print_operand (FILE * file, rtx x, int code)
2432 1.1 mrg {
2433 1.1 mrg int i, j, b;
2434 1.1 mrg const char *comma;
2435 1.1 mrg HOST_WIDE_INT ival;
2436 1.1 mrg int unsigned_const = 0;
2437 1.1 mrg int force_sign;
2438 1.1 mrg
2439 1.1 mrg /* Multiplies; constants are converted to sign-extended format but
2440 1.1 mrg we need unsigned, so 'u' and 'U' tell us what size unsigned we
2441 1.1 mrg need. */
2442 1.1 mrg if (code == 'u')
2443 1.1 mrg {
2444 1.1 mrg unsigned_const = 2;
2445 1.1 mrg code = 0;
2446 1.1 mrg }
2447 1.1 mrg if (code == 'U')
2448 1.1 mrg {
2449 1.1 mrg unsigned_const = 1;
2450 1.1 mrg code = 0;
2451 1.1 mrg }
2452 1.1 mrg /* This one is only for debugging; you can put it in a pattern to
2453 1.1 mrg force this error. */
2454 1.1 mrg if (code == '!')
2455 1.1 mrg {
2456 1.1 mrg fprintf (stderr, "dj: unreviewed pattern:");
2457 1.1 mrg if (current_output_insn)
2458 1.1 mrg debug_rtx (current_output_insn);
2459 1.1 mrg gcc_unreachable ();
2460 1.1 mrg }
2461 1.1 mrg /* PSImode operations are either .w or .l depending on the target. */
2462 1.1 mrg if (code == '&')
2463 1.1 mrg {
2464 1.1 mrg if (TARGET_A16)
2465 1.1 mrg fprintf (file, "w");
2466 1.1 mrg else
2467 1.1 mrg fprintf (file, "l");
2468 1.1 mrg return;
2469 1.1 mrg }
2470 1.1 mrg /* Inverted conditionals. */
2471 1.1 mrg if (code == 'C')
2472 1.1 mrg {
2473 1.1 mrg switch (GET_CODE (x))
2474 1.1 mrg {
2475 1.1 mrg case LE:
2476 1.1 mrg fputs ("gt", file);
2477 1.1 mrg break;
2478 1.1 mrg case LEU:
2479 1.1 mrg fputs ("gtu", file);
2480 1.1 mrg break;
2481 1.1 mrg case LT:
2482 1.1 mrg fputs ("ge", file);
2483 1.1 mrg break;
2484 1.1 mrg case LTU:
2485 1.1 mrg fputs ("geu", file);
2486 1.1 mrg break;
2487 1.1 mrg case GT:
2488 1.1 mrg fputs ("le", file);
2489 1.1 mrg break;
2490 1.1 mrg case GTU:
2491 1.1 mrg fputs ("leu", file);
2492 1.1 mrg break;
2493 1.1 mrg case GE:
2494 1.1 mrg fputs ("lt", file);
2495 1.1 mrg break;
2496 1.1 mrg case GEU:
2497 1.1 mrg fputs ("ltu", file);
2498 1.1 mrg break;
2499 1.1 mrg case NE:
2500 1.1 mrg fputs ("eq", file);
2501 1.1 mrg break;
2502 1.1 mrg case EQ:
2503 1.1 mrg fputs ("ne", file);
2504 1.1 mrg break;
2505 1.1 mrg default:
2506 1.1 mrg gcc_unreachable ();
2507 1.1 mrg }
2508 1.1 mrg return;
2509 1.1 mrg }
2510 1.1 mrg /* Regular conditionals. */
2511 1.1 mrg if (code == 'c')
2512 1.1 mrg {
2513 1.1 mrg switch (GET_CODE (x))
2514 1.1 mrg {
2515 1.1 mrg case LE:
2516 1.1 mrg fputs ("le", file);
2517 1.1 mrg break;
2518 1.1 mrg case LEU:
2519 1.1 mrg fputs ("leu", file);
2520 1.1 mrg break;
2521 1.1 mrg case LT:
2522 1.1 mrg fputs ("lt", file);
2523 1.1 mrg break;
2524 1.1 mrg case LTU:
2525 1.1 mrg fputs ("ltu", file);
2526 1.1 mrg break;
2527 1.1 mrg case GT:
2528 1.1 mrg fputs ("gt", file);
2529 1.1 mrg break;
2530 1.1 mrg case GTU:
2531 1.1 mrg fputs ("gtu", file);
2532 1.1 mrg break;
2533 1.1 mrg case GE:
2534 1.1 mrg fputs ("ge", file);
2535 1.1 mrg break;
2536 1.1 mrg case GEU:
2537 1.1 mrg fputs ("geu", file);
2538 1.1 mrg break;
2539 1.1 mrg case NE:
2540 1.1 mrg fputs ("ne", file);
2541 1.1 mrg break;
2542 1.1 mrg case EQ:
2543 1.1 mrg fputs ("eq", file);
2544 1.1 mrg break;
2545 1.1 mrg default:
2546 1.1 mrg gcc_unreachable ();
2547 1.1 mrg }
2548 1.1 mrg return;
2549 1.1 mrg }
2550 1.1 mrg /* Used in negsi2 to do HImode ops on the two parts of an SImode
2551 1.1 mrg operand. */
2552 1.1 mrg if (code == 'h' && GET_MODE (x) == SImode)
2553 1.1 mrg {
2554 1.1 mrg x = m32c_subreg (HImode, x, SImode, 0);
2555 1.1 mrg code = 0;
2556 1.1 mrg }
2557 1.1 mrg if (code == 'H' && GET_MODE (x) == SImode)
2558 1.1 mrg {
2559 1.1 mrg x = m32c_subreg (HImode, x, SImode, 2);
2560 1.1 mrg code = 0;
2561 1.1 mrg }
2562 1.1 mrg if (code == 'h' && GET_MODE (x) == HImode)
2563 1.1 mrg {
2564 1.1 mrg x = m32c_subreg (QImode, x, HImode, 0);
2565 1.1 mrg code = 0;
2566 1.1 mrg }
2567 1.1 mrg if (code == 'H' && GET_MODE (x) == HImode)
2568 1.1 mrg {
2569 1.1 mrg /* We can't actually represent this as an rtx. Do it here. */
2570 1.1 mrg if (GET_CODE (x) == REG)
2571 1.1 mrg {
2572 1.1 mrg switch (REGNO (x))
2573 1.1 mrg {
2574 1.1 mrg case R0_REGNO:
2575 1.1 mrg fputs ("r0h", file);
2576 1.1 mrg return;
2577 1.1 mrg case R1_REGNO:
2578 1.1 mrg fputs ("r1h", file);
2579 1.1 mrg return;
2580 1.1 mrg default:
2581 1.1 mrg gcc_unreachable();
2582 1.1 mrg }
2583 1.1 mrg }
2584 1.1 mrg /* This should be a MEM. */
2585 1.1 mrg x = m32c_subreg (QImode, x, HImode, 1);
2586 1.1 mrg code = 0;
2587 1.1 mrg }
2588 1.1 mrg /* This is for BMcond, which always wants word register names. */
2589 1.1 mrg if (code == 'h' && GET_MODE (x) == QImode)
2590 1.1 mrg {
2591 1.1 mrg if (GET_CODE (x) == REG)
2592 1.1 mrg x = gen_rtx_REG (HImode, REGNO (x));
2593 1.1 mrg code = 0;
2594 1.1 mrg }
2595 1.1 mrg /* 'x' and 'X' need to be ignored for non-immediates. */
2596 1.1 mrg if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2597 1.1 mrg code = 0;
2598 1.1 mrg
2599 1.1 mrg encode_pattern (x);
2600 1.1 mrg force_sign = 0;
2601 1.1 mrg for (i = 0; conversions[i].pattern; i++)
2602 1.1 mrg if (conversions[i].code == code
2603 1.1 mrg && streq (conversions[i].pattern, pattern))
2604 1.1 mrg {
2605 1.1 mrg for (j = 0; conversions[i].format[j]; j++)
2606 1.1 mrg /* backslash quotes the next character in the output pattern. */
2607 1.1 mrg if (conversions[i].format[j] == '\\')
2608 1.1 mrg {
2609 1.1 mrg fputc (conversions[i].format[j + 1], file);
2610 1.1 mrg j++;
2611 1.1 mrg }
2612 1.1 mrg /* Digits in the output pattern indicate that the
2613 1.1 mrg corresponding RTX is to be output at that point. */
2614 1.1 mrg else if (ISDIGIT (conversions[i].format[j]))
2615 1.1 mrg {
2616 1.1 mrg rtx r = patternr[conversions[i].format[j] - '0'];
2617 1.1 mrg switch (GET_CODE (r))
2618 1.1 mrg {
2619 1.1 mrg case REG:
2620 1.1 mrg fprintf (file, "%s",
2621 1.1 mrg reg_name_with_mode (REGNO (r), GET_MODE (r)));
2622 1.1 mrg break;
2623 1.1 mrg case CONST_INT:
2624 1.1 mrg switch (code)
2625 1.1 mrg {
2626 1.1 mrg case 'b':
2627 1.1 mrg case 'B':
2628 1.1 mrg {
2629 1.1 mrg int v = INTVAL (r);
2630 1.1 mrg int i = (int) exact_log2 (v);
2631 1.1 mrg if (i == -1)
2632 1.1 mrg i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2633 1.1 mrg if (i == -1)
2634 1.1 mrg i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2635 1.1 mrg /* Bit position. */
2636 1.1 mrg fprintf (file, "%d", i);
2637 1.1 mrg }
2638 1.1 mrg break;
2639 1.1 mrg case 'x':
2640 1.1 mrg /* Unsigned byte. */
2641 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2642 1.1 mrg INTVAL (r) & 0xff);
2643 1.1 mrg break;
2644 1.1 mrg case 'X':
2645 1.1 mrg /* Unsigned word. */
2646 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2647 1.1 mrg INTVAL (r) & 0xffff);
2648 1.1 mrg break;
2649 1.1 mrg case 'p':
2650 1.1 mrg /* pushm and popm encode a register set into a single byte. */
2651 1.1 mrg comma = "";
2652 1.1 mrg for (b = 7; b >= 0; b--)
2653 1.1 mrg if (INTVAL (r) & (1 << b))
2654 1.1 mrg {
2655 1.1 mrg fprintf (file, "%s%s", comma, pushm_regs[b]);
2656 1.1 mrg comma = ",";
2657 1.1 mrg }
2658 1.1 mrg break;
2659 1.1 mrg case 'm':
2660 1.1 mrg /* "Minus". Output -X */
2661 1.1 mrg ival = (-INTVAL (r) & 0xffff);
2662 1.1 mrg if (ival & 0x8000)
2663 1.1 mrg ival = ival - 0x10000;
2664 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2665 1.1 mrg break;
2666 1.1 mrg default:
2667 1.1 mrg ival = INTVAL (r);
2668 1.1 mrg if (conversions[i].format[j + 1] == '[' && ival < 0)
2669 1.1 mrg {
2670 1.1 mrg /* We can simulate negative displacements by
2671 1.1 mrg taking advantage of address space
2672 1.1 mrg wrapping when the offset can span the
2673 1.1 mrg entire address range. */
2674 1.1 mrg rtx base =
2675 1.1 mrg patternr[conversions[i].format[j + 2] - '0'];
2676 1.1 mrg if (GET_CODE (base) == REG)
2677 1.1 mrg switch (REGNO (base))
2678 1.1 mrg {
2679 1.1 mrg case A0_REGNO:
2680 1.1 mrg case A1_REGNO:
2681 1.1 mrg if (TARGET_A24)
2682 1.1 mrg ival = 0x1000000 + ival;
2683 1.1 mrg else
2684 1.1 mrg ival = 0x10000 + ival;
2685 1.1 mrg break;
2686 1.1 mrg case SB_REGNO:
2687 1.1 mrg if (TARGET_A16)
2688 1.1 mrg ival = 0x10000 + ival;
2689 1.1 mrg break;
2690 1.1 mrg }
2691 1.1 mrg }
2692 1.1 mrg else if (code == 'd' && ival < 0 && j == 0)
2693 1.1 mrg /* The "mova" opcode is used to do addition by
2694 1.1 mrg computing displacements, but again, we need
2695 1.1 mrg displacements to be unsigned *if* they're
2696 1.1 mrg the only component of the displacement
2697 1.1 mrg (i.e. no "symbol-4" type displacement). */
2698 1.1 mrg ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2699 1.1 mrg
2700 1.1 mrg if (conversions[i].format[j] == '0')
2701 1.1 mrg {
2702 1.1 mrg /* More conversions to unsigned. */
2703 1.1 mrg if (unsigned_const == 2)
2704 1.1 mrg ival &= 0xffff;
2705 1.1 mrg if (unsigned_const == 1)
2706 1.1 mrg ival &= 0xff;
2707 1.1 mrg }
2708 1.1 mrg if (streq (conversions[i].pattern, "mi")
2709 1.1 mrg || streq (conversions[i].pattern, "mmi"))
2710 1.1 mrg {
2711 1.1 mrg /* Integers used as addresses are unsigned. */
2712 1.1 mrg ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2713 1.1 mrg }
2714 1.1 mrg if (force_sign && ival >= 0)
2715 1.1 mrg fputc ('+', file);
2716 1.1 mrg fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2717 1.1 mrg break;
2718 1.1 mrg }
2719 1.1 mrg break;
2720 1.1 mrg case CONST_DOUBLE:
2721 1.1 mrg /* We don't have const_double constants. If it
2722 1.1 mrg happens, make it obvious. */
2723 1.1 mrg fprintf (file, "[const_double 0x%lx]",
2724 1.1 mrg (unsigned long) CONST_DOUBLE_HIGH (r));
2725 1.1 mrg break;
2726 1.1 mrg case SYMBOL_REF:
2727 1.1 mrg assemble_name (file, XSTR (r, 0));
2728 1.1 mrg break;
2729 1.1 mrg case LABEL_REF:
2730 1.1 mrg output_asm_label (r);
2731 1.1 mrg break;
2732 1.1 mrg default:
2733 1.1 mrg fprintf (stderr, "don't know how to print this operand:");
2734 1.1 mrg debug_rtx (r);
2735 1.1 mrg gcc_unreachable ();
2736 1.1 mrg }
2737 1.1 mrg }
2738 1.1 mrg else
2739 1.1 mrg {
2740 1.1 mrg if (conversions[i].format[j] == 'z')
2741 1.1 mrg {
2742 1.1 mrg /* Some addressing modes *must* have a displacement,
2743 1.1 mrg so insert a zero here if needed. */
2744 1.1 mrg int k;
2745 1.1 mrg for (k = j + 1; conversions[i].format[k]; k++)
2746 1.1 mrg if (ISDIGIT (conversions[i].format[k]))
2747 1.1 mrg {
2748 1.1 mrg rtx reg = patternr[conversions[i].format[k] - '0'];
2749 1.1 mrg if (GET_CODE (reg) == REG
2750 1.1 mrg && (REGNO (reg) == SB_REGNO
2751 1.1 mrg || REGNO (reg) == FB_REGNO
2752 1.1 mrg || REGNO (reg) == SP_REGNO))
2753 1.1 mrg fputc ('0', file);
2754 1.1 mrg }
2755 1.1 mrg continue;
2756 1.1 mrg }
2757 1.1 mrg /* Signed displacements off symbols need to have signs
2758 1.1 mrg blended cleanly. */
2759 1.1 mrg if (conversions[i].format[j] == '+'
2760 1.1 mrg && (!code || code == 'D' || code == 'd')
2761 1.1 mrg && ISDIGIT (conversions[i].format[j + 1])
2762 1.1 mrg && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2763 1.1 mrg == CONST_INT))
2764 1.1 mrg {
2765 1.1 mrg force_sign = 1;
2766 1.1 mrg continue;
2767 1.1 mrg }
2768 1.1 mrg fputc (conversions[i].format[j], file);
2769 1.1 mrg }
2770 1.1 mrg break;
2771 1.1 mrg }
2772 1.1 mrg if (!conversions[i].pattern)
2773 1.1 mrg {
2774 1.1 mrg fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2775 1.1 mrg pattern);
2776 1.1 mrg debug_rtx (x);
2777 1.1 mrg fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2778 1.1 mrg }
2779 1.1 mrg
2780 1.1 mrg return;
2781 1.1 mrg }
2782 1.1 mrg
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' and '!' are the only punctuation codes we accept.  */
  return c == '&' || c == '!';
}
2798 1.1 mrg
2799 1.1 mrg /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2800 1.1 mrg
2801 1.1 mrg #undef TARGET_PRINT_OPERAND_ADDRESS
2802 1.1 mrg #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2803 1.1 mrg
2804 1.1 mrg static void
2805 1.1 mrg m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2806 1.1 mrg {
2807 1.1 mrg if (GET_CODE (address) == MEM)
2808 1.1 mrg address = XEXP (address, 0);
2809 1.1 mrg else
2810 1.1 mrg /* cf: gcc.dg/asm-4.c. */
2811 1.1 mrg gcc_assert (GET_CODE (address) == REG);
2812 1.1 mrg
2813 1.1 mrg m32c_print_operand (stream, address, 0);
2814 1.1 mrg }
2815 1.1 mrg
2816 1.1 mrg /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2817 1.1 mrg differently than general registers. */
2818 1.1 mrg void
2819 1.1 mrg m32c_output_reg_push (FILE * s, int regno)
2820 1.1 mrg {
2821 1.1 mrg if (regno == FLG_REGNO)
2822 1.1 mrg fprintf (s, "\tpushc\tflg\n");
2823 1.1 mrg else
2824 1.1 mrg fprintf (s, "\tpush.%c\t%s\n",
2825 1.1 mrg " bwll"[reg_push_size (regno)], reg_names[regno]);
2826 1.1 mrg }
2827 1.1 mrg
2828 1.1 mrg /* Likewise for ASM_OUTPUT_REG_POP. */
2829 1.1 mrg void
2830 1.1 mrg m32c_output_reg_pop (FILE * s, int regno)
2831 1.1 mrg {
2832 1.1 mrg if (regno == FLG_REGNO)
2833 1.1 mrg fprintf (s, "\tpopc\tflg\n");
2834 1.1 mrg else
2835 1.1 mrg fprintf (s, "\tpop.%c\t%s\n",
2836 1.1 mrg " bwll"[reg_push_size (regno)], reg_names[regno]);
2837 1.1 mrg }
2838 1.1 mrg
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on a type, use the type's attributes; on a decl, use
   the decl's own attributes when present, else fall back to the
   attributes of the decl's type.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		: DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2848 1.1 mrg
2849 1.1 mrg /* Returns TRUE if the given tree has the "interrupt" attribute. */
2850 1.1 mrg static int
2851 1.1 mrg interrupt_p (tree node ATTRIBUTE_UNUSED)
2852 1.1 mrg {
2853 1.1 mrg tree list = M32C_ATTRIBUTES (node);
2854 1.1 mrg while (list)
2855 1.1 mrg {
2856 1.1 mrg if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2857 1.1 mrg return 1;
2858 1.1 mrg list = TREE_CHAIN (list);
2859 1.1 mrg }
2860 1.1 mrg return fast_interrupt_p (node);
2861 1.1 mrg }
2862 1.1 mrg
2863 1.1 mrg /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2864 1.1 mrg static int
2865 1.1 mrg bank_switch_p (tree node ATTRIBUTE_UNUSED)
2866 1.1 mrg {
2867 1.1 mrg tree list = M32C_ATTRIBUTES (node);
2868 1.1 mrg while (list)
2869 1.1 mrg {
2870 1.1 mrg if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2871 1.1 mrg return 1;
2872 1.1 mrg list = TREE_CHAIN (list);
2873 1.1 mrg }
2874 1.1 mrg return 0;
2875 1.1 mrg }
2876 1.1 mrg
2877 1.1 mrg /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2878 1.1 mrg static int
2879 1.1 mrg fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2880 1.1 mrg {
2881 1.1 mrg tree list = M32C_ATTRIBUTES (node);
2882 1.1 mrg while (list)
2883 1.1 mrg {
2884 1.1 mrg if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2885 1.1 mrg return 1;
2886 1.1 mrg list = TREE_CHAIN (list);
2887 1.1 mrg }
2888 1.1 mrg return 0;
2889 1.1 mrg }
2890 1.1 mrg
/* Shared attribute handler for the "interrupt", "bank_switch" and
   "fast_interrupt" attributes (see m32c_attribute_table below).
   These take no arguments and require no validation, so the
   attribute is accepted as-is.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2900 1.1 mrg
2901 1.1 mrg /* Returns TRUE if given tree has the "function_vector" attribute. */
2902 1.1 mrg int
2903 1.1 mrg m32c_special_page_vector_p (tree func)
2904 1.1 mrg {
2905 1.1 mrg tree list;
2906 1.1 mrg
2907 1.1 mrg if (TREE_CODE (func) != FUNCTION_DECL)
2908 1.1 mrg return 0;
2909 1.1 mrg
2910 1.1 mrg list = M32C_ATTRIBUTES (func);
2911 1.1 mrg while (list)
2912 1.1 mrg {
2913 1.1 mrg if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2914 1.1 mrg return 1;
2915 1.1 mrg list = TREE_CHAIN (list);
2916 1.1 mrg }
2917 1.1 mrg return 0;
2918 1.1 mrg }
2919 1.1 mrg
2920 1.1 mrg static tree
2921 1.1 mrg function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2922 1.1 mrg tree name ATTRIBUTE_UNUSED,
2923 1.1 mrg tree args ATTRIBUTE_UNUSED,
2924 1.1 mrg int flags ATTRIBUTE_UNUSED,
2925 1.1 mrg bool * no_add_attrs ATTRIBUTE_UNUSED)
2926 1.1 mrg {
2927 1.1 mrg if (TARGET_R8C)
2928 1.1 mrg {
2929 1.1 mrg /* The attribute is not supported for R8C target. */
2930 1.1 mrg warning (OPT_Wattributes,
2931 1.1 mrg "%qE attribute is not supported for R8C target",
2932 1.1 mrg name);
2933 1.1 mrg *no_add_attrs = true;
2934 1.1 mrg }
2935 1.1 mrg else if (TREE_CODE (*node) != FUNCTION_DECL)
2936 1.1 mrg {
2937 1.1 mrg /* The attribute must be applied to functions only. */
2938 1.1 mrg warning (OPT_Wattributes,
2939 1.1 mrg "%qE attribute applies only to functions",
2940 1.1 mrg name);
2941 1.1 mrg *no_add_attrs = true;
2942 1.1 mrg }
2943 1.1 mrg else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2944 1.1 mrg {
2945 1.1 mrg /* The argument must be a constant integer. */
2946 1.1 mrg warning (OPT_Wattributes,
2947 1.1 mrg "%qE attribute argument not an integer constant",
2948 1.1 mrg name);
2949 1.1 mrg *no_add_attrs = true;
2950 1.1 mrg }
2951 1.1 mrg else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2952 1.1 mrg || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2953 1.1 mrg {
2954 1.1 mrg /* The argument value must be between 18 to 255. */
2955 1.1 mrg warning (OPT_Wattributes,
2956 1.1 mrg "%qE attribute argument should be between 18 to 255",
2957 1.1 mrg name);
2958 1.1 mrg *no_add_attrs = true;
2959 1.1 mrg }
2960 1.1 mrg return NULL_TREE;
2961 1.1 mrg }
2962 1.1 mrg
2963 1.1 mrg /* If the function is assigned the attribute 'function_vector', it
2964 1.1 mrg returns the function vector number, otherwise returns zero. */
2965 1.1 mrg int
2966 1.1 mrg current_function_special_page_vector (rtx x)
2967 1.1 mrg {
2968 1.1 mrg int num;
2969 1.1 mrg
2970 1.1 mrg if ((GET_CODE(x) == SYMBOL_REF)
2971 1.1 mrg && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2972 1.1 mrg {
2973 1.1 mrg tree list;
2974 1.1 mrg tree t = SYMBOL_REF_DECL (x);
2975 1.1 mrg
2976 1.1 mrg if (TREE_CODE (t) != FUNCTION_DECL)
2977 1.1 mrg return 0;
2978 1.1 mrg
2979 1.1 mrg list = M32C_ATTRIBUTES (t);
2980 1.1 mrg while (list)
2981 1.1 mrg {
2982 1.1 mrg if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2983 1.1 mrg {
2984 1.1 mrg num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2985 1.1 mrg return num;
2986 1.1 mrg }
2987 1.1 mrg
2988 1.1 mrg list = TREE_CHAIN (list);
2989 1.1 mrg }
2990 1.1 mrg
2991 1.1 mrg return 0;
2992 1.1 mrg }
2993 1.1 mrg else
2994 1.1 mrg return 0;
2995 1.1 mrg }
2996 1.1 mrg
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine attribute table; the handlers are defined above.  */
static const struct attribute_spec m32c_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "fast_interrupt", 0, 0, false, false, false, false,
    interrupt_handler, NULL },
  /* function_vector takes exactly one argument and requires a decl.  */
  { "function_vector", 1, 1, true, false, false, false,
    function_vector_handler, NULL },
  /* Terminator.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
3010 1.1 mrg
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute combinations
   are treated as compatible here.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3020 1.1 mrg
3021 1.1 mrg #undef TARGET_INSERT_ATTRIBUTES
3022 1.1 mrg #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3023 1.1 mrg static void
3024 1.1 mrg m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3025 1.1 mrg tree * attr_ptr ATTRIBUTE_UNUSED)
3026 1.1 mrg {
3027 1.1 mrg unsigned addr;
3028 1.1 mrg /* See if we need to make #pragma address variables volatile. */
3029 1.1 mrg
3030 1.1 mrg if (TREE_CODE (node) == VAR_DECL)
3031 1.1 mrg {
3032 1.1 mrg const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3033 1.1 mrg if (m32c_get_pragma_address (name, &addr))
3034 1.1 mrg {
3035 1.1 mrg TREE_THIS_VOLATILE (node) = true;
3036 1.1 mrg }
3037 1.1 mrg }
3038 1.1 mrg }
3039 1.1 mrg
3040 1.1 mrg /* Hash table of pragma info. */
3041 1.1 mrg static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
3042 1.1 mrg
3043 1.1 mrg void
3044 1.1 mrg m32c_note_pragma_address (const char *varname, unsigned address)
3045 1.1 mrg {
3046 1.1 mrg if (!pragma_htab)
3047 1.1 mrg pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
3048 1.1 mrg
3049 1.1 mrg const char *name = ggc_strdup (varname);
3050 1.1 mrg unsigned int *slot = &pragma_htab->get_or_insert (name);
3051 1.1 mrg *slot = address;
3052 1.1 mrg }
3053 1.1 mrg
3054 1.1 mrg static bool
3055 1.1 mrg m32c_get_pragma_address (const char *varname, unsigned *address)
3056 1.1 mrg {
3057 1.1 mrg if (!pragma_htab)
3058 1.1 mrg return false;
3059 1.1 mrg
3060 1.1 mrg unsigned int *slot = pragma_htab->get (varname);
3061 1.1 mrg if (slot)
3062 1.1 mrg {
3063 1.1 mrg *address = *slot;
3064 1.1 mrg return true;
3065 1.1 mrg }
3066 1.1 mrg return false;
3067 1.1 mrg }
3068 1.1 mrg
3069 1.1 mrg void
3070 1.1 mrg m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3071 1.1 mrg const char *name,
3072 1.1 mrg int size, int align, int global)
3073 1.1 mrg {
3074 1.1 mrg unsigned address;
3075 1.1 mrg
3076 1.1 mrg if (m32c_get_pragma_address (name, &address))
3077 1.1 mrg {
3078 1.1 mrg /* We never output these as global. */
3079 1.1 mrg assemble_name (stream, name);
3080 1.1 mrg fprintf (stream, " = 0x%04x\n", address);
3081 1.1 mrg return;
3082 1.1 mrg }
3083 1.1 mrg if (!global)
3084 1.1 mrg {
3085 1.1 mrg fprintf (stream, "\t.local\t");
3086 1.1 mrg assemble_name (stream, name);
3087 1.1 mrg fprintf (stream, "\n");
3088 1.1 mrg }
3089 1.1 mrg fprintf (stream, "\t.comm\t");
3090 1.1 mrg assemble_name (stream, name);
3091 1.1 mrg fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3092 1.1 mrg }
3093 1.1 mrg
/* Predicates */

/* This is a list of legal subregs of hard regs.  Each entry describes
   one (outer mode size, inner mode size, register) combination:
   byte_mask is a bitmask of the SUBREG_BYTE offsets allowed, and
   legal_when selects when the entry applies (1 = always, 16 = only
   with TARGET_A16, 24 = only with TARGET_A24); see
   m32c_illegal_subreg_p below.  */
static const struct {
  unsigned char outer_mode_size;	/* size of the subreg's mode, in bytes */
  unsigned char inner_mode_size;	/* size of the full register's mode, in bytes */
  unsigned char byte_mask;		/* bit N set => SUBREG_BYTE N is legal */
  unsigned char legal_when;		/* 1 = always, 16/24 = address-size dependent */
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
3120 1.1 mrg
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  /* A MEM whose address we can't encode is always illegal.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are just mode changes; always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Only the hard registers below MEM0 are restricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Turn the byte offset into a one-hot mask so it can be tested
     against legal_subregs[].byte_mask below.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    /* Always legal.  */
	    return false;
	  case 16:
	    /* Legal only with TARGET_A16.  */
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    /* Legal only with TARGET_A24.  */
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allows this subreg; reject it.  */
  return true;
}
3175 1.1 mrg
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* On TARGET_A24 chips every move is accepted.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through SUBREGs to the underlying expressions.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  /* Reject mem-to-mem moves before reload has finished.  */
  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
3217 1.1 mrg
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  Until equivalent information is available again, always
     answer "no" so the combination is never attempted.  */
  return false;
}
3229 1.1 mrg
3230 1.1 mrg /* Expanders */
3231 1.1 mrg
3232 1.1 mrg /* Subregs are non-orthogonal for us, because our registers are all
3233 1.1 mrg different sizes. */
3234 1.1 mrg static rtx
3235 1.1 mrg m32c_subreg (machine_mode outer,
3236 1.1 mrg rtx x, machine_mode inner, int byte)
3237 1.1 mrg {
3238 1.1 mrg int r, nr = -1;
3239 1.1 mrg
3240 1.1 mrg /* Converting MEMs to different types that are the same size, we
3241 1.1 mrg just rewrite them. */
3242 1.1 mrg if (GET_CODE (x) == SUBREG
3243 1.1 mrg && SUBREG_BYTE (x) == 0
3244 1.1 mrg && GET_CODE (SUBREG_REG (x)) == MEM
3245 1.1 mrg && (GET_MODE_SIZE (GET_MODE (x))
3246 1.1 mrg == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3247 1.1 mrg {
3248 1.1 mrg rtx oldx = x;
3249 1.1 mrg x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3250 1.1 mrg MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3251 1.1 mrg }
3252 1.1 mrg
3253 1.1 mrg /* Push/pop get done as smaller push/pops. */
3254 1.1 mrg if (GET_CODE (x) == MEM
3255 1.1 mrg && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3256 1.1 mrg || GET_CODE (XEXP (x, 0)) == POST_INC))
3257 1.1 mrg return gen_rtx_MEM (outer, XEXP (x, 0));
3258 1.1 mrg if (GET_CODE (x) == SUBREG
3259 1.1 mrg && GET_CODE (XEXP (x, 0)) == MEM
3260 1.1 mrg && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3261 1.1 mrg || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3262 1.1 mrg return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3263 1.1 mrg
3264 1.1 mrg if (GET_CODE (x) != REG)
3265 1.1 mrg {
3266 1.1 mrg rtx r = simplify_gen_subreg (outer, x, inner, byte);
3267 1.1 mrg if (GET_CODE (r) == SUBREG
3268 1.1 mrg && GET_CODE (x) == MEM
3269 1.1 mrg && MEM_VOLATILE_P (x))
3270 1.1 mrg {
3271 1.1 mrg /* Volatile MEMs don't get simplified, but we need them to
3272 1.1 mrg be. We are little endian, so the subreg byte is the
3273 1.1 mrg offset. */
3274 1.1 mrg r = adjust_address_nv (x, outer, byte);
3275 1.1 mrg }
3276 1.1 mrg return r;
3277 1.1 mrg }
3278 1.1 mrg
3279 1.1 mrg r = REGNO (x);
3280 1.1 mrg if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3281 1.1 mrg return simplify_gen_subreg (outer, x, inner, byte);
3282 1.1 mrg
3283 1.1 mrg if (IS_MEM_REGNO (r))
3284 1.1 mrg return simplify_gen_subreg (outer, x, inner, byte);
3285 1.1 mrg
3286 1.1 mrg /* This is where the complexities of our register layout are
3287 1.1 mrg described. */
3288 1.1 mrg if (byte == 0)
3289 1.1 mrg nr = r;
3290 1.1 mrg else if (outer == HImode)
3291 1.1 mrg {
3292 1.1 mrg if (r == R0_REGNO && byte == 2)
3293 1.1 mrg nr = R2_REGNO;
3294 1.1 mrg else if (r == R0_REGNO && byte == 4)
3295 1.1 mrg nr = R1_REGNO;
3296 1.1 mrg else if (r == R0_REGNO && byte == 6)
3297 1.1 mrg nr = R3_REGNO;
3298 1.1 mrg else if (r == R1_REGNO && byte == 2)
3299 1.1 mrg nr = R3_REGNO;
3300 1.1 mrg else if (r == A0_REGNO && byte == 2)
3301 1.1 mrg nr = A1_REGNO;
3302 1.1 mrg }
3303 1.1 mrg else if (outer == SImode)
3304 1.1 mrg {
3305 1.1 mrg if (r == R0_REGNO && byte == 0)
3306 1.1 mrg nr = R0_REGNO;
3307 1.1 mrg else if (r == R0_REGNO && byte == 4)
3308 1.1 mrg nr = R1_REGNO;
3309 1.1 mrg }
3310 1.1 mrg if (nr == -1)
3311 1.1 mrg {
3312 1.1 mrg fprintf (stderr, "m32c_subreg %s %s %d\n",
3313 1.1 mrg mode_name[outer], mode_name[inner], byte);
3314 1.1 mrg debug_rtx (x);
3315 1.1 mrg gcc_unreachable ();
3316 1.1 mrg }
3317 1.1 mrg return gen_rtx_REG (outer, nr);
3318 1.1 mrg }
3319 1.1 mrg
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Constants can't be stored directly into the far address space;
     force them through a register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* PSImode moves are always split on TARGET_A16 chips.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      /* Lower a PRE_MODIFY destination: emit the address update as a
	 separate insn, then store through the plain register
	 address.  */
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-mem moves by loading the source into a register while
     we can still create pseudos.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3346 1.1 mrg
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* All PSImode moves are split on TARGET_A16 (r8c/m16c).  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, which must also be split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3380 1.1 mrg
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (or, for the define_split api, if operand pairs
   were stored back into OPERANDS).  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* We always split into exactly two SUBMODE-sized pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* TARGET_A16 must also split any move touching the far address
     space.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* Pieces are pushed high-index first so they pop back in
	 order.  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      /* Pops go in the opposite order, low index first.  */
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the piecewise moves so that no destination piece
	 overwrites a source piece that is still needed.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      /* Pairs start at index 2; in the define_split api ops[0..1]
	 are the original operands themselves.  */
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3517 1.1 mrg
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Fresh pseudos passed as the first two operands of the setmem
     patterns; presumably the opcode's output address/count — see
     blkmov.md.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* The destination address must be in a pseudo register.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word and emit a
	 word-wide fill of half the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3584 1.1 mrg
3585 1.1 mrg /* This is a memcpy() opcode. All operands are implied, so we need to
3586 1.1 mrg arrange for them to be in the right registers. The opcode wants
3587 1.1 mrg addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3588 1.1 mrg is the source (MEM:BLK), and $2 the count (HI). */
int
m32c_expand_cpymemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);	/* Destination address.  */
  srca = XEXP (operands[1], 0);		/* Source address.  */
  count = operands[2];			/* Byte count.  */

  /* Scratch outputs for the address/count registers the opcode
     clobbers.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* The opcode needs both addresses in pseudos.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value: an even
     constant count can be copied as half as many words.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case: byte-wide copy with the
     count in a register.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3636 1.1 mrg
3637 1.1 mrg /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3638 1.1 mrg the copy, which should point to the NUL at the end of the string,
3639 1.1 mrg $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3640 1.1 mrg Since our opcode leaves the destination pointing *after* the NUL,
3641 1.1 mrg we must emit an adjustment. */
3642 1.1 mrg int
3643 1.1 mrg m32c_expand_movstr(rtx *operands)
3644 1.1 mrg {
3645 1.1 mrg rtx desta, srca;
3646 1.1 mrg rtx desto, srco;
3647 1.1 mrg
3648 1.1 mrg desta = XEXP (operands[1], 0);
3649 1.1 mrg srca = XEXP (operands[2], 0);
3650 1.1 mrg
3651 1.1 mrg desto = gen_reg_rtx (Pmode);
3652 1.1 mrg srco = gen_reg_rtx (Pmode);
3653 1.1 mrg
3654 1.1 mrg if (GET_CODE (desta) != REG
3655 1.1 mrg || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3656 1.1 mrg desta = copy_to_mode_reg (Pmode, desta);
3657 1.1 mrg
3658 1.1 mrg if (GET_CODE (srca) != REG
3659 1.1 mrg || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3660 1.1 mrg srca = copy_to_mode_reg (Pmode, srca);
3661 1.1 mrg
3662 1.1 mrg emit_insn (gen_movstr_op (desto, srco, desta, srca));
3663 1.1 mrg /* desto ends up being a1, which allows this type of add through MOVA. */
3664 1.1 mrg emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3665 1.1 mrg
3666 1.1 mrg return 1;
3667 1.1 mrg }
3668 1.1 mrg
3669 1.1 mrg /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3670 1.1 mrg <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3671 1.1 mrg $2 is the other (MEM:BLK). We must do the comparison, and then
3672 1.1 mrg convert the flags to a signed integer result. */
3673 1.1 mrg int
3674 1.1 mrg m32c_expand_cmpstr(rtx *operands)
3675 1.1 mrg {
3676 1.1 mrg rtx src1a, src2a;
3677 1.1 mrg
3678 1.1 mrg src1a = XEXP (operands[1], 0);
3679 1.1 mrg src2a = XEXP (operands[2], 0);
3680 1.1 mrg
3681 1.1 mrg if (GET_CODE (src1a) != REG
3682 1.1 mrg || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3683 1.1 mrg src1a = copy_to_mode_reg (Pmode, src1a);
3684 1.1 mrg
3685 1.1 mrg if (GET_CODE (src2a) != REG
3686 1.1 mrg || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3687 1.1 mrg src2a = copy_to_mode_reg (Pmode, src2a);
3688 1.1 mrg
3689 1.1 mrg emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3690 1.1 mrg emit_insn (gen_cond_to_int (operands[0]));
3691 1.1 mrg
3692 1.1 mrg return 1;
3693 1.1 mrg }
3694 1.1 mrg
3695 1.1 mrg
3696 1.1 mrg typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3697 1.1 mrg
3698 1.1 mrg static shift_gen_func
3699 1.1 mrg shift_gen_func_for (int mode, int code)
3700 1.1 mrg {
3701 1.1 mrg #define GFF(m,c,f) if (mode == m && code == c) return f
3702 1.1 mrg GFF(QImode, ASHIFT, gen_ashlqi3_i);
3703 1.1 mrg GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3704 1.1 mrg GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3705 1.1 mrg GFF(HImode, ASHIFT, gen_ashlhi3_i);
3706 1.1 mrg GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3707 1.1 mrg GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3708 1.1 mrg GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3709 1.1 mrg GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3710 1.1 mrg GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3711 1.1 mrg GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3712 1.1 mrg GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3713 1.1 mrg GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3714 1.1 mrg #undef GFF
3715 1.1 mrg gcc_unreachable ();
3716 1.1 mrg }
3717 1.1 mrg
3718 1.1 mrg /* The m32c only has one shift, but it takes a signed count. GCC
3719 1.1 mrg doesn't want this, so we fake it by negating any shift count when
3720 1.1 mrg we're pretending to shift the other way. Also, the shift count is
3721 1.1 mrg limited to -8..8. It's slightly better to use two shifts for 9..15
3722 1.1 mrg than to load the count into r1h, so we do that too. */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  /* Constant shift count: decompose into a sequence of shifts, each
     within the hardware's per-insn count limit.  SCALE carries the
     sign convention (negative for right shifts).  */
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Per-insn count limit: 32 for wide modes on A24, else 8.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      /* Peel off full-width left shifts until the remainder fits.  */
      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      /* Likewise for right (negative-count) shifts.  */
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      /* Final shift with the in-range residue.  */
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable shift count: get it (possibly negated) into a QImode
     temporary.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value. */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move. */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	 1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	 1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts. */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* 16-bit pre-shift done as two 8-bit shifts, since 16 is
	     itself outside the per-insn range.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts. */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      /* The final variable shift operates on operands[0] in place.  */
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Let the caller's pattern emit the shift with the (possibly
     negated) count.  */
  operands[2] = temp;
  return 0;
}
3827 1.1 mrg
3828 1.1 mrg /* The m32c has a limited range of operations that work on PSImode
3829 1.1 mrg values; we have to expand to SI, do the math, and truncate back to
3830 1.1 mrg PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3831 1.1 mrg those cases. */
3832 1.1 mrg void
3833 1.1 mrg m32c_expand_neg_mulpsi3 (rtx * operands)
3834 1.1 mrg {
3835 1.1 mrg /* operands: a = b * i */
3836 1.1 mrg rtx temp1; /* b as SI */
3837 1.1 mrg rtx scale /* i as SI */;
3838 1.1 mrg rtx temp2; /* a*b as SI */
3839 1.1 mrg
3840 1.1 mrg temp1 = gen_reg_rtx (SImode);
3841 1.1 mrg temp2 = gen_reg_rtx (SImode);
3842 1.1 mrg if (GET_CODE (operands[2]) != CONST_INT)
3843 1.1 mrg {
3844 1.1 mrg scale = gen_reg_rtx (SImode);
3845 1.1 mrg emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3846 1.1 mrg }
3847 1.1 mrg else
3848 1.1 mrg scale = copy_to_mode_reg (SImode, operands[2]);
3849 1.1 mrg
3850 1.1 mrg emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3851 1.1 mrg temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3852 1.1 mrg emit_insn (gen_truncsipsi2 (operands[0], temp2));
3853 1.1 mrg }
3854 1.1 mrg
3855 1.1 mrg /* Pattern Output Functions */
3856 1.1 mrg
3857 1.1 mrg int
3858 1.1 mrg m32c_expand_movcc (rtx *operands)
3859 1.1 mrg {
3860 1.1 mrg rtx rel = operands[1];
3861 1.1 mrg
3862 1.1 mrg if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3863 1.1 mrg return 1;
3864 1.1 mrg if (GET_CODE (operands[2]) != CONST_INT
3865 1.1 mrg || GET_CODE (operands[3]) != CONST_INT)
3866 1.1 mrg return 1;
3867 1.1 mrg if (GET_CODE (rel) == NE)
3868 1.1 mrg {
3869 1.1 mrg rtx tmp = operands[2];
3870 1.1 mrg operands[2] = operands[3];
3871 1.1 mrg operands[3] = tmp;
3872 1.1 mrg rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3873 1.1 mrg }
3874 1.1 mrg
3875 1.1 mrg emit_move_insn (operands[0],
3876 1.1 mrg gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3877 1.1 mrg rel,
3878 1.1 mrg operands[2],
3879 1.1 mrg operands[3]));
3880 1.1 mrg return 0;
3881 1.1 mrg }
3882 1.1 mrg
3883 1.1 mrg /* Used for the "insv" pattern. Return nonzero to fail, else done. */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit inserts are handled here.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  /* Single-bit mask at the requested position.  */
  mask = 1 << INTVAL (operands[2]);

  /* Look through a lowpart SUBREG to the underlying HI/QI object.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read the old value through a scratch pseudo when possible;
     volatile MEMs (and post-reload) must be used in place.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian. */
      /* NOTE(review): new_mem is constructed but never assigned to
	 op0 or otherwise used afterward, so only the mask shift below
	 takes effect in this branch.  Looks like dead code or a
	 latent bug — confirm intended behavior before changing.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it. */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode. */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Pick the pattern: bit 2 of the selector = IOR vs AND (value set
     vs cleared), bit 1 = HI vs QI operand, bit 0 = A24 vs A16.  */
  switch ( (INTVAL (operands[3]) ? 4 : 0)
	   + ((GET_MODE (op0) == HImode) ? 2 : 0)
	   + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break;	/* Not reached, but silences a warning. */
    }

  emit_insn (p);
  return 0;
}
3973 1.1 mrg
3974 1.1 mrg const char *
3975 1.1 mrg m32c_scc_pattern(rtx *operands, RTX_CODE code)
3976 1.1 mrg {
3977 1.1 mrg static char buf[30];
3978 1.1 mrg if (GET_CODE (operands[0]) == REG
3979 1.1 mrg && REGNO (operands[0]) == R0_REGNO)
3980 1.1 mrg {
3981 1.1 mrg if (code == EQ)
3982 1.1 mrg return "stzx\t#1,#0,r0l";
3983 1.1 mrg if (code == NE)
3984 1.1 mrg return "stzx\t#0,#1,r0l";
3985 1.1 mrg }
3986 1.1 mrg sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3987 1.1 mrg return buf;
3988 1.1 mrg }
3989 1.1 mrg
3990 1.1 mrg /* Encode symbol attributes of a SYMBOL_REF into its
3991 1.1 mrg SYMBOL_REF_FLAGS. */
3992 1.1 mrg static void
3993 1.1 mrg m32c_encode_section_info (tree decl, rtx rtl, int first)
3994 1.1 mrg {
3995 1.1 mrg int extra_flags = 0;
3996 1.1 mrg
3997 1.1 mrg default_encode_section_info (decl, rtl, first);
3998 1.1 mrg if (TREE_CODE (decl) == FUNCTION_DECL
3999 1.1 mrg && m32c_special_page_vector_p (decl))
4000 1.1 mrg
4001 1.1 mrg extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4002 1.1 mrg
4003 1.1 mrg if (extra_flags)
4004 1.1 mrg SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4005 1.1 mrg }
4006 1.1 mrg
4007 1.1 mrg /* Returns TRUE if the current function is a leaf, and thus we can
4008 1.1 mrg determine which registers an interrupt function really needs to
4009 1.1 mrg save. The logic below is mostly about finding the insn sequence
4010 1.1 mrg that's the function, versus any sequence that might be open for the
4011 1.1 mrg current insn. */
4012 1.1 mrg static int
4013 1.1 mrg m32c_leaf_function_p (void)
4014 1.1 mrg {
4015 1.1 mrg int rv;
4016 1.1 mrg
4017 1.1 mrg push_topmost_sequence ();
4018 1.1 mrg rv = leaf_function_p ();
4019 1.1 mrg pop_topmost_sequence ();
4020 1.1 mrg return rv;
4021 1.1 mrg }
4022 1.1 mrg
4023 1.1 mrg /* Returns TRUE if the current function needs to use the ENTER/EXIT
4024 1.1 mrg opcodes. If the function doesn't need the frame base or stack
4025 1.1 mrg pointer, it can use the simpler RTS opcode. */
4026 1.1 mrg static bool
4027 1.1 mrg m32c_function_needs_enter (void)
4028 1.1 mrg {
4029 1.1 mrg rtx_insn *insn;
4030 1.1 mrg rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4031 1.1 mrg rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4032 1.1 mrg
4033 1.1 mrg for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4034 1.1 mrg if (NONDEBUG_INSN_P (insn))
4035 1.1 mrg {
4036 1.1 mrg if (reg_mentioned_p (sp, insn))
4037 1.1 mrg return true;
4038 1.1 mrg if (reg_mentioned_p (fb, insn))
4039 1.1 mrg return true;
4040 1.1 mrg }
4041 1.1 mrg return false;
4042 1.1 mrg }
4043 1.1 mrg
4044 1.1 mrg /* Mark all the subexpressions of the PARALLEL rtx PAR as
4045 1.1 mrg frame-related. Return PAR.
4046 1.1 mrg
4047 1.1 mrg dwarf2out.cc:dwarf2out_frame_debug_expr ignores sub-expressions of a
4048 1.1 mrg PARALLEL rtx other than the first if they do not have the
4049 1.1 mrg FRAME_RELATED flag set on them. So this function is handy for
4050 1.1 mrg marking up 'enter' instructions. */
4051 1.1 mrg static rtx
4052 1.1 mrg m32c_all_frame_related (rtx par)
4053 1.1 mrg {
4054 1.1 mrg int len = XVECLEN (par, 0);
4055 1.1 mrg int i;
4056 1.1 mrg
4057 1.1 mrg for (i = 0; i < len; i++)
4058 1.1 mrg F (XVECEXP (par, 0, i));
4059 1.1 mrg
4060 1.1 mrg return par;
4061 1.1 mrg }
4062 1.1 mrg
4063 1.1 mrg /* Emits the prologue. See the frame layout comment earlier in this
4064 1.1 mrg file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4065 1.1 mrg that we manually update sp. */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  /* Nonzero when the prologue is more than just ENTER; triggers an
     end-of-prologue marker for debugging.  */
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* How many bytes the register pushes will take (no insns emitted
     yet, PP_justcount only counts).  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupt entry: either switch register banks or push the
     registers the interrupt clobbers.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* With no frame and no SP/FB references we can return with plain
     RTS and skip ENTER entirely.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  /* ENTER can only reserve up to 254 bytes; anything beyond that is
     done with an explicit SP adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Now actually emit the register saves.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4131 1.1 mrg
4132 1.1 mrg /* Likewise, for the epilogue. The only exception is that, for
4133 1.1 mrg interrupts, we must manually unwind the frame as the REIT opcode
4134 1.1 mrg doesn't do that. */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the registers saved in the prologue.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind manually: sp = fp (via a0), then pop the saved fp.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* If we pushed interrupt registers on entry (rather than bank
	 switching), pop them back now.  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4203 1.1 mrg
/* Emit the exception-handling epilogue; RET_ADDR is the address to
   return to after unwinding.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4214 1.1 mrg
4215 1.1 mrg /* Indicate which flags must be properly set for a given conditional. */
4216 1.1 mrg static int
4217 1.1 mrg flags_needed_for_conditional (rtx cond)
4218 1.1 mrg {
4219 1.1 mrg switch (GET_CODE (cond))
4220 1.1 mrg {
4221 1.1 mrg case LE:
4222 1.1 mrg case GT:
4223 1.1 mrg return FLAGS_OSZ;
4224 1.1 mrg case LEU:
4225 1.1 mrg case GTU:
4226 1.1 mrg return FLAGS_ZC;
4227 1.1 mrg case LT:
4228 1.1 mrg case GE:
4229 1.1 mrg return FLAGS_OS;
4230 1.1 mrg case LTU:
4231 1.1 mrg case GEU:
4232 1.1 mrg return FLAGS_C;
4233 1.1 mrg case EQ:
4234 1.1 mrg case NE:
4235 1.1 mrg return FLAGS_Z;
4236 1.1 mrg default:
4237 1.1 mrg return FLAGS_N;
4238 1.1 mrg }
4239 1.1 mrg }
4240 1.1 mrg
4241 1.1 mrg #define DEBUG_CMP 0
4242 1.1 mrg
4243 1.1 mrg /* Returns true if a compare insn is redundant because it would only
4244 1.1 mrg set flags that are already set correctly. */
4245 1.1 mrg static bool
4246 1.1 mrg m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4247 1.1 mrg {
4248 1.1 mrg int flags_needed;
4249 1.1 mrg int pflags;
4250 1.1 mrg rtx_insn *prev;
4251 1.1 mrg rtx pp, next;
4252 1.1 mrg rtx op0, op1;
4253 1.1 mrg #if DEBUG_CMP
4254 1.1 mrg int prev_icode, i;
4255 1.1 mrg #endif
4256 1.1 mrg
4257 1.1 mrg op0 = operands[0];
4258 1.1 mrg op1 = operands[1];
4259 1.1 mrg
4260 1.1 mrg #if DEBUG_CMP
4261 1.1 mrg fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4262 1.1 mrg debug_rtx(cmp);
4263 1.1 mrg for (i=0; i<2; i++)
4264 1.1 mrg {
4265 1.1 mrg fprintf(stderr, "operands[%d] = ", i);
4266 1.1 mrg debug_rtx(operands[i]);
4267 1.1 mrg }
4268 1.1 mrg #endif
4269 1.1 mrg
4270 1.1 mrg next = next_nonnote_insn (cmp);
4271 1.1 mrg if (!next || !INSN_P (next))
4272 1.1 mrg {
4273 1.1 mrg #if DEBUG_CMP
4274 1.1 mrg fprintf(stderr, "compare not followed by insn\n");
4275 1.1 mrg debug_rtx(next);
4276 1.1 mrg #endif
4277 1.1 mrg return false;
4278 1.1 mrg }
4279 1.1 mrg if (GET_CODE (PATTERN (next)) == SET
4280 1.1 mrg && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4281 1.1 mrg {
4282 1.1 mrg next = XEXP (XEXP (PATTERN (next), 1), 0);
4283 1.1 mrg }
4284 1.1 mrg else if (GET_CODE (PATTERN (next)) == SET)
4285 1.1 mrg {
4286 1.1 mrg /* If this is a conditional, flags_needed will be something
4287 1.1 mrg other than FLAGS_N, which we test below. */
4288 1.1 mrg next = XEXP (PATTERN (next), 1);
4289 1.1 mrg }
4290 1.1 mrg else
4291 1.1 mrg {
4292 1.1 mrg #if DEBUG_CMP
4293 1.1 mrg fprintf(stderr, "compare not followed by conditional\n");
4294 1.1 mrg debug_rtx(next);
4295 1.1 mrg #endif
4296 1.1 mrg return false;
4297 1.1 mrg }
4298 1.1 mrg #if DEBUG_CMP
4299 1.1 mrg fprintf(stderr, "conditional is: ");
4300 1.1 mrg debug_rtx(next);
4301 1.1 mrg #endif
4302 1.1 mrg
4303 1.1 mrg flags_needed = flags_needed_for_conditional (next);
4304 1.1 mrg if (flags_needed == FLAGS_N)
4305 1.1 mrg {
4306 1.1 mrg #if DEBUG_CMP
4307 1.1 mrg fprintf(stderr, "compare not followed by conditional\n");
4308 1.1 mrg debug_rtx(next);
4309 1.1 mrg #endif
4310 1.1 mrg return false;
4311 1.1 mrg }
4312 1.1 mrg
4313 1.1 mrg /* Compare doesn't set overflow and carry the same way that
4314 1.1 mrg arithmetic instructions do, so we can't replace those. */
4315 1.1 mrg if (flags_needed & FLAGS_OC)
4316 1.1 mrg return false;
4317 1.1 mrg
4318 1.1 mrg prev = cmp;
4319 1.1 mrg do {
4320 1.1 mrg prev = prev_nonnote_insn (prev);
4321 1.1 mrg if (!prev)
4322 1.1 mrg {
4323 1.1 mrg #if DEBUG_CMP
4324 1.1 mrg fprintf(stderr, "No previous insn.\n");
4325 1.1 mrg #endif
4326 1.1 mrg return false;
4327 1.1 mrg }
4328 1.1 mrg if (!INSN_P (prev))
4329 1.1 mrg {
4330 1.1 mrg #if DEBUG_CMP
4331 1.1 mrg fprintf(stderr, "Previous insn is a non-insn.\n");
4332 1.1 mrg #endif
4333 1.1 mrg return false;
4334 1.1 mrg }
4335 1.1 mrg pp = PATTERN (prev);
4336 1.1 mrg if (GET_CODE (pp) != SET)
4337 1.1 mrg {
4338 1.1 mrg #if DEBUG_CMP
4339 1.1 mrg fprintf(stderr, "Previous insn is not a SET.\n");
4340 1.1 mrg #endif
4341 1.1 mrg return false;
4342 1.1 mrg }
4343 1.1 mrg pflags = get_attr_flags (prev);
4344 1.1 mrg
4345 1.1 mrg /* Looking up attributes of previous insns corrupted the recog
4346 1.1 mrg tables. */
4347 1.1 mrg INSN_UID (cmp) = -1;
4348 1.1 mrg recog (PATTERN (cmp), cmp, 0);
4349 1.1 mrg
4350 1.1 mrg if (pflags == FLAGS_N
4351 1.1 mrg && reg_mentioned_p (op0, pp))
4352 1.1 mrg {
4353 1.1 mrg #if DEBUG_CMP
4354 1.1 mrg fprintf(stderr, "intermediate non-flags insn uses op:\n");
4355 1.1 mrg debug_rtx(prev);
4356 1.1 mrg #endif
4357 1.1 mrg return false;
4358 1.1 mrg }
4359 1.1 mrg
4360 1.1 mrg /* Check for comparisons against memory - between volatiles and
4361 1.1 mrg aliases, we just can't risk this one. */
4362 1.1 mrg if (GET_CODE (operands[0]) == MEM
4363 1.1 mrg || GET_CODE (operands[0]) == MEM)
4364 1.1 mrg {
4365 1.1 mrg #if DEBUG_CMP
4366 1.1 mrg fprintf(stderr, "comparisons with memory:\n");
4367 1.1 mrg debug_rtx(prev);
4368 1.1 mrg #endif
4369 1.1 mrg return false;
4370 1.1 mrg }
4371 1.1 mrg
4372 1.1 mrg /* Check for PREV changing a register that's used to compute a
4373 1.1 mrg value in CMP, even if it doesn't otherwise change flags. */
4374 1.1 mrg if (GET_CODE (operands[0]) == REG
4375 1.1 mrg && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4376 1.1 mrg {
4377 1.1 mrg #if DEBUG_CMP
4378 1.1 mrg fprintf(stderr, "sub-value affected, op0:\n");
4379 1.1 mrg debug_rtx(prev);
4380 1.1 mrg #endif
4381 1.1 mrg return false;
4382 1.1 mrg }
4383 1.1 mrg if (GET_CODE (operands[1]) == REG
4384 1.1 mrg && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4385 1.1 mrg {
4386 1.1 mrg #if DEBUG_CMP
4387 1.1 mrg fprintf(stderr, "sub-value affected, op1:\n");
4388 1.1 mrg debug_rtx(prev);
4389 1.1 mrg #endif
4390 1.1 mrg return false;
4391 1.1 mrg }
4392 1.1 mrg
4393 1.1 mrg } while (pflags == FLAGS_N);
4394 1.1 mrg #if DEBUG_CMP
4395 1.1 mrg fprintf(stderr, "previous flag-setting insn:\n");
4396 1.1 mrg debug_rtx(prev);
4397 1.1 mrg debug_rtx(pp);
4398 1.1 mrg #endif
4399 1.1 mrg
4400 1.1 mrg if (GET_CODE (pp) == SET
4401 1.1 mrg && GET_CODE (XEXP (pp, 0)) == REG
4402 1.1 mrg && REGNO (XEXP (pp, 0)) == FLG_REGNO
4403 1.1 mrg && GET_CODE (XEXP (pp, 1)) == COMPARE)
4404 1.1 mrg {
4405 1.1 mrg /* Adjacent cbranches must have the same operands to be
4406 1.1 mrg redundant. */
4407 1.1 mrg rtx pop0 = XEXP (XEXP (pp, 1), 0);
4408 1.1 mrg rtx pop1 = XEXP (XEXP (pp, 1), 1);
4409 1.1 mrg #if DEBUG_CMP
4410 1.1 mrg fprintf(stderr, "adjacent cbranches\n");
4411 1.1 mrg debug_rtx(pop0);
4412 1.1 mrg debug_rtx(pop1);
4413 1.1 mrg #endif
4414 1.1 mrg if (rtx_equal_p (op0, pop0)
4415 1.1 mrg && rtx_equal_p (op1, pop1))
4416 1.1 mrg return true;
4417 1.1 mrg #if DEBUG_CMP
4418 1.1 mrg fprintf(stderr, "prev cmp not same\n");
4419 1.1 mrg #endif
4420 1.1 mrg return false;
4421 1.1 mrg }
4422 1.1 mrg
4423 1.1 mrg /* Else the previous insn must be a SET, with either the source or
4424 1.1 mrg dest equal to operands[0], and operands[1] must be zero. */
4425 1.1 mrg
4426 1.1 mrg if (!rtx_equal_p (op1, const0_rtx))
4427 1.1 mrg {
4428 1.1 mrg #if DEBUG_CMP
4429 1.1 mrg fprintf(stderr, "operands[1] not const0_rtx\n");
4430 1.1 mrg #endif
4431 1.1 mrg return false;
4432 1.1 mrg }
4433 1.1 mrg if (GET_CODE (pp) != SET)
4434 1.1 mrg {
4435 1.1 mrg #if DEBUG_CMP
4436 1.1 mrg fprintf (stderr, "pp not set\n");
4437 1.1 mrg #endif
4438 1.1 mrg return false;
4439 1.1 mrg }
4440 1.1 mrg if (!rtx_equal_p (op0, SET_SRC (pp))
4441 1.1 mrg && !rtx_equal_p (op0, SET_DEST (pp)))
4442 1.1 mrg {
4443 1.1 mrg #if DEBUG_CMP
4444 1.1 mrg fprintf(stderr, "operands[0] not found in set\n");
4445 1.1 mrg #endif
4446 1.1 mrg return false;
4447 1.1 mrg }
4448 1.1 mrg
4449 1.1 mrg #if DEBUG_CMP
4450 1.1 mrg fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4451 1.1 mrg #endif
4452 1.1 mrg if ((pflags & flags_needed) == flags_needed)
4453 1.1 mrg return true;
4454 1.1 mrg
4455 1.1 mrg return false;
4456 1.1 mrg }
4457 1.1 mrg
4458 1.1 mrg /* Return the pattern for a compare. This will be commented out if
4459 1.1 mrg the compare is redundant, else a normal pattern is returned. Thus,
4460 1.1 mrg the assembler output says where the compare would have been. */
4461 1.1 mrg char *
4462 1.1 mrg m32c_output_compare (rtx_insn *insn, rtx *operands)
4463 1.1 mrg {
4464 1.1 mrg static char templ[] = ";cmp.b\t%1,%0";
4465 1.1 mrg /* ^ 5 */
4466 1.1 mrg
4467 1.1 mrg templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4468 1.1 mrg if (m32c_compare_redundant (insn, operands))
4469 1.1 mrg {
4470 1.1 mrg #if DEBUG_CMP
4471 1.1 mrg fprintf(stderr, "cbranch: cmp not needed\n");
4472 1.1 mrg #endif
4473 1.1 mrg return templ;
4474 1.1 mrg }
4475 1.1 mrg
4476 1.1 mrg #if DEBUG_CMP
4477 1.1 mrg fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4478 1.1 mrg #endif
4479 1.1 mrg return templ + 1;
4480 1.1 mrg }
4481 1.1 mrg
/* Record symbol-encoding choices (e.g. far/near addressing) in the
   symbol's flags when it is first seen.  */
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* Register/mode legality hooks: how many hard registers a mode
   occupies, which modes a given hard register may hold, and which
   mode pairs may share a register.  */
#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p

/* Restrict mode changes on registers where a subreg reinterpretation
   would be unsafe for this target.  */
#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class

/* The Global `targetm' Variable. */

/* Instantiate the target-hook vector from all TARGET_* macros defined
   above (and the defaults for the rest).  */
struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collector roots generated by gengtype for this file.  */
#include "gt-m32c.h"
4507