/* m32c.cc revision 1.1 — source-browser navigation header
   ("Home | History | Annotate | Line # | Download | only in m32c")
   kept as a comment so the file remains valid C++.  */
      1 /* Target Code for R8C/M16C/M32C
      2    Copyright (C) 2005-2022 Free Software Foundation, Inc.
      3    Contributed by Red Hat.
      4 
      5    This file is part of GCC.
      6 
      7    GCC is free software; you can redistribute it and/or modify it
      8    under the terms of the GNU General Public License as published
      9    by the Free Software Foundation; either version 3, or (at your
     10    option) any later version.
     11 
     12    GCC is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with GCC; see the file COPYING3.  If not see
     19    <http://www.gnu.org/licenses/>.  */
     20 
     21 #define IN_TARGET_CODE 1
     22 
     23 #include "config.h"
     24 #include "system.h"
     25 #include "coretypes.h"
     26 #include "backend.h"
     27 #include "target.h"
     28 #include "rtl.h"
     29 #include "tree.h"
     30 #include "stringpool.h"
     31 #include "attribs.h"
     32 #include "df.h"
     33 #include "memmodel.h"
     34 #include "tm_p.h"
     35 #include "optabs.h"
     36 #include "regs.h"
     37 #include "emit-rtl.h"
     38 #include "recog.h"
     39 #include "diagnostic-core.h"
     40 #include "output.h"
     41 #include "insn-attr.h"
     42 #include "flags.h"
     43 #include "reload.h"
     44 #include "stor-layout.h"
     45 #include "varasm.h"
     46 #include "calls.h"
     47 #include "explow.h"
     48 #include "expr.h"
     49 #include "tm-constrs.h"
     50 #include "builtins.h"
     51 #include "opts.h"
     52 
     53 /* This file should be included last.  */
     54 #include "target-def.h"
     55 
     56 /* Prototypes */
     57 
     58 /* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,			/* Emit a pushm instruction.  */
  PP_popm,			/* Emit a popm instruction.  */
  PP_justcount			/* Emit nothing; presumably only compute the
				   byte count (m32c_pushm_popm returns int) —
				   confirm against its definition.  */
} Push_Pop_Type;
     65 
     66 static bool m32c_function_needs_enter (void);
     67 static tree interrupt_handler (tree *, tree, tree, int, bool *);
     68 static tree function_vector_handler (tree *, tree, tree, int, bool *);
     69 static int interrupt_p (tree node);
     70 static int bank_switch_p (tree node);
     71 static int fast_interrupt_p (tree node);
     72 static int interrupt_p (tree node);
     73 static bool m32c_asm_integer (rtx, unsigned int, int);
     74 static int m32c_comp_type_attributes (const_tree, const_tree);
     75 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
     76 static struct machine_function *m32c_init_machine_status (void);
     77 static void m32c_insert_attributes (tree, tree *);
     78 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
     79 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
     80 static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
     81 static bool m32c_pass_by_reference (cumulative_args_t,
     82 				    const function_arg_info &);
     83 static void m32c_function_arg_advance (cumulative_args_t,
     84 				       const function_arg_info &);
     85 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
     86 static int m32c_pushm_popm (Push_Pop_Type);
     87 static bool m32c_strict_argument_naming (cumulative_args_t);
     88 static rtx m32c_struct_value_rtx (tree, int);
     89 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
     90 static int need_to_save (int);
     91 static rtx m32c_function_value (const_tree, const_tree, bool);
     92 static rtx m32c_libcall_value (machine_mode, const_rtx);
     93 
     94 /* Returns true if an address is specified, else false.  */
     95 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
     96 
     97 static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
     98 
     99 #define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)
    100 
    101 #define streq(a,b) (strcmp ((a), (b)) == 0)
    102 
    103 /* Internal support routines */
    104 
    105 /* Debugging statements are tagged with DEBUG0 only so that they can
    106    be easily enabled individually, by replacing the '0' with '1' as
    107    needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* Per-class register bitmaps; indexed as [class][0] throughout this
   file (only the first HARD_REG_SET word is consulted).  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;	/* Encoded string and write cursor.  */
static GTY(()) rtx patternr[30];	/* RTX node for each pattern char.  */
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
    130 
    131 static int
    132 far_addr_space_p (rtx x)
    133 {
    134   if (GET_CODE (x) != MEM)
    135     return 0;
    136 #if DEBUG0
    137   fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
    138   fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
    139 #endif
    140   return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
    141 }
    142 
    143 /* We do most RTX matching by converting the RTX into a string, and
    144    using string compares.  This vastly simplifies the logic in many of
    145    the functions in this file.
    146 
    147    On exit, pattern[] has the encoded string (use RTX_IS("...") to
    148    compare it) and patternr[] has pointers to the nodes in the RTX
    149    corresponding to each character in the encoded string.  The latter
    150    is mostly used by print_operand().
    151 
    152    Unrecognized patterns have '?' in them; this shows up when the
    153    assembler complains about syntax errors.
    154 */
    155 
/* Worker for encode_pattern: append one or more characters to
   pattern[] for the top node of X, record the node in patternr[], and
   recurse into the operands.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the NUL terminator; once the buffer is full, mark
     the pattern unrecognizable and stop recursing.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  /* Remember which RTX node produced this character position, so
     print_operand() and the constraint matchers can find it.  */
  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* A size-changing subreg gets an extra 'S', as does a PSImode
	 subreg of a plain register.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU */
    case CONST:
      /* CONST is transparent: only its operand is encoded.  */
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' followed by the unspec number as a single decimal digit.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Anything else renders the pattern unmatchable; '?' shows up
	 as assembler syntax errors (see comment above).  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}
    264 
/* Encode X into pattern[] from the beginning and NUL-terminate the
   result so it can be compared with RTX_IS().  */
static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}
    272 
    273 /* Since register names indicate the mode they're used in, we need a
    274    way to determine which name to refer to the register with.  Called
    275    by print_operand().  */
    276 
    277 static const char *
    278 reg_name_with_mode (int regno, machine_mode mode)
    279 {
    280   int mlen = GET_MODE_SIZE (mode);
    281   if (regno == R0_REGNO && mlen == 1)
    282     return "r0l";
    283   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
    284     return "r2r0";
    285   if (regno == R0_REGNO && mlen == 6)
    286     return "r2r1r0";
    287   if (regno == R0_REGNO && mlen == 8)
    288     return "r3r1r2r0";
    289   if (regno == R1_REGNO && mlen == 1)
    290     return "r1l";
    291   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
    292     return "r3r1";
    293   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
    294     return "a1a0";
    295   return reg_names[regno];
    296 }
    297 
    298 /* How many bytes a register uses on stack when it's pushed.  We need
    299    to know this because the push opcode needs to explicitly indicate
    300    the size of the register, even though the name of the register
    301    already tells it that.  Used by m32c_output_reg_{push,pop}, which
    302    is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
    303 
    304 static int
    305 reg_push_size (int regno)
    306 {
    307   switch (regno)
    308     {
    309     case R0_REGNO:
    310     case R1_REGNO:
    311       return 2;
    312     case R2_REGNO:
    313     case R3_REGNO:
    314     case FLG_REGNO:
    315       return 2;
    316     case A0_REGNO:
    317     case A1_REGNO:
    318     case SB_REGNO:
    319     case FB_REGNO:
    320     case SP_REGNO:
    321       if (TARGET_A16)
    322 	return 2;
    323       else
    324 	return 3;
    325     default:
    326       gcc_unreachable ();
    327     }
    328 }
    329 
    330 /* Given two register classes, find the largest intersection between
    331    them.  If there is no intersection, return RETURNED_IF_EMPTY
    332    instead.  */
    333 static reg_class_t
    334 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
    335 	      reg_class_t returned_if_empty)
    336 {
    337   HARD_REG_SET cc;
    338   int i;
    339   reg_class_t best = NO_REGS;
    340   unsigned int best_size = 0;
    341 
    342   if (original_class == limiting_class)
    343     return original_class;
    344 
    345   cc = reg_class_contents[original_class] & reg_class_contents[limiting_class];
    346 
    347   for (i = 0; i < LIM_REG_CLASSES; i++)
    348     {
    349       if (hard_reg_set_subset_p (reg_class_contents[i], cc))
    350 	if (best_size < reg_class_size[i])
    351 	  {
    352 	    best = (reg_class_t) i;
    353 	    best_size = reg_class_size[i];
    354 	  }
    355 
    356     }
    357   if (best == NO_REGS)
    358     return returned_if_empty;
    359   return best;
    360 }
    361 
    362 /* Used by m32c_register_move_cost to determine if a move is
    363    impossibly expensive.  */
    364 static bool
    365 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
    366 {
    367   /* Cache the results:  0=untested  1=no  2=yes */
    368   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
    369 
    370   if (results[(int) rclass][mode] == 0)
    371     {
    372       int r;
    373       results[rclass][mode] = 1;
    374       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
    375 	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
    376 	    && m32c_hard_regno_mode_ok (r, mode))
    377 	  {
    378 	    results[rclass][mode] = 2;
    379 	    break;
    380 	  }
    381     }
    382 
    383 #if DEBUG0
    384   fprintf (stderr, "class %s can hold %s? %s\n",
    385 	   class_names[(int) rclass], mode_name[mode],
    386 	   (results[rclass][mode] == 2) ? "yes" : "no");
    387 #endif
    388   return results[(int) rclass][mode] == 2;
    389 }
    390 
    391 /* Run-time Target Specification.  */
    392 
    393 /* Memregs are memory locations that gcc treats like general
    394    registers, as there are a limited number of true registers and the
    395    m32c families can use memory in most places that registers can be
    396    used.
    397 
    398    However, since memory accesses are more expensive than registers,
    399    we allow the user to limit the number of memregs available, in
    400    order to try to persuade gcc to try harder to use real registers.
    401 
    402    Memregs are provided by lib1funcs.S.
    403 */
    404 
int ok_to_change_target_memregs = TRUE;	/* NOTE(review): the code that clears
					   this is not visible in this chunk —
					   presumably it latches once memreg
					   usage is committed; confirm.  */
    406 
    407 /* Implements TARGET_OPTION_OVERRIDE.  */
    408 
    409 #undef TARGET_OPTION_OVERRIDE
    410 #define TARGET_OPTION_OVERRIDE m32c_option_override
    411 
    412 static void
    413 m32c_option_override (void)
    414 {
    415   /* We limit memregs to 0..16, and provide a default.  */
    416   if (OPTION_SET_P (target_memregs))
    417     {
    418       if (target_memregs < 0 || target_memregs > 16)
    419 	error ("invalid target memregs value %<%d%>", target_memregs);
    420     }
    421   else
    422     target_memregs = 16;
    423 
    424   if (TARGET_A24)
    425     flag_ivopts = 0;
    426 
    427   /* This target defaults to strict volatile bitfields.  */
    428   if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    429     flag_strict_volatile_bitfields = 1;
    430 
    431   /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
    432      This is always worse than an absolute call.  */
    433   if (TARGET_A16)
    434     flag_no_function_cse = 1;
    435 
    436   /* This wants to put insns between compares and their jumps.  */
    437   /* FIXME: The right solution is to properly trace the flags register
    438      values, but that is too much work for stage 4.  */
    439   flag_combine_stack_adjustments = 0;
    440 }
    441 
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-apply the A16 restriction (no 16-bit indirect call, so function
   CSE always loses — see m32c_option_override) whenever options are
   changed after the initial override.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
    451 
    452 /* Defining data structures for per-function information */
    453 
    454 /* The usual; we set up our machine_function data.  */
/* Allocate a zero-initialized, GC-managed machine_function record.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
    460 
    461 /* Implements INIT_EXPANDERS.  We just set up to call the above
    462    function.  */
void
m32c_init_expanders (void)
{
  /* Hook our per-function allocator into the generic machinery.  */
  init_machine_status = m32c_init_machine_status;
}
    468 
    469 /* Storage Layout */
    470 
    471 /* Register Basics */
    472 
    473 /* Basic Characteristics of Registers */
    474 
    475 /* Whether a mode fits in a register is complex enough to warrant a
    476    table.  */
static struct
{
  char qi_regs;	/* Registers needed to hold a QImode value; 0 = QImode
		   not allowed in this register.  */
  char hi_regs;	/* Likewise for HImode.  */
  char pi_regs;	/* Likewise for PSImode.  */
  char si_regs;	/* Likewise for SImode.  */
  char di_regs;	/* Likewise for DImode.  */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
    507 
    508 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
    509    of available memregs, and select which registers need to be preserved
    510    across calls based on the chip family.  */
    511 
    512 #undef TARGET_CONDITIONAL_REGISTER_USAGE
    513 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
    514 void
    515 m32c_conditional_register_usage (void)
    516 {
    517   int i;
    518 
    519   if (target_memregs >= 0 && target_memregs <= 16)
    520     {
    521       /* The command line option is bytes, but our "registers" are
    522 	 16-bit words.  */
    523       for (i = (target_memregs+1)/2; i < 8; i++)
    524 	{
    525 	  fixed_regs[MEM0_REGNO + i] = 1;
    526 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
    527 	}
    528     }
    529 
    530   /* M32CM and M32C preserve more registers across function calls.  */
    531   if (TARGET_A24)
    532     {
    533       call_used_regs[R1_REGNO] = 0;
    534       call_used_regs[R2_REGNO] = 0;
    535       call_used_regs[R3_REGNO] = 0;
    536       call_used_regs[A0_REGNO] = 0;
    537       call_used_regs[A1_REGNO] = 0;
    538     }
    539 }
    540 
    541 /* How Values Fit in Registers */
    542 
    543 /* Implements TARGET_HARD_REGNO_NREGS.  This is complicated by the fact that
    544    different registers are different sizes from each other, *and* may
    545    be different sizes in different chip families.  */
/* Worker: number of consecutive hard registers starting at REGNO
   needed to hold MODE, or 0 if MODE cannot start at REGNO.  The size
   checks below must stay in ascending order.  */
static unsigned int
m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
{
  /* The flags register holds the condition code in one register.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos: plain word-count computation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16, SImode in a0 spans a0/a1 as a pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
    571 
    572 static unsigned int
    573 m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
    574 {
    575   unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
    576   return rv ? rv : 1;
    577 }
    578 
    579 /* Implement TARGET_HARD_REGNO_MODE_OK.  The above function does the work
    580    already; just test its return value.  */
    581 static bool
    582 m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
    583 {
    584   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
    585 }
    586 
    587 /* Implement TARGET_MODES_TIEABLE_P.  In general, modes aren't tieable since
    588    registers are all different sizes.  However, since most modes are
    589    bigger than our registers anyway, it's easier to implement this
    590    function that way, leaving QImode as the only unique case.  */
    591 static bool
    592 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
    593 {
    594   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    595     return 1;
    596 
    597 #if 0
    598   if (m1 == QImode || m2 == QImode)
    599     return 0;
    600 #endif
    601 
    602   return 1;
    603 }
    604 
    605 /* Register Classes */
    606 
    607 /* Implements REGNO_REG_CLASS.  */
/* Map each hard register to its smallest containing class.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      /* Memregs have their own class; everything else falls back to
	 ALL_REGS.  */
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
    639 
    640 /* Implements REGNO_OK_FOR_BASE_P.  */
    641 int
    642 m32c_regno_ok_for_base_p (int regno)
    643 {
    644   if (regno == A0_REGNO
    645       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
    646     return 1;
    647   return 0;
    648 }
    649 
    650 /* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
    651    registers of the appropriate size.  */
    652 
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

/* Narrow RCLASS toward a class whose registers suit X's mode, using
   reduce_class so an empty intersection falls back to RCLASS.  */
static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no starting class, guess from the mode: byte values in
     r0l/r1l, everything else in r0..r3.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Classes overlapping the control registers: byte reloads prefer
     HL_REGS; other modes are left alone.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case E_QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  /* Otherwise steer wide values toward classes that can hold them.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode must end in a QI-capable register in any case.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
    703 
    704 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
    705 
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

/* Output reloads use the same class preference as input reloads.  */
static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
    714 
    715 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
    716    address registers for reloads since they're needed for address
    717    reloads.  */
    718 int
    719 m32c_limit_reload_class (machine_mode mode, int rclass)
    720 {
    721 #if DEBUG0
    722   fprintf (stderr, "limit_reload_class for %s: %s ->",
    723 	   mode_name[mode], class_names[rclass]);
    724 #endif
    725 
    726   if (mode == QImode)
    727     rclass = reduce_class (rclass, HL_REGS, rclass);
    728   else if (mode == HImode)
    729     rclass = reduce_class (rclass, HI_REGS, rclass);
    730   else if (mode == SImode)
    731     rclass = reduce_class (rclass, SI_REGS, rclass);
    732 
    733   if (rclass != A_REGS)
    734     rclass = reduce_class (rclass, DI_REGS, rclass);
    735 
    736 #if DEBUG0
    737   fprintf (stderr, " %s\n", class_names[rclass]);
    738 #endif
    739   return rclass;
    740 }
    741 
    742 /* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
    743    r0 or r1, as those are the only real QImode registers.  CR regs get
    744    reloaded through appropriately sized general or address
    745    registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* First word of the class's register-content bitmap.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory must go through r0/r1 (see comment above), so a
     class consisting only of r2/r3 needs a QI_REGS intermediary.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* sb..sp sources headed for control registers are staged through a
     general register on A16 (or for HImode), an address register
     otherwise.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
    764 
    765 /* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
    766    reloads.  */
    767 
    768 #undef TARGET_CLASS_LIKELY_SPILLED_P
    769 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
    770 
    771 static bool
    772 m32c_class_likely_spilled_p (reg_class_t regclass)
    773 {
    774   if (regclass == A_REGS)
    775     return true;
    776 
    777   return (reg_class_size[(int) regclass] == 1);
    778 }
    779 
    780 /* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
    781    documented meaning, to avoid potential inconsistencies with actual
    782    class definitions.  */
    783 
    784 #undef TARGET_CLASS_MAX_NREGS
    785 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
    786 
    787 static unsigned char
    788 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
    789 {
    790   int rn;
    791   unsigned char max = 0;
    792 
    793   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    794     if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
    795       {
    796 	unsigned char n = m32c_hard_regno_nregs (rn, mode);
    797 	if (max < n)
    798 	  max = n;
    799       }
    800   return max;
    801 }
    802 
    803 /* Implements TARGET_CAN_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
    804    QI (r0l, r1l) because the chip doesn't support QI ops on other
    805    registers (well, it does on a0/a1 but if we let gcc do that, reload
    806    suffers).  Otherwise, we allow changes to larger modes.  */
static bool
m32c_can_change_mode_class (machine_mode from,
			    machine_mode to, reg_class_t rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "can change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_mode_ok (rn, to))
	return false;

  /* Narrowing to QImode: only allowed when the class contains nothing
     but r0/r1.  Mask 0x1ffa is every hard register except r0 (bit 0)
     and r1 (bit 2) — the complement of the 0x0005 mask below.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa) == 0;

  if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return true;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return true;

  return false;
}
    835 
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is the a0 register or a pseudo.  NOTE(review): REGNO is
   applied without checking that X is a REG — callers must only pass
   register rtxes.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
    848 
    849 /* Implements matching for constraints (see next function too).  'S' is
    850    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
    851    call return values.  */
/* Match VALUE against the memory constraints by encoding it with
   encode_pattern() and comparing the resulting string; patternr[]
   supplies the node at each string position.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-address-space memory: an SImode register (a0 or pseudo),
       various extended-HImode register+offset forms, or a bare
       symbol.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      /* mem((reg+int)+int): allowed when the register is fb and the
	 inner offset is zero.  */
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* Stack-pointer-relative addresses don't qualify.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with or without offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant or symbolic addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    /* PARALLELs used for call return values.  */
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
    934 
    935 /* STACK AND CALLING */
    936 
    937 /* Frame Layout */
    938 
    939 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
    940    (yes, THREE bytes) onto the stack for the return address, but we
    941    don't support pointers bigger than 16 bits on those chips.  This
    942    will likely wreak havoc with exception unwinding.  FIXME.  */
    943 rtx
    944 m32c_return_addr_rtx (int count)
    945 {
    946   machine_mode mode;
    947   int offset;
    948   rtx ra_mem;
    949 
    950   if (count)
    951     return NULL_RTX;
    952   /* we want 2[$fb] */
    953 
    954   if (TARGET_A24)
    955     {
    956       /* It's four bytes */
    957       mode = PSImode;
    958       offset = 4;
    959     }
    960   else
    961     {
    962       /* FIXME: it's really 3 bytes */
    963       mode = HImode;
    964       offset = 2;
    965     }
    966 
    967   ra_mem =
    968     gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
    969 				      offset));
    970   return copy_to_mode_reg (mode, ra_mem);
    971 }
    972 
    973 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
    974 rtx
    975 m32c_incoming_return_addr_rtx (void)
    976 {
    977   /* we want [sp] */
    978   return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
    979 }
    980 
    981 /* Exception Handling Support */
    982 
    983 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
    984    pointers.  */
    985 int
    986 m32c_eh_return_data_regno (int n)
    987 {
    988   switch (n)
    989     {
    990     case 0:
    991       return MEM0_REGNO;
    992     case 1:
    993       return MEM0_REGNO+4;
    994     default:
    995       return INVALID_REGNUM;
    996     }
    997 }
    998 
    999 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
   1000    m32c_emit_eh_epilogue.  */
   1001 rtx
   1002 m32c_eh_return_stackadj_rtx (void)
   1003 {
   1004   if (!cfun->machine->eh_stack_adjust)
   1005     {
   1006       rtx sa;
   1007 
   1008       sa = gen_rtx_REG (Pmode, R0_REGNO);
   1009       cfun->machine->eh_stack_adjust = sa;
   1010     }
   1011   return cfun->machine->eh_stack_adjust;
   1012 }
   1013 
   1014 /* Registers That Address the Stack Frame */
   1015 
   1016 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   1017    the original spec called for dwarf numbers to vary with register
   1018    width as well, for example, r0l, r0, and r2r0 would each have
   1019    different dwarf numbers.  GCC doesn't support this, and we don't do
   1020    it, and gdb seems to like it this way anyway.  */
   1021 unsigned int
   1022 m32c_dwarf_frame_regnum (int n)
   1023 {
   1024   switch (n)
   1025     {
   1026     case R0_REGNO:
   1027       return 5;
   1028     case R1_REGNO:
   1029       return 6;
   1030     case R2_REGNO:
   1031       return 7;
   1032     case R3_REGNO:
   1033       return 8;
   1034     case A0_REGNO:
   1035       return 9;
   1036     case A1_REGNO:
   1037       return 10;
   1038     case FB_REGNO:
   1039       return 11;
   1040     case SB_REGNO:
   1041       return 19;
   1042 
   1043     case SP_REGNO:
   1044       return 12;
   1045     case PC_REGNO:
   1046       return 13;
   1047     default:
   1048       return DWARF_FRAME_REGISTERS + 1;
   1049     }
   1050 }
   1051 
   1052 /* The frame looks like this:
   1053 
   1054    ap -> +------------------------------
   1055          | Return address (3 or 4 bytes)
   1056 	 | Saved FB (2 or 4 bytes)
   1057    fb -> +------------------------------
   1058 	 | local vars
   1059          | register saves fb
   1060 	 |        through r0 as needed
   1061    sp -> +------------------------------
   1062 */
   1063 
/* We use this to wrap all emitted insns in the prologue, marking them
   frame-related so that call-frame (CFI) information is generated for
   them.  Returns X for convenient chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
   1071 
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* hard register number */
  int bit;		/* corresponding bit in the PUSHM/POPM mask */
  int a16_bytes;	/* bytes pushed on 16-bit (R8C/M16C) parts */
  int a24_bytes;	/* bytes pushed on 24-bit (M32C) parts */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in the PUSHM table above.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
   1094 
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved.  */
  if (fixed_regs[regno])
    return 0;
  /* EH unwinding may need any register.  */
  if (crtl->calls_eh_return)
    return 1;
  /* NOTE(review): $fb is excluded here -- presumably saved/restored
     by the enter/exit sequence rather than pushm; confirm against the
     prologue code.  */
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* The usual case: live and not (caller-saved unless interrupt).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_or_fixed_reg_p (regno) || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
   1122 
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* accumulated PUSHM/POPM bit mask */
  int byte_count = 0, bytes;	/* total stack adjustment in bytes */
  int i;
  rtx dwarf_set[PUSHM_N];	/* per-register CFI SETs for the note */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* bits of registers we must NOT touch */

  /* If the return value comes back in registers (a PARALLEL), exclude
     those registers from the save/restore set so the pop does not
     clobber them.  NOTE(review): the mask/comment pairing below
     (0x20 "PSI, SI" vs 0xf0 "DF") looks inconsistent with the bit
     assignments in pushm_info -- confirm before relying on the
     comments.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the PUSHM table (nearest-to-sp first), building the mask,
     the byte count, and -- for pushes -- the CFI SETs.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record "mem[sp+ofs] = reg" for the DWARF note, and mark
	     it frame-related.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* For interrupt handlers, stash the general-register mask in
     cfun->machine->intr_pushm (consumed elsewhere) and start over;
     what remains here is only the memreg saves.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* Element 0 of the note describes the stack-pointer
	     adjustment; the rest are the per-register stores.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Memregs have no pushm encoding; push them one at a time.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
   1256 
   1257 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
   1258    diagrams our call frame.  */
   1259 int
   1260 m32c_initial_elimination_offset (int from, int to)
   1261 {
   1262   int ofs = 0;
   1263 
   1264   if (from == AP_REGNO)
   1265     {
   1266       if (TARGET_A16)
   1267 	ofs += 5;
   1268       else
   1269 	ofs += 8;
   1270     }
   1271 
   1272   if (to == SP_REGNO)
   1273     {
   1274       ofs += m32c_pushm_popm (PP_justcount);
   1275       ofs += get_frame_size ();
   1276     }
   1277 
   1278   /* Account for push rounding.  */
   1279   if (TARGET_A24)
   1280     ofs = (ofs + 1) & ~1;
   1281 #if DEBUG0
   1282   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
   1283 	   to, ofs);
   1284 #endif
   1285   return ofs;
   1286 }
   1287 
   1288 /* Passing Function Arguments on the Stack */
   1289 
/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
   M32C has word stacks.  */
poly_int64
m32c_push_rounding (poly_int64 n)
{
  /* Byte-stack chips push exactly N bytes.  */
  if (TARGET_R8C || TARGET_M16C)
    return n;
  /* Word-stack M32C rounds each push up to an even size.  */
  return (n + 1) & ~1;
}
   1299 
   1300 #undef TARGET_PUSH_ARGUMENT
   1301 #define TARGET_PUSH_ARGUMENT hook_bool_uint_true
   1302 
   1303 /* Passing Arguments in Registers */
   1304 
   1305 /* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   1306    registers, partly on stack.  If our function returns a struct, a
   1307    pointer to a buffer for it is at the top of the stack (last thing
   1308    pushed).  The first few real arguments may be in registers as
   1309    follows:
   1310 
   1311    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
   1312 		arg2 in r2 if it's HI (else pushed on stack)
   1313 		rest on stack
   1314    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
   1315 		rest on stack
   1316 
   1317    Structs are not passed in registers, even if they fit.  Only
   1318    integer and pointer types are passed in registers.
   1319 
   1320    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   1321    r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[arg.mode], arg.named);
  debug_tree (arg.type);
#endif

  /* End-of-arguments marker: return a dummy non-NULL value.  */
  if (arg.end_marker_p ())
    return GEN_INT (0);

  /* The hidden struct-return pointer and unnamed (variadic) args
     always go on the stack.  */
  if (ca->force_mem || !arg.named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       arg.named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): this condition can never be true -- a type cannot
     be both integral and a pointer.  Judging by the block comment
     above ("Only integer and pointer types are passed in registers")
     it was likely meant to be !INTEGRAL_TYPE_P && !POINTER_TYPE_P.
     Fixing it would change the calling convention (ABI), so it is
     deliberately left as-is; confirm upstream before touching.  */
  if (arg.type && INTEGRAL_TYPE_P (arg.type) && POINTER_TYPE_P (arg.type))
    return NULL_RTX;

  /* Aggregates never go in registers, even when they would fit.  */
  if (arg.aggregate_type_p ())
    return NULL_RTX;

  /* Arg 1: $r1 on A16 or $r0 on A24 if 1 or 2 bytes.
     Arg 2: $r2 on A16 if exactly 2 bytes.  Everything else: stack.  */
  switch (ca->parm_num)
    {
    case 1:
      if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
   1373 
   1374 #undef TARGET_PASS_BY_REFERENCE
   1375 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
   1376 static bool
   1377 m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
   1378 {
   1379   return 0;
   1380 }
   1381 
   1382 /* Implements INIT_CUMULATIVE_ARGS.  */
   1383 void
   1384 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
   1385 			   tree fntype,
   1386 			   rtx libname ATTRIBUTE_UNUSED,
   1387 			   tree fndecl,
   1388 			   int n_named_args ATTRIBUTE_UNUSED)
   1389 {
   1390   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
   1391     ca->force_mem = 1;
   1392   else
   1393     ca->force_mem = 0;
   1394   ca->parm_num = 1;
   1395 }
   1396 
   1397 /* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
   1398    functions returning structures, so we always reset that.  Otherwise,
   1399    we only need to know the sequence number of the argument to know what
   1400    to do with it.  */
   1401 #undef TARGET_FUNCTION_ARG_ADVANCE
   1402 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
   1403 static void
   1404 m32c_function_arg_advance (cumulative_args_t ca_v,
   1405 			   const function_arg_info &)
   1406 {
   1407   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
   1408 
   1409   if (ca->force_mem)
   1410     ca->force_mem = 0;
   1411   else
   1412     ca->parm_num++;
   1413 }
   1414 
   1415 /* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
   1416 #undef TARGET_FUNCTION_ARG_BOUNDARY
   1417 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
   1418 static unsigned int
   1419 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
   1420 			    const_tree type ATTRIBUTE_UNUSED)
   1421 {
   1422   return (TARGET_A16 ? 8 : 16);
   1423 }
   1424 
   1425 /* Implements FUNCTION_ARG_REGNO_P.  */
   1426 int
   1427 m32c_function_arg_regno_p (int r)
   1428 {
   1429   if (TARGET_A24)
   1430     return (r == R0_REGNO);
   1431   return (r == R1_REGNO || r == R2_REGNO);
   1432 }
   1433 
   1434 /* HImode and PSImode are the two "native" modes as far as GCC is
   1435    concerned, but the chips also support a 32-bit mode which is used
   1436    for some opcodes in R8C/M16C and for reset vectors and such.  */
   1437 #undef TARGET_VALID_POINTER_MODE
   1438 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
   1439 static bool
   1440 m32c_valid_pointer_mode (scalar_int_mode mode)
   1441 {
   1442   if (mode == HImode
   1443       || mode == PSImode
   1444       || mode == SImode
   1445       )
   1446     return 1;
   1447   return 0;
   1448 }
   1449 
   1450 /* How Scalar Function Values Are Returned */
   1451 
   1452 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   1453    combination of registers starting there (r2r0 for longs, r3r1r2r0
   1454    for long long, r3r2r1r0 for doubles), except that that ABI
   1455    currently doesn't work because it ends up using all available
   1456    general registers and gcc often can't compile it.  So, instead, we
   1457    return anything bigger than 16 bits in "mem0" (effectively, a
   1458    memory location).  */
   1459 
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Live code: anything wider than 16 bits comes back in the mem0
     pseudo-register block; 8/16-bit values come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
   1513 
   1514 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
   1515    conventions.  */
   1516 
   1517 #undef TARGET_FUNCTION_VALUE
   1518 #define TARGET_FUNCTION_VALUE m32c_function_value
   1519 
   1520 static rtx
   1521 m32c_function_value (const_tree valtype,
   1522 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
   1523 		     bool outgoing ATTRIBUTE_UNUSED)
   1524 {
   1525   /* return reg or parallel */
   1526   const machine_mode mode = TYPE_MODE (valtype);
   1527   return m32c_libcall_value (mode, NULL_RTX);
   1528 }
   1529 
/* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p

static bool
m32c_function_value_regno_p (const unsigned int regno)
{
  /* Values come back either in $r0 or in the mem0 pseudo-register
     block (see m32c_libcall_value).  */
  return (regno == R0_REGNO || regno == MEM0_REGNO);
}
   1540 
   1541 /* How Large Values Are Returned */
   1542 
   1543 /* We return structures by pushing the address on the stack, even if
   1544    we use registers for the first few "real" arguments.  */
   1545 #undef TARGET_STRUCT_VALUE_RTX
   1546 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
   1547 static rtx
   1548 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
   1549 		       int incoming ATTRIBUTE_UNUSED)
   1550 {
   1551   return 0;
   1552 }
   1553 
   1554 /* Function Entry and Exit */
   1555 
   1556 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
   1557 int
   1558 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
   1559 {
   1560   if (cfun->machine->is_interrupt)
   1561     return 1;
   1562   return 0;
   1563 }
   1564 
   1565 /* Implementing the Varargs Macros */
   1566 
   1567 #undef TARGET_STRICT_ARGUMENT_NAMING
   1568 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
   1569 static bool
   1570 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
   1571 {
   1572   return 1;
   1573 }
   1574 
   1575 /* Trampolines for Nested Functions */
   1576 
   1577 /*
   1578    m16c:
   1579    1 0000 75C43412              mov.w   #0x1234,a0
   1580    2 0004 FC000000              jmp.a   label
   1581 
   1582    m32c:
   1583    1 0000 BC563412              mov.l:s #0x123456,a0
   1584    2 0004 CC000000              jmp.a   label
   1585 */
   1586 
   1587 /* Implements TRAMPOLINE_SIZE.  */
   1588 int
   1589 m32c_trampoline_size (void)
   1590 {
   1591   /* Allocate extra space so we can avoid the messy shifts when we
   1592      initialize the trampoline; we just write past the end of the
   1593      opcode.  */
   1594   return TARGET_A16 ? 8 : 10;
   1595 }
   1596 
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines only need even
   (2-byte) alignment.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
   1603 
/* Implements TARGET_TRAMPOLINE_INIT.  Writes the instruction bytes
   shown in the listing above into the trampoline, splicing in the
   static chain value and target address.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* A0(m,i) addresses byte offset I of the trampoline as mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      /* 0xc475 little-endian = bytes 75 C4: "mov.w #imm16,a0".  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      /* 0xfc: "jmp.a label" (see listing above).  */
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      /* 0xbc: "mov.l:s #imm24,a0" (see listing above).  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      /* 0xcc: "jmp.a label".  */
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
   1638 
   1639 #undef TARGET_LRA_P
   1640 #define TARGET_LRA_P hook_bool_void_false
   1641 
   1642 /* Addressing Modes */
   1643 
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  /* Any constant (symbol, label, integer) is a legal address.  */
  if (CONSTANT_P (x))
    return 1;

  /* The address itself must be the pointer width of the family.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  /* Auto-modify forms are only allowed on the stack pointer.  */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  /* 32-bit addresses don't fit these on the 16-bit parts.  */
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb allows only 8-bit displacements on A16, 16-bit on
	     A24.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  /* $sp-relative displacements are always 8-bit.  */
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
   1778 
   1779 /* Implements REG_OK_FOR_BASE_P.  */
   1780 int
   1781 m32c_reg_ok_for_base_p (rtx x, int strict)
   1782 {
   1783   if (GET_CODE (x) != REG)
   1784     return 0;
   1785   switch (REGNO (x))
   1786     {
   1787     case A0_REGNO:
   1788     case A1_REGNO:
   1789     case SB_REGNO:
   1790     case FB_REGNO:
   1791     case SP_REGNO:
   1792       return 1;
   1793     default:
   1794       if (IS_PSEUDO (x, strict))
   1795 	return 1;
   1796       return 0;
   1797     }
   1798 }
   1799 
   1800 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
   1801    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
   1802    like this:
   1803        EB 4B FF    mova    -128[$fb],$a0
   1804        D8 0C FF FF mov.w:Q #0,-1[$a0]
   1805 
   1806    Alternately, we subtract the frame size, and hopefully use 8-bit aN
   1807    displacements:
   1808        7B F4       stc $fb,$a0
   1809        77 54 00 01 sub #256,$a0
   1810        D8 08 01    mov.w:Q #0,1[$a0]
   1811 
   1812    If we don't offset (i.e. offset by zero), we end up with:
   1813        7B F4       stc $fb,$a0
   1814        D8 0C 00 FF mov.w:Q #0,-256[$a0]
   1815 
   1816    We have to subtract *something* so that we have a PLUS rtx to mark
   1817    that we've done this reload.  The -128 offset will never result in
   1818    an 8-bit aN offset, and the payoff for the second case is five
   1819    loads *if* those loads are within 256 bytes of the other end of the
   1820    frame, so the third case seems best.  Note that we subtract the
   1821    zero, but detect that in the addhi3 pattern.  */
   1822 
   1823 #define BIG_FB_ADJ 0
   1824 
   1825 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   1826    worry about is frame base offsets, as $fb has a limited
   1827    displacement range.  We deal with this by attempting to reload $fb
   1828    itself into an address register; that seems to result in the best
   1829    code.  */
   1830 #undef TARGET_LEGITIMIZE_ADDRESS
   1831 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
   1832 static rtx
   1833 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
   1834 			 machine_mode mode)
   1835 {
   1836 #if DEBUG0
   1837   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
   1838   debug_rtx (x);
   1839   fprintf (stderr, "\n");
   1840 #endif
   1841 
   1842   if (GET_CODE (x) == PLUS
   1843       && GET_CODE (XEXP (x, 0)) == REG
   1844       && REGNO (XEXP (x, 0)) == FB_REGNO
   1845       && GET_CODE (XEXP (x, 1)) == CONST_INT
   1846       && (INTVAL (XEXP (x, 1)) < -128
   1847 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
   1848     {
   1849       /* reload FB to A_REGS */
   1850       rtx temp = gen_reg_rtx (Pmode);
   1851       x = copy_rtx (x);
   1852       emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
   1853       XEXP (x, 0) = temp;
   1854     }
   1855 
   1856   return x;
   1857 }
   1858 
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   1 if a reload was pushed for *X (possibly rewriting *X), 0 to let
   the generic reload machinery handle it.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + out-of-range offset).  Rewrite as
     (($fb + adjustment) + rest) and reload the inner sum into an
     address register.  BIG_FB_ADJ is 0, so this is the subtract-zero
     marker described above the BIG_FB_ADJ definition, detected by the
     addhi3 pattern.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: (($fb + const) + const) -- presumably the shape produced
     by the rewrite above; reload the inner sum into an address
     register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 3: on 24-bit-address targets, reload any PSImode address
     whole into an address register.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
   1929 
   1930 /* Return the appropriate mode for a named address pointer.  */
   1931 #undef TARGET_ADDR_SPACE_POINTER_MODE
   1932 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
   1933 static scalar_int_mode
   1934 m32c_addr_space_pointer_mode (addr_space_t addrspace)
   1935 {
   1936   switch (addrspace)
   1937     {
   1938     case ADDR_SPACE_GENERIC:
   1939       return TARGET_A24 ? PSImode : HImode;
   1940     case ADDR_SPACE_FAR:
   1941       return SImode;
   1942     default:
   1943       gcc_unreachable ();
   1944     }
   1945 }
   1946 
   1947 /* Return the appropriate mode for a named address address.  */
   1948 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
   1949 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
   1950 static scalar_int_mode
   1951 m32c_addr_space_address_mode (addr_space_t addrspace)
   1952 {
   1953   switch (addrspace)
   1954     {
   1955     case ADDR_SPACE_GENERIC:
   1956       return TARGET_A24 ? PSImode : HImode;
   1957     case ADDR_SPACE_FAR:
   1958       return SImode;
   1959     default:
   1960       gcc_unreachable ();
   1961     }
   1962 }
   1963 
   1964 /* Like m32c_legitimate_address_p, except with named addresses.  */
   1965 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
   1966 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
   1967   m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addresses are only recognized on 16-bit-address parts;
	 -A24 rejects them all here.  */
      if (TARGET_A24)
	return 0;
      /* encode_pattern fills the globals `pattern'/`patternr' with a
	 string describing X's shape; the RTX_IS strings below match
	 against it.  NOTE(review): the '^S' element presumably marks
	 a far-space wrapper -- confirm against encode_pattern.  */
      encode_pattern (x);
      if (RTX_IS ("r"))
	{
	  /* Bare register: must be SImode and either $a0 or a
	     pseudo.  */
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^Sri"))
	{
	  /* Register plus constant displacement.  Only $a0 with a
	     displacement inside the 20-bit address space, or a
	     pseudo, is acceptable.  */
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^Srs"))
	{
	  /* Register plus symbolic displacement.  */
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^S+ris"))
	{
	  /* Register plus (constant + symbol) displacement.  */
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("s"))
	{
	  /* Plain symbolic address: always valid in the far space.  */
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
   2053 
   2054 /* Like m32c_legitimate_address, except with named address support.  */
   2055 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
   2056 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
   2057 static rtx
   2058 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
   2059 				    addr_space_t as)
   2060 {
   2061   if (as != ADDR_SPACE_GENERIC)
   2062     {
   2063 #if DEBUG0
   2064       fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
   2065       debug_rtx (x);
   2066       fprintf (stderr, "\n");
   2067 #endif
   2068 
   2069       if (GET_CODE (x) != REG)
   2070 	{
   2071 	  x = force_reg (SImode, x);
   2072 	}
   2073       return x;
   2074     }
   2075 
   2076   return m32c_legitimize_address (x, oldx, mode);
   2077 }
   2078 
   2079 /* Determine if one named address space is a subset of another.  */
   2080 #undef TARGET_ADDR_SPACE_SUBSET_P
   2081 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
   2082 static bool
   2083 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
   2084 {
   2085   gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
   2086   gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
   2087 
   2088   if (subset == superset)
   2089     return true;
   2090 
   2091   else
   2092     return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
   2093 }
   2094 
   2095 #undef TARGET_ADDR_SPACE_CONVERT
   2096 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
   2097 /* Convert from one address space to another.  */
   2098 static rtx
   2099 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
   2100 {
   2101   addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
   2102   addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
   2103   rtx result;
   2104 
   2105   gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
   2106   gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
   2107 
   2108   if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
   2109     {
   2110       /* This is unpredictable, as we're truncating off usable address
   2111 	 bits.  */
   2112 
   2113       result = gen_reg_rtx (HImode);
   2114       emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
   2115       return result;
   2116     }
   2117   else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
   2118     {
   2119       /* This always works.  */
   2120       result = gen_reg_rtx (SImode);
   2121       emit_insn (gen_zero_extendhisi2 (result, op));
   2122       return result;
   2123     }
   2124   else
   2125     gcc_unreachable ();
   2126 }
   2127 
   2128 /* Condition Code Status */
   2129 
   2130 #undef TARGET_FIXED_CONDITION_CODE_REGS
   2131 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  /* FLG is the only (and fixed) condition-code register; there is no
     secondary CC register, hence INVALID_REGNUM.  */
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
   2139 
   2140 /* Describing Relative Costs of Operations */
   2141 
   2142 /* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   2143    prohibitively expensive, like trying to put QIs in r2/r3 (there are
   2144    no opcodes to do that).  We also discourage use of mem* registers
   2145    since they're really memory.  */
   2146 
   2147 #undef TARGET_REGISTER_MOVE_COST
   2148 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
   2149 
   2150 static int
   2151 m32c_register_move_cost (machine_mode mode, reg_class_t from,
   2152 			 reg_class_t to)
   2153 {
   2154   int cost = COSTS_N_INSNS (3);
   2155   HARD_REG_SET cc;
   2156 
   2157 /* FIXME: pick real values, but not 2 for now.  */
   2158   cc = reg_class_contents[from] | reg_class_contents[(int) to];
   2159 
   2160   if (mode == QImode
   2161       && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
   2162     {
   2163       if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
   2164 	cost = COSTS_N_INSNS (1000);
   2165       else
   2166 	cost = COSTS_N_INSNS (80);
   2167     }
   2168 
   2169   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
   2170     cost = COSTS_N_INSNS (1000);
   2171 
   2172   if (reg_classes_intersect_p (from, CR_REGS))
   2173     cost += COSTS_N_INSNS (5);
   2174 
   2175   if (reg_classes_intersect_p (to, CR_REGS))
   2176     cost += COSTS_N_INSNS (5);
   2177 
   2178   if (from == MEM_REGS || to == MEM_REGS)
   2179     cost += COSTS_N_INSNS (50);
   2180   else if (reg_classes_intersect_p (from, MEM_REGS)
   2181 	   || reg_classes_intersect_p (to, MEM_REGS))
   2182     cost += COSTS_N_INSNS (10);
   2183 
   2184 #if DEBUG0
   2185   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
   2186 	   mode_name[mode], class_names[(int) from], class_names[(int) to],
   2187 	   cost);
   2188 #endif
   2189   return cost;
   2190 }
   2191 
   2192 /*  Implements TARGET_MEMORY_MOVE_COST.  */
   2193 
   2194 #undef TARGET_MEMORY_MOVE_COST
   2195 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
   2196 
static int
m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* Flat cost regardless of mode, class, or direction.
     FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
   2205 
   2206 /* Here we try to describe when we use multiple opcodes for one RTX so
   2207    that gcc knows when to use them.  */
   2208 #undef TARGET_RTX_COSTS
   2209 #define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);
  switch (code)
    {
    case REG:
      /* The mem0..mem7 pseudo-registers are really memory; make
	 them very expensive to use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count: one extra move to load it.  */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Counts outside [-8,8] need two moves to set up.  */
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shift counts add nothing extra.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* A comparison whose result is stored (rather than branched
	 on) costs two insns.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   operand.  NOTE(review): this assumes XEXP (x, 0) is a MEM
	   so that its XEXP 0 is an address -- presumably guaranteed
	   by the patterns that use this code.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  SImode operations need pairs of insns
	 on 16-bit parts.  */
      if (TARGET_A16 && mode == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
   2289 
   2290 #undef TARGET_ADDRESS_COST
   2291 #define TARGET_ADDRESS_COST m32c_address_cost
   2292 static int
   2293 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
   2294 		   addr_space_t as ATTRIBUTE_UNUSED,
   2295 		   bool speed ATTRIBUTE_UNUSED)
   2296 {
   2297   int i;
   2298   /*  fprintf(stderr, "\naddress_cost\n");
   2299       debug_rtx(addr);*/
   2300   switch (GET_CODE (addr))
   2301     {
   2302     case CONST_INT:
   2303       i = INTVAL (addr);
   2304       if (i == 0)
   2305 	return COSTS_N_INSNS(1);
   2306       if (i > 0 && i <= 255)
   2307 	return COSTS_N_INSNS(2);
   2308       if (i > 0 && i <= 65535)
   2309 	return COSTS_N_INSNS(3);
   2310       return COSTS_N_INSNS(4);
   2311     case SYMBOL_REF:
   2312       return COSTS_N_INSNS(4);
   2313     case REG:
   2314       return COSTS_N_INSNS(1);
   2315     case PLUS:
   2316       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
   2317 	{
   2318 	  i = INTVAL (XEXP (addr, 1));
   2319 	  if (i == 0)
   2320 	    return COSTS_N_INSNS(1);
   2321 	  if (i > 0 && i <= 255)
   2322 	    return COSTS_N_INSNS(2);
   2323 	  if (i > 0 && i <= 65535)
   2324 	    return COSTS_N_INSNS(3);
   2325 	}
   2326       return COSTS_N_INSNS(4);
   2327     default:
   2328       return 0;
   2329     }
   2330 }
   2331 
   2332 /* Defining the Output Assembler Language */
   2333 
   2334 /* Output of Data */
   2335 
   2336 /* We may have 24 bit sizes, which is the native address size.
   2337    Currently unused, but provided for completeness.  */
   2338 #undef TARGET_ASM_INTEGER
   2339 #define TARGET_ASM_INTEGER m32c_asm_integer
   2340 static bool
   2341 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
   2342 {
   2343   switch (size)
   2344     {
   2345     case 3:
   2346       fprintf (asm_out_file, "\t.3byte\t");
   2347       output_addr_const (asm_out_file, x);
   2348       fputc ('\n', asm_out_file);
   2349       return true;
   2350     case 4:
   2351       if (GET_CODE (x) == SYMBOL_REF)
   2352 	{
   2353 	  fprintf (asm_out_file, "\t.long\t");
   2354 	  output_addr_const (asm_out_file, x);
   2355 	  fputc ('\n', asm_out_file);
   2356 	  return true;
   2357 	}
   2358       break;
   2359     }
   2360   return default_assemble_integer (x, size, aligned_p);
   2361 }
   2362 
   2363 /* Output of Assembler Instructions */
   2364 
/* We use a lookup table because the addressing modes are non-orthogonal.
   Each entry maps an (operand-modifier CODE, encoded operand PATTERN)
   pair to an output FORMAT.  PATTERN is compared against the string
   built by encode_pattern ().  In FORMAT (interpreted in
   m32c_print_operand below): a digit N prints patternr[N]; backslash
   quotes the next character; 'z' emits a "0" displacement when the
   base register ($sb/$fb/$sp) requires one; a '+' before a CONST_INT
   digit requests explicit-sign output.  */

static struct
{
  char code;		/* Operand-modifier code this row serves; 0 = none.  */
  char const *pattern;	/* Encoded operand shape to match.  */
  char const *format;	/* Output template, see above.  */
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
   2419 
/* This is in order according to the bitfield that pushm/popm use:
   bit N of the mask names pushm_regs[N] (see the 'p' operand code in
   m32c_print_operand, which scans bits 7 down to 0).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
   2424 
/* Implements TARGET_PRINT_OPERAND.  CODE is the operand-modifier
   letter from the output template (0 when there is none).  The
   special-cased codes are handled inline below; everything else is
   table-driven through conversions[] above.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;	/* 2 = mask to 16 bits, 1 = mask to 8 bits.  */
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
   need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  'h' selects the low part, 'H' the high part.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Everything remaining is table-driven: find the conversions[]
     entry matching CODE and the encoded shape of X, then interpret
     its format string.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Bit-mask constant: print the bit position,
			   trying the mask, its 16-bit complement, and
			   its 8-bit complement in turn.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: complain loudly but emit something
     recognizable rather than crashing.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
   2782 
   2783 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
   2784 
   2785    See m32c_print_operand above for descriptions of what these do.  */
   2786 
   2787 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
   2788 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
   2789 
   2790 static bool
   2791 m32c_print_operand_punct_valid_p (unsigned char c)
   2792 {
   2793   if (c == '&' || c == '!')
   2794     return true;
   2795 
   2796   return false;
   2797 }
   2798 
   2799 /* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
   2800 
   2801 #undef TARGET_PRINT_OPERAND_ADDRESS
   2802 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
   2803 
   2804 static void
   2805 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
   2806 {
   2807   if (GET_CODE (address) == MEM)
   2808     address = XEXP (address, 0);
   2809   else
   2810     /* cf: gcc.dg/asm-4.c.  */
   2811     gcc_assert (GET_CODE (address) == REG);
   2812 
   2813   m32c_print_operand (stream, address, 0);
   2814 }
   2815 
   2816 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
   2817    differently than general registers.  */
   2818 void
   2819 m32c_output_reg_push (FILE * s, int regno)
   2820 {
   2821   if (regno == FLG_REGNO)
   2822     fprintf (s, "\tpushc\tflg\n");
   2823   else
   2824     fprintf (s, "\tpush.%c\t%s\n",
   2825 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
   2826 }
   2827 
   2828 /* Likewise for ASM_OUTPUT_REG_POP.  */
   2829 void
   2830 m32c_output_reg_pop (FILE * s, int regno)
   2831 {
   2832   if (regno == FLG_REGNO)
   2833     fprintf (s, "\tpopc\tflg\n");
   2834   else
   2835     fprintf (s, "\tpop.%c\t%s\n",
   2836 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
   2837 }
   2838 
   2839 /* Defining target-specific uses of `__attribute__' */
   2840 
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  The whole expansion is parenthesized so the macro is
   safe to use inside larger expressions.  */
#define M32C_ATTRIBUTES(decl)				\
  (TYPE_P (decl) ? TYPE_ATTRIBUTES (decl)		\
   : DECL_ATTRIBUTES (decl) ? DECL_ATTRIBUTES (decl)	\
   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
   2848 
   2849 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
   2850 static int
   2851 interrupt_p (tree node ATTRIBUTE_UNUSED)
   2852 {
   2853   tree list = M32C_ATTRIBUTES (node);
   2854   while (list)
   2855     {
   2856       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
   2857 	return 1;
   2858       list = TREE_CHAIN (list);
   2859     }
   2860   return fast_interrupt_p (node);
   2861 }
   2862 
   2863 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
   2864 static int
   2865 bank_switch_p (tree node ATTRIBUTE_UNUSED)
   2866 {
   2867   tree list = M32C_ATTRIBUTES (node);
   2868   while (list)
   2869     {
   2870       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
   2871 	return 1;
   2872       list = TREE_CHAIN (list);
   2873     }
   2874   return 0;
   2875 }
   2876 
   2877 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
   2878 static int
   2879 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
   2880 {
   2881   tree list = M32C_ATTRIBUTES (node);
   2882   while (list)
   2883     {
   2884       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
   2885 	return 1;
   2886       list = TREE_CHAIN (list);
   2887     }
   2888   return 0;
   2889 }
   2890 
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table).  Does no checking and
   never rejects the attribute.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
   2900 
   2901 /* Returns TRUE if given tree has the "function_vector" attribute. */
   2902 int
   2903 m32c_special_page_vector_p (tree func)
   2904 {
   2905   tree list;
   2906 
   2907   if (TREE_CODE (func) != FUNCTION_DECL)
   2908     return 0;
   2909 
   2910   list = M32C_ATTRIBUTES (func);
   2911   while (list)
   2912     {
   2913       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
   2914         return 1;
   2915       list = TREE_CHAIN (list);
   2916     }
   2917   return 0;
   2918 }
   2919 
/* Attribute handler for "function_vector".  Validates the target and
   the attribute argument; on any problem it warns and sets
   *NO_ADD_ATTRS so the attribute is dropped.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
                         tree name ATTRIBUTE_UNUSED,
                         tree args ATTRIBUTE_UNUSED,
                         int flags ATTRIBUTE_UNUSED,
                         bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
                "%qE attribute is not supported for R8C target",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
                "%qE attribute applies only to functions",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
                "%qE attribute argument not an integer constant",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
                "%qE attribute argument should be between 18 to 255",
                name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
   2962 
   2963 /* If the function is assigned the attribute 'function_vector', it
   2964    returns the function vector number, otherwise returns zero.  */
   2965 int
   2966 current_function_special_page_vector (rtx x)
   2967 {
   2968   int num;
   2969 
   2970   if ((GET_CODE(x) == SYMBOL_REF)
   2971       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
   2972     {
   2973       tree list;
   2974       tree t = SYMBOL_REF_DECL (x);
   2975 
   2976       if (TREE_CODE (t) != FUNCTION_DECL)
   2977         return 0;
   2978 
   2979       list = M32C_ATTRIBUTES (t);
   2980       while (list)
   2981         {
   2982           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
   2983             {
   2984               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
   2985               return num;
   2986             }
   2987 
   2988           list = TREE_CHAIN (list);
   2989         }
   2990 
   2991       return 0;
   2992     }
   2993   else
   2994     return 0;
   2995 }
   2996 
   2997 #undef TARGET_ATTRIBUTE_TABLE
   2998 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
static const struct attribute_spec m32c_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "fast_interrupt", 0, 0, false, false, false, false,
    interrupt_handler, NULL },
  /* "function_vector" takes exactly one argument and requires a
     decl; see function_vector_handler for validation.  */
  { "function_vector", 1, 1, true,  false, false, false,
    function_vector_handler, NULL },
  /* Terminating all-NULL entry.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
   3010 
   3011 #undef TARGET_COMP_TYPE_ATTRIBUTES
   3012 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* All machine attribute combinations are considered compatible; we
   never reject or warn about mismatched attributes here.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
   3020 
   3021 #undef TARGET_INSERT_ATTRIBUTES
   3022 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
   3023 static void
   3024 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
   3025 			tree * attr_ptr ATTRIBUTE_UNUSED)
   3026 {
   3027   unsigned addr;
   3028   /* See if we need to make #pragma address variables volatile.  */
   3029 
   3030   if (TREE_CODE (node) == VAR_DECL)
   3031     {
   3032       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
   3033       if (m32c_get_pragma_address  (name, &addr))
   3034 	{
   3035 	  TREE_THIS_VOLATILE (node) = true;
   3036 	}
   3037     }
   3038 }
   3039 
/* Hash table of pragma info: maps a variable name (from #pragma
   address) to its assigned address.  Created lazily by
   m32c_note_pragma_address; GC-rooted via GTY.  */
static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
   3042 
   3043 void
   3044 m32c_note_pragma_address (const char *varname, unsigned address)
   3045 {
   3046   if (!pragma_htab)
   3047     pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
   3048 
   3049   const char *name = ggc_strdup (varname);
   3050   unsigned int *slot = &pragma_htab->get_or_insert (name);
   3051   *slot = address;
   3052 }
   3053 
   3054 static bool
   3055 m32c_get_pragma_address (const char *varname, unsigned *address)
   3056 {
   3057   if (!pragma_htab)
   3058     return false;
   3059 
   3060   unsigned int *slot = pragma_htab->get (varname);
   3061   if (slot)
   3062     {
   3063       *address = *slot;
   3064       return true;
   3065     }
   3066   return false;
   3067 }
   3068 
   3069 void
   3070 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
   3071 			    const char *name,
   3072 			    int size, int align, int global)
   3073 {
   3074   unsigned address;
   3075 
   3076   if (m32c_get_pragma_address (name, &address))
   3077     {
   3078       /* We never output these as global.  */
   3079       assemble_name (stream, name);
   3080       fprintf (stream, " = 0x%04x\n", address);
   3081       return;
   3082     }
   3083   if (!global)
   3084     {
   3085       fprintf (stream, "\t.local\t");
   3086       assemble_name (stream, name);
   3087       fprintf (stream, "\n");
   3088     }
   3089   fprintf (stream, "\t.comm\t");
   3090   assemble_name (stream, name);
   3091   fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
   3092 }
   3093 
   3094 /* Predicates */
   3095 
   3096 /* This is a list of legal subregs of hard regs.  */
static const struct {
  unsigned char outer_mode_size;  /* GET_MODE_SIZE of the subreg's mode.  */
  unsigned char inner_mode_size;  /* GET_MODE_SIZE of the inner reg's mode.  */
  unsigned char byte_mask;	  /* Bit N set = SUBREG_BYTE N is legal.  */
  unsigned char legal_when;	  /* 1 = always, 16 = A16 only, 24 = A24 only.  */
  unsigned int regno;		  /* Hard register the subreg is of.  */
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
   3120 
   3121 /* Returns TRUE if OP is a subreg of a hard reg which we don't
   3122    support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  /* Reject MEMs whose address we can't encode.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs, non-REG inner expressions, and registers at
     or above MEM0_REGNO are never restricted.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a single mask bit for matching
     against legal_subregs[].byte_mask below.  */
  offset = (1 << offset);

  /* The subreg is ILLEGAL unless some legal_subregs entry matches it
     and is enabled for the current target.  */
  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  return true;
}
   3175 
   3176 /* Returns TRUE if we support a move between the first two operands.
   3177    At the moment, we just want to discourage mem to mem moves until
   3178    after reload, because reload has a hard time with our limited
   3179    number of address registers, and we can get into a situation where
   3180    we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* TARGET_A24 parts are never restricted here; only the A16 family
     gets the mem-to-mem check below.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through SUBREGs at the underlying operands.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  /* Discourage mem-to-mem moves before reload (see the comment
     above this function).  */
  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
   3217 
   3218 /* Returns TRUE if two consecutive HImode mov instructions, generated
   3219    for moving an immediate double data to a double data type variable
   3220    location, can be combined into single SImode mov instruction.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  /* The combining optimization is permanently disabled; always tell
     the caller the two moves cannot be merged.  */
  return false;
}
   3229 
   3230 /* Expanders */
   3231 
   3232 /* Subregs are non-orthogonal for us, because our registers are all
   3233    different sizes.  */
   3234 static rtx
   3235 m32c_subreg (machine_mode outer,
   3236 	     rtx x, machine_mode inner, int byte)
   3237 {
   3238   int r, nr = -1;
   3239 
   3240   /* Converting MEMs to different types that are the same size, we
   3241      just rewrite them.  */
   3242   if (GET_CODE (x) == SUBREG
   3243       && SUBREG_BYTE (x) == 0
   3244       && GET_CODE (SUBREG_REG (x)) == MEM
   3245       && (GET_MODE_SIZE (GET_MODE (x))
   3246 	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
   3247     {
   3248       rtx oldx = x;
   3249       x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
   3250       MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
   3251     }
   3252 
   3253   /* Push/pop get done as smaller push/pops.  */
   3254   if (GET_CODE (x) == MEM
   3255       && (GET_CODE (XEXP (x, 0)) == PRE_DEC
   3256 	  || GET_CODE (XEXP (x, 0)) == POST_INC))
   3257     return gen_rtx_MEM (outer, XEXP (x, 0));
   3258   if (GET_CODE (x) == SUBREG
   3259       && GET_CODE (XEXP (x, 0)) == MEM
   3260       && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
   3261 	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
   3262     return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
   3263 
   3264   if (GET_CODE (x) != REG)
   3265     {
   3266       rtx r = simplify_gen_subreg (outer, x, inner, byte);
   3267       if (GET_CODE (r) == SUBREG
   3268 	  && GET_CODE (x) == MEM
   3269 	  && MEM_VOLATILE_P (x))
   3270 	{
   3271 	  /* Volatile MEMs don't get simplified, but we need them to
   3272 	     be.  We are little endian, so the subreg byte is the
   3273 	     offset.  */
   3274 	  r = adjust_address_nv (x, outer, byte);
   3275 	}
   3276       return r;
   3277     }
   3278 
   3279   r = REGNO (x);
   3280   if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
   3281     return simplify_gen_subreg (outer, x, inner, byte);
   3282 
   3283   if (IS_MEM_REGNO (r))
   3284     return simplify_gen_subreg (outer, x, inner, byte);
   3285 
   3286   /* This is where the complexities of our register layout are
   3287      described.  */
   3288   if (byte == 0)
   3289     nr = r;
   3290   else if (outer == HImode)
   3291     {
   3292       if (r == R0_REGNO && byte == 2)
   3293 	nr = R2_REGNO;
   3294       else if (r == R0_REGNO && byte == 4)
   3295 	nr = R1_REGNO;
   3296       else if (r == R0_REGNO && byte == 6)
   3297 	nr = R3_REGNO;
   3298       else if (r == R1_REGNO && byte == 2)
   3299 	nr = R3_REGNO;
   3300       else if (r == A0_REGNO && byte == 2)
   3301 	nr = A1_REGNO;
   3302     }
   3303   else if (outer == SImode)
   3304     {
   3305       if (r == R0_REGNO && byte == 0)
   3306 	nr = R0_REGNO;
   3307       else if (r == R0_REGNO && byte == 4)
   3308 	nr = R1_REGNO;
   3309     }
   3310   if (nr == -1)
   3311     {
   3312       fprintf (stderr, "m32c_subreg %s %s %d\n",
   3313 	       mode_name[outer], mode_name[inner], byte);
   3314       debug_rtx (x);
   3315       gcc_unreachable ();
   3316     }
   3317   return gen_rtx_REG (outer, nr);
   3318 }
   3319 
   3320 /* Used to emit move instructions.  We split some moves,
   3321    and avoid mem-mem moves.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Constants destined for the far address space go through a
     register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* A16 PSImode moves must always be split.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* Lower a PRE_MODIFY destination into an explicit address update
     followed by a plain MEM store.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves by forcing the source into a register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
   3346 
   3347 #define DEBUG_SPLIT 0
   3348 
   3349 /* Returns TRUE if the given PSImode move should be split.  We split
   3350    for all r8c/m16c moves, since it doesn't support them, and for
   3351    POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* r8c/m16c (A16) can't do PSImode moves at all.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, and we can only push SImode, so
     split it.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
   3380 
   3381 /* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   3382    (define_expand), 1 if it is not optional (define_insn_and_split),
   3383    and 3 for define_split (alternate api). */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Every mode handled here splits into exactly two submode
     pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* A16 far-address-space moves must always be split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the part-moves so no destination part clobbers a
	 source part that is still needed.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
   3517 
   3518 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
   3519    the like.  For the R8C they expect one of the addresses to be in
   3520    R1L:An so we need to arrange for that.  Otherwise, it's just a
   3521    matter of picking out the operands we want and emitting the right
   3522    pattern for them.  All these expanders, which correspond to
   3523    patterns in blkmov.md, must return nonzero if they expand the insn,
   3524    or zero if they should FAIL.  */
   3525 
   3526 /* This is a memset() opcode.  All operands are implied, so we need to
   3527    arrange for them to be in the right registers.  The opcode wants
   3528    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   3529    the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Fresh pseudos for the opcode's output operands.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Force hard registers and non-register addresses into pseudos.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Use word-sized stores: duplicate the byte into both halves
	 and halve the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
   3584 
   3585 /* This is a memcpy() opcode.  All operands are implied, so we need to
   3586    arrange for them to be in the right registers.  The opcode wants
   3587    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   3588    is the source (MEM:BLK), and $2 the count (HI).  */
int
m32c_expand_cpymemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Fresh pseudos for the opcode's output operands.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Force hard registers and non-register addresses into pseudos.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even count: use word copies and halve the count.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
   3636 
   3637 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
   3638    the copy, which should point to the NUL at the end of the string,
   3639    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
   3640    Since our opcode leaves the destination pointing *after* the NUL,
   3641    we must emit an adjustment.  */
int
m32c_expand_movstr(rtx *operands)
{
  rtx desta, srca;
  rtx desto, srco;

  desta = XEXP (operands[1], 0);
  srca = XEXP (operands[2], 0);

  /* Fresh pseudos for the opcode's output operands.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);

  /* Force hard registers and non-register addresses into pseudos.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  emit_insn (gen_movstr_op (desto, srco, desta, srca));
  /* desto ends up being a1, which allows this type of add through MOVA.  */
  /* Back up one so operands[0] points at the NUL, per the stpcpy
     contract described above.  */
  emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));

  return 1;
}
   3668 
   3669 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
   3670    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
   3671    $2 is the other (MEM:BLK).  We must do the comparison, and then
   3672    convert the flags to a signed integer result.  */
int
m32c_expand_cmpstr(rtx *operands)
{
  rtx src1a, src2a;

  src1a = XEXP (operands[1], 0);
  src2a = XEXP (operands[2], 0);

  /* Force hard registers and non-register addresses into pseudos.  */
  if (GET_CODE (src1a) != REG
      || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
    src1a = copy_to_mode_reg (Pmode, src1a);

  if (GET_CODE (src2a) != REG
      || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
    src2a = copy_to_mode_reg (Pmode, src2a);

  /* Compare, then turn the resulting condition flags into the signed
     integer result the caller expects.  */
  emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
  emit_insn (gen_cond_to_int (operands[0]));

  return 1;
}
   3694 
   3695 
   3696 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
   3697 
/* Return the insn generator for a MODE/CODE shift pair; SImode
   shifts differ between the A16 and A24 families.  Aborts on an
   unsupported combination.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  gcc_unreachable ();
}
   3717 
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit a chain of maximal-width shifts until
	 the remainder fits the hardware limit (8 bits, or 32 for
	 PSI/SI on A24).  SCALE folds the shift direction into the
	 sign of the count.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      /* Final shift with whatever count remains (|count| <= maxc).  */
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: materialize it, negated when we are pretending
     to shift the other way.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* Pre-shift by 16 (as two 8-bit shifts) and reduce the
	     count accordingly; the branch above skips this when the
	     count is already <= 16.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      /* The final in-place shift by the (now in-range) count.  */
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Tell the caller's pattern to do the shift with the adjusted
     count; returning 0 means "not fully expanded here".  */
  operands[2] = temp;
  return 0;
}
   3827 
   3828 /* The m32c has a limited range of operations that work on PSImode
   3829    values; we have to expand to SI, do the math, and truncate back to
   3830    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   3831    those cases.  */
   3832 void
   3833 m32c_expand_neg_mulpsi3 (rtx * operands)
   3834 {
   3835   /* operands: a = b * i */
   3836   rtx temp1; /* b as SI */
   3837   rtx scale /* i as SI */;
   3838   rtx temp2; /* a*b as SI */
   3839 
   3840   temp1 = gen_reg_rtx (SImode);
   3841   temp2 = gen_reg_rtx (SImode);
   3842   if (GET_CODE (operands[2]) != CONST_INT)
   3843     {
   3844       scale = gen_reg_rtx (SImode);
   3845       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
   3846     }
   3847   else
   3848     scale = copy_to_mode_reg (SImode, operands[2]);
   3849 
   3850   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
   3851   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
   3852   emit_insn (gen_truncsipsi2 (operands[0], temp2));
   3853 }
   3854 
   3855 /* Pattern Output Functions */
   3856 
   3857 int
   3858 m32c_expand_movcc (rtx *operands)
   3859 {
   3860   rtx rel = operands[1];
   3861 
   3862   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
   3863     return 1;
   3864   if (GET_CODE (operands[2]) != CONST_INT
   3865       || GET_CODE (operands[3]) != CONST_INT)
   3866     return 1;
   3867   if (GET_CODE (rel) == NE)
   3868     {
   3869       rtx tmp = operands[2];
   3870       operands[2] = operands[3];
   3871       operands[3] = tmp;
   3872       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
   3873     }
   3874 
   3875   emit_move_insn (operands[0],
   3876 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
   3877 					rel,
   3878 					operands[2],
   3879 					operands[3]));
   3880   return 0;
   3881 }
   3882 
   3883 /* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
   3884 int
   3885 m32c_expand_insv (rtx *operands)
   3886 {
   3887   rtx op0, src0, p;
   3888   int mask;
   3889 
   3890   if (INTVAL (operands[1]) != 1)
   3891     return 1;
   3892 
   3893   /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
   3894   if (GET_CODE (operands[3]) != CONST_INT)
   3895     return 1;
   3896   if (INTVAL (operands[3]) != 0
   3897       && INTVAL (operands[3]) != 1
   3898       && INTVAL (operands[3]) != -1)
   3899     return 1;
   3900 
   3901   mask = 1 << INTVAL (operands[2]);
   3902 
   3903   op0 = operands[0];
   3904   if (GET_CODE (op0) == SUBREG
   3905       && SUBREG_BYTE (op0) == 0)
   3906     {
   3907       rtx sub = SUBREG_REG (op0);
   3908       if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
   3909 	op0 = sub;
   3910     }
   3911 
   3912   if (!can_create_pseudo_p ()
   3913       || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
   3914     src0 = op0;
   3915   else
   3916     {
   3917       src0 = gen_reg_rtx (GET_MODE (op0));
   3918       emit_move_insn (src0, op0);
   3919     }
   3920 
   3921   if (GET_MODE (op0) == HImode
   3922       && INTVAL (operands[2]) >= 8
   3923       && GET_CODE (op0) == MEM)
   3924     {
   3925       /* We are little endian.  */
   3926       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
   3927 							XEXP (op0, 0), 1));
   3928       MEM_COPY_ATTRIBUTES (new_mem, op0);
   3929       mask >>= 8;
   3930     }
   3931 
   3932   /* First, we generate a mask with the correct polarity.  If we are
   3933      storing a zero, we want an AND mask, so invert it.  */
   3934   if (INTVAL (operands[3]) == 0)
   3935     {
   3936       /* Storing a zero, use an AND mask */
   3937       if (GET_MODE (op0) == HImode)
   3938 	mask ^= 0xffff;
   3939       else
   3940 	mask ^= 0xff;
   3941     }
   3942   /* Now we need to properly sign-extend the mask in case we need to
   3943      fall back to an AND or OR opcode.  */
   3944   if (GET_MODE (op0) == HImode)
   3945     {
   3946       if (mask & 0x8000)
   3947 	mask -= 0x10000;
   3948     }
   3949   else
   3950     {
   3951       if (mask & 0x80)
   3952 	mask -= 0x100;
   3953     }
   3954 
   3955   switch (  (INTVAL (operands[3]) ? 4 : 0)
   3956 	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
   3957 	  + (TARGET_A24 ? 1 : 0))
   3958     {
   3959     case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
   3960     case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
   3961     case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
   3962     case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
   3963     case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
   3964     case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
   3965     case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
   3966     case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
   3967     default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
   3968     }
   3969 
   3970   emit_insn (p);
   3971   return 0;
   3972 }
   3973 
   3974 const char *
   3975 m32c_scc_pattern(rtx *operands, RTX_CODE code)
   3976 {
   3977   static char buf[30];
   3978   if (GET_CODE (operands[0]) == REG
   3979       && REGNO (operands[0]) == R0_REGNO)
   3980     {
   3981       if (code == EQ)
   3982 	return "stzx\t#1,#0,r0l";
   3983       if (code == NE)
   3984 	return "stzx\t#0,#1,r0l";
   3985     }
   3986   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
   3987   return buf;
   3988 }
   3989 
   3990 /* Encode symbol attributes of a SYMBOL_REF into its
   3991    SYMBOL_REF_FLAGS. */
   3992 static void
   3993 m32c_encode_section_info (tree decl, rtx rtl, int first)
   3994 {
   3995   int extra_flags = 0;
   3996 
   3997   default_encode_section_info (decl, rtl, first);
   3998   if (TREE_CODE (decl) == FUNCTION_DECL
   3999       && m32c_special_page_vector_p (decl))
   4000 
   4001     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
   4002 
   4003   if (extra_flags)
   4004     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
   4005 }
   4006 
   4007 /* Returns TRUE if the current function is a leaf, and thus we can
   4008    determine which registers an interrupt function really needs to
   4009    save.  The logic below is mostly about finding the insn sequence
   4010    that's the function, versus any sequence that might be open for the
   4011    current insn.  */
   4012 static int
   4013 m32c_leaf_function_p (void)
   4014 {
   4015   int rv;
   4016 
   4017   push_topmost_sequence ();
   4018   rv = leaf_function_p ();
   4019   pop_topmost_sequence ();
   4020   return rv;
   4021 }
   4022 
   4023 /* Returns TRUE if the current function needs to use the ENTER/EXIT
   4024    opcodes.  If the function doesn't need the frame base or stack
   4025    pointer, it can use the simpler RTS opcode.  */
   4026 static bool
   4027 m32c_function_needs_enter (void)
   4028 {
   4029   rtx_insn *insn;
   4030   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
   4031   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
   4032 
   4033   for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
   4034     if (NONDEBUG_INSN_P (insn))
   4035       {
   4036 	if (reg_mentioned_p (sp, insn))
   4037 	  return true;
   4038 	if (reg_mentioned_p (fb, insn))
   4039 	  return true;
   4040       }
   4041   return false;
   4042 }
   4043 
   4044 /* Mark all the subexpressions of the PARALLEL rtx PAR as
   4045    frame-related.  Return PAR.
   4046 
   4047    dwarf2out.cc:dwarf2out_frame_debug_expr ignores sub-expressions of a
   4048    PARALLEL rtx other than the first if they do not have the
   4049    FRAME_RELATED flag set on them.  So this function is handy for
   4050    marking up 'enter' instructions.  */
   4051 static rtx
   4052 m32c_all_frame_related (rtx par)
   4053 {
   4054   int len = XVECLEN (par, 0);
   4055   int i;
   4056 
   4057   for (i = 0; i < len; i++)
   4058     F (XVECEXP (par, 0, i));
   4059 
   4060   return par;
   4061 }
   4062 
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;	/* nonzero once we emit more than ENTER */

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Number of bytes pushm will consume, without emitting it yet.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      /* Bank-switching interrupts use FSET B instead of pushing the
	 interrupt registers.  */
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* No frame and no SP/FB references: plain RTS suffices.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  /* ENTER can reserve at most 254 bytes; any excess is handled by an
     explicit SP adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      /* SP is HImode on A16 targets, PSImode on A24.  */
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
   4131 
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Restore SP from FP by way of $a0, then pop the saved
	     frame pointer itself.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Undo the prologue's PUSHM unless the prologue used bank
	 switching (FSET B) instead of pushing.  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* Check if fast_attribute is set for M32C or M32CM.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If fast_interrupt attribute is set for an R8C or M16C
             target ignore this attribute and generated REIT
             instruction.  */
          else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
        }
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
   4203 
/* Emit the epilogue used when unwinding for exception handling.
   RET_ADDR is the address to resume at; the actual unwinding is done
   by a libgcc assembler routine.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
   4214 
   4215 /* Indicate which flags must be properly set for a given conditional.  */
   4216 static int
   4217 flags_needed_for_conditional (rtx cond)
   4218 {
   4219   switch (GET_CODE (cond))
   4220     {
   4221     case LE:
   4222     case GT:
   4223       return FLAGS_OSZ;
   4224     case LEU:
   4225     case GTU:
   4226       return FLAGS_ZC;
   4227     case LT:
   4228     case GE:
   4229       return FLAGS_OS;
   4230     case LTU:
   4231     case GEU:
   4232       return FLAGS_C;
   4233     case EQ:
   4234     case NE:
   4235       return FLAGS_Z;
   4236     default:
   4237       return FLAGS_N;
   4238     }
   4239 }
   4240 
   4241 #define DEBUG_CMP 0
   4242 
   4243 /* Returns true if a compare insn is redundant because it would only
   4244    set flags that are already set correctly.  */
   4245 static bool
   4246 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
   4247 {
   4248   int flags_needed;
   4249   int pflags;
   4250   rtx_insn *prev;
   4251   rtx pp, next;
   4252   rtx op0, op1;
   4253 #if DEBUG_CMP
   4254   int prev_icode, i;
   4255 #endif
   4256 
   4257   op0 = operands[0];
   4258   op1 = operands[1];
   4259 
   4260 #if DEBUG_CMP
   4261   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
   4262   debug_rtx(cmp);
   4263   for (i=0; i<2; i++)
   4264     {
   4265       fprintf(stderr, "operands[%d] = ", i);
   4266       debug_rtx(operands[i]);
   4267     }
   4268 #endif
   4269 
   4270   next = next_nonnote_insn (cmp);
   4271   if (!next || !INSN_P (next))
   4272     {
   4273 #if DEBUG_CMP
   4274       fprintf(stderr, "compare not followed by insn\n");
   4275       debug_rtx(next);
   4276 #endif
   4277       return false;
   4278     }
   4279   if (GET_CODE (PATTERN (next)) == SET
   4280       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
   4281     {
   4282       next = XEXP (XEXP (PATTERN (next), 1), 0);
   4283     }
   4284   else if (GET_CODE (PATTERN (next)) == SET)
   4285     {
   4286       /* If this is a conditional, flags_needed will be something
   4287 	 other than FLAGS_N, which we test below.  */
   4288       next = XEXP (PATTERN (next), 1);
   4289     }
   4290   else
   4291     {
   4292 #if DEBUG_CMP
   4293       fprintf(stderr, "compare not followed by conditional\n");
   4294       debug_rtx(next);
   4295 #endif
   4296       return false;
   4297     }
   4298 #if DEBUG_CMP
   4299   fprintf(stderr, "conditional is: ");
   4300   debug_rtx(next);
   4301 #endif
   4302 
   4303   flags_needed = flags_needed_for_conditional (next);
   4304   if (flags_needed == FLAGS_N)
   4305     {
   4306 #if DEBUG_CMP
   4307       fprintf(stderr, "compare not followed by conditional\n");
   4308       debug_rtx(next);
   4309 #endif
   4310       return false;
   4311     }
   4312 
   4313   /* Compare doesn't set overflow and carry the same way that
   4314      arithmetic instructions do, so we can't replace those.  */
   4315   if (flags_needed & FLAGS_OC)
   4316     return false;
   4317 
   4318   prev = cmp;
   4319   do {
   4320     prev = prev_nonnote_insn (prev);
   4321     if (!prev)
   4322       {
   4323 #if DEBUG_CMP
   4324 	fprintf(stderr, "No previous insn.\n");
   4325 #endif
   4326 	return false;
   4327       }
   4328     if (!INSN_P (prev))
   4329       {
   4330 #if DEBUG_CMP
   4331 	fprintf(stderr, "Previous insn is a non-insn.\n");
   4332 #endif
   4333 	return false;
   4334       }
   4335     pp = PATTERN (prev);
   4336     if (GET_CODE (pp) != SET)
   4337       {
   4338 #if DEBUG_CMP
   4339 	fprintf(stderr, "Previous insn is not a SET.\n");
   4340 #endif
   4341 	return false;
   4342       }
   4343     pflags = get_attr_flags (prev);
   4344 
   4345     /* Looking up attributes of previous insns corrupted the recog
   4346        tables.  */
   4347     INSN_UID (cmp) = -1;
   4348     recog (PATTERN (cmp), cmp, 0);
   4349 
   4350     if (pflags == FLAGS_N
   4351 	&& reg_mentioned_p (op0, pp))
   4352       {
   4353 #if DEBUG_CMP
   4354 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
   4355 	debug_rtx(prev);
   4356 #endif
   4357 	return false;
   4358       }
   4359 
   4360     /* Check for comparisons against memory - between volatiles and
   4361        aliases, we just can't risk this one.  */
   4362     if (GET_CODE (operands[0]) == MEM
   4363 	|| GET_CODE (operands[0]) == MEM)
   4364       {
   4365 #if DEBUG_CMP
   4366 	fprintf(stderr, "comparisons with memory:\n");
   4367 	debug_rtx(prev);
   4368 #endif
   4369 	return false;
   4370       }
   4371 
   4372     /* Check for PREV changing a register that's used to compute a
   4373        value in CMP, even if it doesn't otherwise change flags.  */
   4374     if (GET_CODE (operands[0]) == REG
   4375 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
   4376       {
   4377 #if DEBUG_CMP
   4378 	fprintf(stderr, "sub-value affected, op0:\n");
   4379 	debug_rtx(prev);
   4380 #endif
   4381 	return false;
   4382       }
   4383     if (GET_CODE (operands[1]) == REG
   4384 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
   4385       {
   4386 #if DEBUG_CMP
   4387 	fprintf(stderr, "sub-value affected, op1:\n");
   4388 	debug_rtx(prev);
   4389 #endif
   4390 	return false;
   4391       }
   4392 
   4393   } while (pflags == FLAGS_N);
   4394 #if DEBUG_CMP
   4395   fprintf(stderr, "previous flag-setting insn:\n");
   4396   debug_rtx(prev);
   4397   debug_rtx(pp);
   4398 #endif
   4399 
   4400   if (GET_CODE (pp) == SET
   4401       && GET_CODE (XEXP (pp, 0)) == REG
   4402       && REGNO (XEXP (pp, 0)) == FLG_REGNO
   4403       && GET_CODE (XEXP (pp, 1)) == COMPARE)
   4404     {
   4405       /* Adjacent cbranches must have the same operands to be
   4406 	 redundant.  */
   4407       rtx pop0 = XEXP (XEXP (pp, 1), 0);
   4408       rtx pop1 = XEXP (XEXP (pp, 1), 1);
   4409 #if DEBUG_CMP
   4410       fprintf(stderr, "adjacent cbranches\n");
   4411       debug_rtx(pop0);
   4412       debug_rtx(pop1);
   4413 #endif
   4414       if (rtx_equal_p (op0, pop0)
   4415 	  && rtx_equal_p (op1, pop1))
   4416 	return true;
   4417 #if DEBUG_CMP
   4418       fprintf(stderr, "prev cmp not same\n");
   4419 #endif
   4420       return false;
   4421     }
   4422 
   4423   /* Else the previous insn must be a SET, with either the source or
   4424      dest equal to operands[0], and operands[1] must be zero.  */
   4425 
   4426   if (!rtx_equal_p (op1, const0_rtx))
   4427     {
   4428 #if DEBUG_CMP
   4429       fprintf(stderr, "operands[1] not const0_rtx\n");
   4430 #endif
   4431       return false;
   4432     }
   4433   if (GET_CODE (pp) != SET)
   4434     {
   4435 #if DEBUG_CMP
   4436       fprintf (stderr, "pp not set\n");
   4437 #endif
   4438       return false;
   4439     }
   4440   if (!rtx_equal_p (op0, SET_SRC (pp))
   4441       && !rtx_equal_p (op0, SET_DEST (pp)))
   4442     {
   4443 #if DEBUG_CMP
   4444       fprintf(stderr, "operands[0] not found in set\n");
   4445 #endif
   4446       return false;
   4447     }
   4448 
   4449 #if DEBUG_CMP
   4450   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
   4451 #endif
   4452   if ((pflags & flags_needed) == flags_needed)
   4453     return true;
   4454 
   4455   return false;
   4456 }
   4457 
   4458 /* Return the pattern for a compare.  This will be commented out if
   4459    the compare is redundant, else a normal pattern is returned.  Thus,
   4460    the assembler output says where the compare would have been.  */
   4461 char *
   4462 m32c_output_compare (rtx_insn *insn, rtx *operands)
   4463 {
   4464   static char templ[] = ";cmp.b\t%1,%0";
   4465   /*                             ^ 5  */
   4466 
   4467   templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
   4468   if (m32c_compare_redundant (insn, operands))
   4469     {
   4470 #if DEBUG_CMP
   4471       fprintf(stderr, "cbranch: cmp not needed\n");
   4472 #endif
   4473       return templ;
   4474     }
   4475 
   4476 #if DEBUG_CMP
   4477   fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
   4478 #endif
   4479   return templ + 1;
   4480 }
   4481 
/* Remaining target hook overrides, then the target vector itself.  */

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* Hard-register/mode interaction hooks.  */
#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class

/* The Global `targetm' Variable. */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collector roots generated for this file.  */
#include "gt-m32c.h"
   4507