/* sched-deps.cc — dependency-computation portion of GCC's instruction
   scheduling pass (source-browser navigation banner removed).  */
      1  1.1  mrg /* Instruction scheduling pass.  This file computes dependencies between
      2  1.1  mrg    instructions.
      3  1.1  mrg    Copyright (C) 1992-2022 Free Software Foundation, Inc.
      4  1.1  mrg    Contributed by Michael Tiemann (tiemann (at) cygnus.com) Enhanced by,
      5  1.1  mrg    and currently maintained by, Jim Wilson (wilson (at) cygnus.com)
      6  1.1  mrg 
      7  1.1  mrg This file is part of GCC.
      8  1.1  mrg 
      9  1.1  mrg GCC is free software; you can redistribute it and/or modify it under
     10  1.1  mrg the terms of the GNU General Public License as published by the Free
     11  1.1  mrg Software Foundation; either version 3, or (at your option) any later
     12  1.1  mrg version.
     13  1.1  mrg 
     14  1.1  mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
     15  1.1  mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
     16  1.1  mrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     17  1.1  mrg for more details.
     18  1.1  mrg 
     19  1.1  mrg You should have received a copy of the GNU General Public License
     20  1.1  mrg along with GCC; see the file COPYING3.  If not see
     21  1.1  mrg <http://www.gnu.org/licenses/>.  */
     22  1.1  mrg 
     23  1.1  mrg #include "config.h"
     25  1.1  mrg #include "system.h"
     26  1.1  mrg #include "coretypes.h"
     27  1.1  mrg #include "backend.h"
     28  1.1  mrg #include "target.h"
     29  1.1  mrg #include "rtl.h"
     30  1.1  mrg #include "tree.h"
     31  1.1  mrg #include "df.h"
     32  1.1  mrg #include "insn-config.h"
     33  1.1  mrg #include "regs.h"
     34  1.1  mrg #include "memmodel.h"
     35  1.1  mrg #include "ira.h"
     36  1.1  mrg #include "ira-int.h"
     37  1.1  mrg #include "insn-attr.h"
     38  1.1  mrg #include "cfgbuild.h"
     39  1.1  mrg #include "sched-int.h"
     40  1.1  mrg #include "cselib.h"
     41  1.1  mrg #include "function-abi.h"
     42  1.1  mrg 
#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  Presumably indexed by
   insn luid -- confirm against the accessors in sched-int.h.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;
     51  1.1  mrg 
     52  1.1  mrg /* Return the major type present in the DS.  */
     53  1.1  mrg enum reg_note
     54  1.1  mrg ds_to_dk (ds_t ds)
     55  1.1  mrg {
     56  1.1  mrg   if (ds & DEP_TRUE)
     57  1.1  mrg     return REG_DEP_TRUE;
     58  1.1  mrg 
     59  1.1  mrg   if (ds & DEP_OUTPUT)
     60  1.1  mrg     return REG_DEP_OUTPUT;
     61  1.1  mrg 
     62  1.1  mrg   if (ds & DEP_CONTROL)
     63  1.1  mrg     return REG_DEP_CONTROL;
     64  1.1  mrg 
     65  1.1  mrg   gcc_assert (ds & DEP_ANTI);
     66  1.1  mrg 
     67  1.1  mrg   return REG_DEP_ANTI;
     68  1.1  mrg }
     69  1.1  mrg 
     70  1.1  mrg /* Return equivalent dep_status.  */
     71  1.1  mrg ds_t
     72  1.1  mrg dk_to_ds (enum reg_note dk)
     73  1.1  mrg {
     74  1.1  mrg   switch (dk)
     75  1.1  mrg     {
     76  1.1  mrg     case REG_DEP_TRUE:
     77  1.1  mrg       return DEP_TRUE;
     78  1.1  mrg 
     79  1.1  mrg     case REG_DEP_OUTPUT:
     80  1.1  mrg       return DEP_OUTPUT;
     81  1.1  mrg 
     82  1.1  mrg     case REG_DEP_CONTROL:
     83  1.1  mrg       return DEP_CONTROL;
     84  1.1  mrg 
     85  1.1  mrg     default:
     86  1.1  mrg       gcc_assert (dk == REG_DEP_ANTI);
     87  1.1  mrg       return DEP_ANTI;
     88  1.1  mrg     }
     89  1.1  mrg }
     90  1.1  mrg 
     91  1.1  mrg /* Functions to operate with dependence information container - dep_t.  */
     92  1.1  mrg 
     93  1.1  mrg /* Init DEP with the arguments.  */
     94  1.1  mrg void
     95  1.1  mrg init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
     96  1.1  mrg {
     97  1.1  mrg   DEP_PRO (dep) = pro;
     98  1.1  mrg   DEP_CON (dep) = con;
     99  1.1  mrg   DEP_TYPE (dep) = type;
    100  1.1  mrg   DEP_STATUS (dep) = ds;
    101  1.1  mrg   DEP_COST (dep) = UNKNOWN_DEP_COST;
    102  1.1  mrg   DEP_NONREG (dep) = 0;
    103  1.1  mrg   DEP_MULTIPLE (dep) = 0;
    104  1.1  mrg   DEP_REPLACE (dep) = NULL;
    105  1.1  mrg   dep->unused = 0;
    106  1.1  mrg }
    107  1.1  mrg 
    108  1.1  mrg /* Init DEP with the arguments.
    109  1.1  mrg    While most of the scheduler (including targets) only need the major type
    110  1.1  mrg    of the dependency, it is convenient to hide full dep_status from them.  */
    111  1.1  mrg void
    112  1.1  mrg init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
    113  1.1  mrg {
    114  1.1  mrg   ds_t ds;
    115  1.1  mrg 
    116  1.1  mrg   if ((current_sched_info->flags & USE_DEPS_LIST))
    117  1.1  mrg     ds = dk_to_ds (kind);
    118  1.1  mrg   else
    119  1.1  mrg     ds = 0;
    120  1.1  mrg 
    121  1.1  mrg   init_dep_1 (dep, pro, con, kind, ds);
    122  1.1  mrg }
    123  1.1  mrg 
    124  1.1  mrg /* Make a copy of FROM in TO.  */
    125  1.1  mrg static void
    126  1.1  mrg copy_dep (dep_t to, dep_t from)
    127  1.1  mrg {
    128  1.1  mrg   memcpy (to, from, sizeof (*to));
    129  1.1  mrg }
    130  1.1  mrg 
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  Bit 0 is reserved: passing an odd
   FLAGS value to dump_dep () means "dump everything".  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)
    150  1.1  mrg 
    151  1.1  mrg /* Dump DEP to DUMP.
    152  1.1  mrg    FLAGS is a bit mask specifying what information about DEP needs
    153  1.1  mrg    to be printed.
    154  1.1  mrg    If FLAGS has the very first bit set, then dump all information about DEP
    155  1.1  mrg    and propagate this bit into the callee dump functions.  */
    156  1.1  mrg static void
    157  1.1  mrg dump_dep (FILE *dump, dep_t dep, int flags)
    158  1.1  mrg {
    159  1.1  mrg   if (flags & 1)
    160  1.1  mrg     flags |= DUMP_DEP_ALL;
    161  1.1  mrg 
    162  1.1  mrg   fprintf (dump, "<");
    163  1.1  mrg 
    164  1.1  mrg   if (flags & DUMP_DEP_PRO)
    165  1.1  mrg     fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
    166  1.1  mrg 
    167  1.1  mrg   if (flags & DUMP_DEP_CON)
    168  1.1  mrg     fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
    169  1.1  mrg 
    170  1.1  mrg   if (flags & DUMP_DEP_TYPE)
    171  1.1  mrg     {
    172  1.1  mrg       char t;
    173  1.1  mrg       enum reg_note type = DEP_TYPE (dep);
    174  1.1  mrg 
    175  1.1  mrg       switch (type)
    176  1.1  mrg 	{
    177  1.1  mrg 	case REG_DEP_TRUE:
    178  1.1  mrg 	  t = 't';
    179  1.1  mrg 	  break;
    180  1.1  mrg 
    181  1.1  mrg 	case REG_DEP_OUTPUT:
    182  1.1  mrg 	  t = 'o';
    183  1.1  mrg 	  break;
    184  1.1  mrg 
    185  1.1  mrg 	case REG_DEP_CONTROL:
    186  1.1  mrg 	  t = 'c';
    187  1.1  mrg 	  break;
    188  1.1  mrg 
    189  1.1  mrg 	case REG_DEP_ANTI:
    190  1.1  mrg 	  t = 'a';
    191  1.1  mrg 	  break;
    192  1.1  mrg 
    193  1.1  mrg 	default:
    194  1.1  mrg 	  gcc_unreachable ();
    195  1.1  mrg 	  break;
    196  1.1  mrg 	}
    197  1.1  mrg 
    198  1.1  mrg       fprintf (dump, "%c; ", t);
    199  1.1  mrg     }
    200  1.1  mrg 
    201  1.1  mrg   if (flags & DUMP_DEP_STATUS)
    202  1.1  mrg     {
    203  1.1  mrg       if (current_sched_info->flags & USE_DEPS_LIST)
    204  1.1  mrg 	dump_ds (dump, DEP_STATUS (dep));
    205  1.1  mrg     }
    206  1.1  mrg 
    207  1.1  mrg   fprintf (dump, ">");
    208  1.1  mrg }
    209  1.1  mrg 
/* Default flags for dump_dep (): print only the endpoints.  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
    212  1.1  mrg 
    213  1.1  mrg /* Dump all fields of DEP to STDERR.  */
    214  1.1  mrg void
    215  1.1  mrg sd_debug_dep (dep_t dep)
    216  1.1  mrg {
    217  1.1  mrg   dump_dep (stderr, dep, 1);
    218  1.1  mrg   fprintf (stderr, "\n");
    219  1.1  mrg }
    220  1.1  mrg 
    221  1.1  mrg /* Determine whether DEP is a dependency link of a non-debug insn on a
    222  1.1  mrg    debug insn.  */
    223  1.1  mrg 
    224  1.1  mrg static inline bool
    225  1.1  mrg depl_on_debug_p (dep_link_t dep)
    226  1.1  mrg {
    227  1.1  mrg   return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
    228  1.1  mrg 	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
    229  1.1  mrg }
    230  1.1  mrg 
    231  1.1  mrg /* Functions to operate with a single link from the dependencies lists -
    232  1.1  mrg    dep_link_t.  */
    233  1.1  mrg 
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  L must currently be detached (both of its link fields
   NULL); on return it is spliced into the doubly-linked list right
   where *PREV_NEXTP pointed.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
    259  1.1  mrg 
    260  1.1  mrg /* Add dep_link LINK to deps_list L.  */
    261  1.1  mrg static void
    262  1.1  mrg add_to_deps_list (dep_link_t link, deps_list_t l)
    263  1.1  mrg {
    264  1.1  mrg   attach_dep_link (link, &DEPS_LIST_FIRST (l));
    265  1.1  mrg 
    266  1.1  mrg   /* Don't count debug deps.  */
    267  1.1  mrg   if (!depl_on_debug_p (link))
    268  1.1  mrg     ++DEPS_LIST_N_LINKS (l);
    269  1.1  mrg }
    270  1.1  mrg 
/* Detach dep_link L from the list.  Unsplices L and clears both of its
   link fields so dep_link_is_detached_p () holds afterwards.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  /* Route the predecessor's next pointer around L.  */
  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  /* Mark L as detached.  */
  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
    286  1.1  mrg 
    287  1.1  mrg /* Remove link LINK from list LIST.  */
    288  1.1  mrg static void
    289  1.1  mrg remove_from_deps_list (dep_link_t link, deps_list_t list)
    290  1.1  mrg {
    291  1.1  mrg   detach_dep_link (link);
    292  1.1  mrg 
    293  1.1  mrg   /* Don't count debug deps.  */
    294  1.1  mrg   if (!depl_on_debug_p (link))
    295  1.1  mrg     --DEPS_LIST_N_LINKS (list);
    296  1.1  mrg }
    297  1.1  mrg 
    298  1.1  mrg /* Move link LINK from list FROM to list TO.  */
    299  1.1  mrg static void
    300  1.1  mrg move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
    301  1.1  mrg {
    302  1.1  mrg   remove_from_deps_list (link, from);
    303  1.1  mrg   add_to_deps_list (link, to);
    304  1.1  mrg }
    305  1.1  mrg 
    306  1.1  mrg /* Return true of LINK is not attached to any list.  */
    307  1.1  mrg static bool
    308  1.1  mrg dep_link_is_detached_p (dep_link_t link)
    309  1.1  mrg {
    310  1.1  mrg   return DEP_LINK_PREV_NEXTP (link) == NULL;
    311  1.1  mrg }
    312  1.1  mrg 
/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there (allocations minus frees); used by
   deps_pools_are_empty_p () to check for leaks.  */
static int dn_pool_diff = 0;
    318  1.1  mrg 
    319  1.1  mrg /* Create a dep_node.  */
    320  1.1  mrg static dep_node_t
    321  1.1  mrg create_dep_node (void)
    322  1.1  mrg {
    323  1.1  mrg   dep_node_t n = dn_pool->allocate ();
    324  1.1  mrg   dep_link_t back = DEP_NODE_BACK (n);
    325  1.1  mrg   dep_link_t forw = DEP_NODE_FORW (n);
    326  1.1  mrg 
    327  1.1  mrg   DEP_LINK_NODE (back) = n;
    328  1.1  mrg   DEP_LINK_NEXT (back) = NULL;
    329  1.1  mrg   DEP_LINK_PREV_NEXTP (back) = NULL;
    330  1.1  mrg 
    331  1.1  mrg   DEP_LINK_NODE (forw) = n;
    332  1.1  mrg   DEP_LINK_NEXT (forw) = NULL;
    333  1.1  mrg   DEP_LINK_PREV_NEXTP (forw) = NULL;
    334  1.1  mrg 
    335  1.1  mrg   ++dn_pool_diff;
    336  1.1  mrg 
    337  1.1  mrg   return n;
    338  1.1  mrg }
    339  1.1  mrg 
    340  1.1  mrg /* Delete dep_node N.  N must not be connected to any deps_list.  */
    341  1.1  mrg static void
    342  1.1  mrg delete_dep_node (dep_node_t n)
    343  1.1  mrg {
    344  1.1  mrg   gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
    345  1.1  mrg 	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
    346  1.1  mrg 
    347  1.1  mrg   XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
    348  1.1  mrg 
    349  1.1  mrg   --dn_pool_diff;
    350  1.1  mrg 
    351  1.1  mrg   dn_pool->remove (n);
    352  1.1  mrg }
    353  1.1  mrg 
/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there (allocations minus frees); used by
   deps_pools_are_empty_p () to check for leaks.  */
static int dl_pool_diff = 0;
    359  1.1  mrg 
    360  1.1  mrg /* Functions to operate with dependences lists - deps_list_t.  */
    361  1.1  mrg 
    362  1.1  mrg /* Return true if list L is empty.  */
    363  1.1  mrg static bool
    364  1.1  mrg deps_list_empty_p (deps_list_t l)
    365  1.1  mrg {
    366  1.1  mrg   return DEPS_LIST_N_LINKS (l) == 0;
    367  1.1  mrg }
    368  1.1  mrg 
    369  1.1  mrg /* Create a new deps_list.  */
    370  1.1  mrg static deps_list_t
    371  1.1  mrg create_deps_list (void)
    372  1.1  mrg {
    373  1.1  mrg   deps_list_t l = dl_pool->allocate ();
    374  1.1  mrg 
    375  1.1  mrg   DEPS_LIST_FIRST (l) = NULL;
    376  1.1  mrg   DEPS_LIST_N_LINKS (l) = 0;
    377  1.1  mrg 
    378  1.1  mrg   ++dl_pool_diff;
    379  1.1  mrg   return l;
    380  1.1  mrg }
    381  1.1  mrg 
    382  1.1  mrg /* Free deps_list L.  */
    383  1.1  mrg static void
    384  1.1  mrg free_deps_list (deps_list_t l)
    385  1.1  mrg {
    386  1.1  mrg   gcc_assert (deps_list_empty_p (l));
    387  1.1  mrg 
    388  1.1  mrg   --dl_pool_diff;
    389  1.1  mrg 
    390  1.1  mrg   dl_pool->remove (l);
    391  1.1  mrg }
    392  1.1  mrg 
    393  1.1  mrg /* Return true if there is no dep_nodes and deps_lists out there.
    394  1.1  mrg    After the region is scheduled all the dependency nodes and lists
    395  1.1  mrg    should [generally] be returned to pool.  */
    396  1.1  mrg bool
    397  1.1  mrg deps_pools_are_empty_p (void)
    398  1.1  mrg {
    399  1.1  mrg   return dn_pool_diff == 0 && dl_pool_diff == 0;
    400  1.1  mrg }
    401  1.1  mrg 
    402  1.1  mrg /* Remove all elements from L.  */
    403  1.1  mrg static void
    404  1.1  mrg clear_deps_list (deps_list_t l)
    405  1.1  mrg {
    406  1.1  mrg   do
    407  1.1  mrg     {
    408  1.1  mrg       dep_link_t link = DEPS_LIST_FIRST (l);
    409  1.1  mrg 
    410  1.1  mrg       if (link == NULL)
    411  1.1  mrg 	break;
    412  1.1  mrg 
    413  1.1  mrg       remove_from_deps_list (link, l);
    414  1.1  mrg     }
    415  1.1  mrg   while (1);
    416  1.1  mrg }
    417  1.1  mrg 
    418  1.1  mrg /* Decide whether a dependency should be treated as a hard or a speculative
    419  1.1  mrg    dependency.  */
    420  1.1  mrg static bool
    421  1.1  mrg dep_spec_p (dep_t dep)
    422  1.1  mrg {
    423  1.1  mrg   if (current_sched_info->flags & DO_SPECULATION)
    424  1.1  mrg     {
    425  1.1  mrg       if (DEP_STATUS (dep) & SPECULATIVE)
    426  1.1  mrg 	return true;
    427  1.1  mrg     }
    428  1.1  mrg   if (current_sched_info->flags & DO_PREDICATION)
    429  1.1  mrg     {
    430  1.1  mrg       if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    431  1.1  mrg 	return true;
    432  1.1  mrg     }
    433  1.1  mrg   if (DEP_REPLACE (dep) != NULL)
    434  1.1  mrg     return true;
    435  1.1  mrg   return false;
    436  1.1  mrg }
    437  1.1  mrg 
/* Register sets accumulated while analyzing a single instruction and
   turned into dependencies afterwards.  */
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
/* Kind of scheduling barrier (if any) the current insn imposes.  */
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   insn in its constraint has one register class.  Even if there is
   currently no hard register in the insn, the particular hard
   register will be in the insn after reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  All bitmap for true dependencies cache is
   allocated then the rest two ones are also allocated.  */
static bitmap true_dependency_cache = NULL;
static bitmap output_dependency_cache = NULL;
static bitmap anti_dependency_cache = NULL;
static bitmap control_dependency_cache = NULL;
static bitmap spec_dependency_cache = NULL;
/* Size the caches were allocated for -- presumably the number of luids
   covered; verify at the allocation site.  */
static int cache_size;

/* True if we should mark added dependencies as a non-register deps.  */
static bool mark_as_hard;
    475  1.1  mrg 
/* Forward declarations for the file-local dependency-analysis
   machinery defined below.  */
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);
    500  1.1  mrg 
    501  1.1  mrg /* Return nonzero if a load of the memory reference MEM can cause a trap.  */
    503  1.1  mrg 
    504  1.1  mrg static int
    505  1.1  mrg deps_may_trap_p (const_rtx mem)
    506  1.1  mrg {
    507  1.1  mrg   const_rtx addr = XEXP (mem, 0);
    508  1.1  mrg 
    509  1.1  mrg   if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    510  1.1  mrg     {
    511  1.1  mrg       const_rtx t = get_reg_known_value (REGNO (addr));
    512  1.1  mrg       if (t)
    513  1.1  mrg 	addr = t;
    514  1.1  mrg     }
    515  1.1  mrg   return rtx_addr_can_trap_p (addr);
    516  1.1  mrg }
    517  1.1  mrg 
    518  1.1  mrg 
    520  1.1  mrg /* Find the condition under which INSN is executed.  If REV is not NULL,
    521  1.1  mrg    it is set to TRUE when the returned comparison should be reversed
    522  1.1  mrg    to get the actual condition.  */
    523  1.1  mrg static rtx
    524  1.1  mrg sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
    525  1.1  mrg {
    526  1.1  mrg   rtx pat = PATTERN (insn);
    527  1.1  mrg   rtx src;
    528  1.1  mrg 
    529  1.1  mrg   if (rev)
    530  1.1  mrg     *rev = false;
    531  1.1  mrg 
    532  1.1  mrg   if (GET_CODE (pat) == COND_EXEC)
    533  1.1  mrg     return COND_EXEC_TEST (pat);
    534  1.1  mrg 
    535  1.1  mrg   if (!any_condjump_p (insn) || !onlyjump_p (insn))
    536  1.1  mrg     return 0;
    537  1.1  mrg 
    538  1.1  mrg   src = SET_SRC (pc_set (insn));
    539  1.1  mrg 
    540  1.1  mrg   if (XEXP (src, 2) == pc_rtx)
    541  1.1  mrg     return XEXP (src, 0);
    542  1.1  mrg   else if (XEXP (src, 1) == pc_rtx)
    543  1.1  mrg     {
    544  1.1  mrg       rtx cond = XEXP (src, 0);
    545  1.1  mrg       enum rtx_code revcode = reversed_comparison_code (cond, insn);
    546  1.1  mrg 
    547  1.1  mrg       if (revcode == UNKNOWN)
    548  1.1  mrg 	return 0;
    549  1.1  mrg 
    550  1.1  mrg       if (rev)
    551  1.1  mrg 	*rev = true;
    552  1.1  mrg       return cond;
    553  1.1  mrg     }
    554  1.1  mrg 
    555  1.1  mrg   return 0;
    556  1.1  mrg }
    557  1.1  mrg 
/* Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  /* If the returned condition is the taken condition, build its
     reversal; if it was already reversed, it is the not-taken
     condition and can be returned directly.  */
  if (!rev)
    {
      /* NOTE(review): reversed_comparison_code may return UNKNOWN
	 (e.g. for some floating-point compares), which would build an
	 rtx with an invalid code here -- confirm callers only reach
	 this for reversible conditions.  */
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
    578  1.1  mrg 
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.
   const_true_rtx in INSN_CACHED_COND serves as a sentinel meaning
   "insn has no condition", so the negative result is cached too.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  /* LUID 0: insn is not in the scheduler's tables, so its cache
     fields are not valid -- fall back to the uncached variant.  */
  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  /* Cached negative result.  */
  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  /* Cache hit.  */
  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  /* First query for this insn: compute and memoize.  */
  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      /* Record "no condition" with the sentinel.  */
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
    613  1.1  mrg 
    614  1.1  mrg /* True when we can find a condition under which INSN is executed.  */
    615  1.1  mrg static bool
    616  1.1  mrg sched_has_condition_p (const rtx_insn *insn)
    617  1.1  mrg {
    618  1.1  mrg   return !! sched_get_condition_with_rev (insn, NULL);
    619  1.1  mrg }
    620  1.1  mrg 
    621  1.1  mrg 
    622  1.1  mrg 
/* Return nonzero if conditions COND1 and COND2 can never be both true.
   That holds when both compare the same operands and the codes are each
   other's reversal (accounting for whether each condition is itself
   reversed).  Note the second operands are compared with pointer
   equality rather than rtx_equal_p -- presumably relying on shared rtl
   for constants; TODO(review): confirm this is intentional.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	  (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
    639  1.1  mrg 
    640  1.1  mrg /* Return true if insn1 and insn2 can never depend on one another because
    641  1.1  mrg    the conditions under which they are executed are mutually exclusive.  */
    642  1.1  mrg bool
    643  1.1  mrg sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
    644  1.1  mrg {
    645  1.1  mrg   rtx cond1, cond2;
    646  1.1  mrg   bool rev1 = false, rev2 = false;
    647  1.1  mrg 
    648  1.1  mrg   /* df doesn't handle conditional lifetimes entirely correctly;
    649  1.1  mrg      calls mess up the conditional lifetimes.  */
    650  1.1  mrg   if (!CALL_P (insn1) && !CALL_P (insn2))
    651  1.1  mrg     {
    652  1.1  mrg       cond1 = sched_get_condition_with_rev (insn1, &rev1);
    653  1.1  mrg       cond2 = sched_get_condition_with_rev (insn2, &rev2);
    654  1.1  mrg       if (cond1 && cond2
    655  1.1  mrg 	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
    656  1.1  mrg 	  /* Make sure first instruction doesn't affect condition of second
    657  1.1  mrg 	     instruction if switched.  */
    658  1.1  mrg 	  && !modified_in_p (cond1, insn2)
    659  1.1  mrg 	  /* Make sure second instruction doesn't affect condition of first
    660  1.1  mrg 	     instruction if switched.  */
    661  1.1  mrg 	  && !modified_in_p (cond2, insn1))
    662  1.1  mrg 	return true;
    663  1.1  mrg     }
    664  1.1  mrg   return false;
    665  1.1  mrg }
    666  1.1  mrg 
    667  1.1  mrg 
    669  1.1  mrg /* Return true if INSN can potentially be speculated with type DS.  */
    670  1.1  mrg bool
    671  1.1  mrg sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
    672  1.1  mrg {
    673  1.1  mrg   if (HAS_INTERNAL_DEP (insn))
    674  1.1  mrg     return false;
    675  1.1  mrg 
    676  1.1  mrg   if (!NONJUMP_INSN_P (insn))
    677  1.1  mrg     return false;
    678  1.1  mrg 
    679  1.1  mrg   if (SCHED_GROUP_P (insn))
    680  1.1  mrg     return false;
    681  1.1  mrg 
    682  1.1  mrg   if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    683  1.1  mrg     return false;
    684  1.1  mrg 
    685  1.1  mrg   if (side_effects_p (PATTERN (insn)))
    686  1.1  mrg     return false;
    687  1.1  mrg 
    688  1.1  mrg   if (ds & BE_IN_SPEC)
    689  1.1  mrg     /* The following instructions, which depend on a speculatively scheduled
    690  1.1  mrg        instruction, cannot be speculatively scheduled along.  */
    691  1.1  mrg     {
    692  1.1  mrg       if (may_trap_or_fault_p (PATTERN (insn)))
    693  1.1  mrg 	/* If instruction might fault, it cannot be speculatively scheduled.
    694  1.1  mrg 	   For control speculation it's obvious why and for data speculation
    695  1.1  mrg 	   it's because the insn might get wrong input if speculation
    696  1.1  mrg 	   wasn't successful.  */
    697  1.1  mrg 	return false;
    698  1.1  mrg 
    699  1.1  mrg       if ((ds & BE_IN_DATA)
    700  1.1  mrg 	  && sched_has_condition_p (insn))
    701  1.1  mrg 	/* If this is a predicated instruction, then it cannot be
    702  1.1  mrg 	   speculatively scheduled.  See PR35659.  */
    703  1.1  mrg 	return false;
    704  1.1  mrg     }
    705  1.1  mrg 
    706  1.1  mrg   return true;
    707  1.1  mrg }
    708  1.1  mrg 
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   Lists are visited in a fixed priority order: hard-back, spec-back,
   forward, resolved-back, resolved-forward.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      /* No requested list types remain: signal end of iteration.  */
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
    757  1.1  mrg 
    758  1.1  mrg /* Return the summary size of INSN's lists defined by LIST_TYPES.  */
    759  1.1  mrg int
    760  1.1  mrg sd_lists_size (const_rtx insn, sd_list_types_def list_types)
    761  1.1  mrg {
    762  1.1  mrg   int size = 0;
    763  1.1  mrg 
    764  1.1  mrg   while (list_types != SD_LIST_NONE)
    765  1.1  mrg     {
    766  1.1  mrg       deps_list_t list;
    767  1.1  mrg       bool resolved_p;
    768  1.1  mrg 
    769  1.1  mrg       sd_next_list (insn, &list_types, &list, &resolved_p);
    770  1.1  mrg       if (list)
    771  1.1  mrg 	size += DEPS_LIST_N_LINKS (list);
    772  1.1  mrg     }
    773  1.1  mrg 
    774  1.1  mrg   return size;
    775  1.1  mrg }
    776  1.1  mrg 
    777  1.1  mrg /* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
    778  1.1  mrg 
    779  1.1  mrg bool
    780  1.1  mrg sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
    781  1.1  mrg {
    782  1.1  mrg   while (list_types != SD_LIST_NONE)
    783  1.1  mrg     {
    784  1.1  mrg       deps_list_t list;
    785  1.1  mrg       bool resolved_p;
    786  1.1  mrg 
    787  1.1  mrg       sd_next_list (insn, &list_types, &list, &resolved_p);
    788  1.1  mrg       if (!deps_list_empty_p (list))
    789  1.1  mrg 	return false;
    790  1.1  mrg     }
    791  1.1  mrg 
    792  1.1  mrg   return true;
    793  1.1  mrg }
    794  1.1  mrg 
    795  1.1  mrg /* Initialize data for INSN.  */
    796  1.1  mrg void
    797  1.1  mrg sd_init_insn (rtx_insn *insn)
    798  1.1  mrg {
    799  1.1  mrg   INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
    800  1.1  mrg   INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
    801  1.1  mrg   INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
    802  1.1  mrg   INSN_FORW_DEPS (insn) = create_deps_list ();
    803  1.1  mrg   INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
    804  1.1  mrg 
    805  1.1  mrg   /* ??? It would be nice to allocate dependency caches here.  */
    806  1.1  mrg }
    807  1.1  mrg 
    808  1.1  mrg /* Free data for INSN.  */
    809  1.1  mrg void
    810  1.1  mrg sd_finish_insn (rtx_insn *insn)
    811  1.1  mrg {
    812  1.1  mrg   /* ??? It would be nice to deallocate dependency caches here.  */
    813  1.1  mrg 
    814  1.1  mrg   free_deps_list (INSN_HARD_BACK_DEPS (insn));
    815  1.1  mrg   INSN_HARD_BACK_DEPS (insn) = NULL;
    816  1.1  mrg 
    817  1.1  mrg   free_deps_list (INSN_SPEC_BACK_DEPS (insn));
    818  1.1  mrg   INSN_SPEC_BACK_DEPS (insn) = NULL;
    819  1.1  mrg 
    820  1.1  mrg   free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
    821  1.1  mrg   INSN_RESOLVED_BACK_DEPS (insn) = NULL;
    822  1.1  mrg 
    823  1.1  mrg   free_deps_list (INSN_FORW_DEPS (insn));
    824  1.1  mrg   INSN_FORW_DEPS (insn) = NULL;
    825  1.1  mrg 
    826  1.1  mrg   free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
    827  1.1  mrg   INSN_RESOLVED_FORW_DEPS (insn) = NULL;
    828  1.1  mrg }
    829  1.1  mrg 
    830  1.1  mrg /* Find a dependency between producer PRO and consumer CON.
    831  1.1  mrg    Search through resolved dependency lists if RESOLVED_P is true.
    832  1.1  mrg    If no such dependency is found return NULL,
    833  1.1  mrg    otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
    834  1.1  mrg    with an iterator pointing to it.  */
    835  1.1  mrg static dep_t
    836  1.1  mrg sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
    837  1.1  mrg 			      sd_iterator_def *sd_it_ptr)
    838  1.1  mrg {
    839  1.1  mrg   sd_list_types_def pro_list_type;
    840  1.1  mrg   sd_list_types_def con_list_type;
    841  1.1  mrg   sd_iterator_def sd_it;
    842  1.1  mrg   dep_t dep;
    843  1.1  mrg   bool found_p = false;
    844  1.1  mrg 
    845  1.1  mrg   if (resolved_p)
    846  1.1  mrg     {
    847  1.1  mrg       pro_list_type = SD_LIST_RES_FORW;
    848  1.1  mrg       con_list_type = SD_LIST_RES_BACK;
    849  1.1  mrg     }
    850  1.1  mrg   else
    851  1.1  mrg     {
    852  1.1  mrg       pro_list_type = SD_LIST_FORW;
    853  1.1  mrg       con_list_type = SD_LIST_BACK;
    854  1.1  mrg     }
    855  1.1  mrg 
    856  1.1  mrg   /* Walk through either back list of INSN or forw list of ELEM
    857  1.1  mrg      depending on which one is shorter.  */
    858  1.1  mrg   if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    859  1.1  mrg     {
    860  1.1  mrg       /* Find the dep_link with producer PRO in consumer's back_deps.  */
    861  1.1  mrg       FOR_EACH_DEP (con, con_list_type, sd_it, dep)
    862  1.1  mrg 	if (DEP_PRO (dep) == pro)
    863  1.1  mrg 	  {
    864  1.1  mrg 	    found_p = true;
    865  1.1  mrg 	    break;
    866  1.1  mrg 	  }
    867  1.1  mrg     }
    868  1.1  mrg   else
    869  1.1  mrg     {
    870  1.1  mrg       /* Find the dep_link with consumer CON in producer's forw_deps.  */
    871  1.1  mrg       FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
    872  1.1  mrg 	if (DEP_CON (dep) == con)
    873  1.1  mrg 	  {
    874  1.1  mrg 	    found_p = true;
    875  1.1  mrg 	    break;
    876  1.1  mrg 	  }
    877  1.1  mrg     }
    878  1.1  mrg 
    879  1.1  mrg   if (found_p)
    880  1.1  mrg     {
    881  1.1  mrg       if (sd_it_ptr != NULL)
    882  1.1  mrg 	*sd_it_ptr = sd_it;
    883  1.1  mrg 
    884  1.1  mrg       return dep;
    885  1.1  mrg     }
    886  1.1  mrg 
    887  1.1  mrg   return NULL;
    888  1.1  mrg }
    889  1.1  mrg 
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to cheaply rule out the case
   where no dependency is present at all before walking the lists.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      /* If none of the four caches records any dependence between the
	 two insns, there is nothing to find.  */
      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
    913  1.1  mrg 
    914  1.1  mrg /* Add or update  a dependence described by DEP.
    915  1.1  mrg    MEM1 and MEM2, if non-null, correspond to memory locations in case of
    916  1.1  mrg    data speculation.
    917  1.1  mrg 
    918  1.1  mrg    The function returns a value indicating if an old entry has been changed
    919  1.1  mrg    or a new entry has been added to insn's backward deps.
    920  1.1  mrg 
    921  1.1  mrg    This function merely checks if producer and consumer is the same insn
    922  1.1  mrg    and doesn't create a dep in this case.  Actual manipulation of
    923  1.1  mrg    dependence data structures is performed in add_or_update_dep_1.  */
    924  1.1  mrg static enum DEPS_ADJUST_RESULT
    925  1.1  mrg maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
    926  1.1  mrg {
    927  1.1  mrg   rtx_insn *elem = DEP_PRO (dep);
    928  1.1  mrg   rtx_insn *insn = DEP_CON (dep);
    929  1.1  mrg 
    930  1.1  mrg   gcc_assert (INSN_P (insn) && INSN_P (elem));
    931  1.1  mrg 
    932  1.1  mrg   /* Don't depend an insn on itself.  */
    933  1.1  mrg   if (insn == elem)
    934  1.1  mrg     {
    935  1.1  mrg       if (sched_deps_info->generate_spec_deps)
    936  1.1  mrg         /* INSN has an internal dependence, which we can't overcome.  */
    937  1.1  mrg         HAS_INTERNAL_DEP (insn) = 1;
    938  1.1  mrg 
    939  1.1  mrg       return DEP_NODEP;
    940  1.1  mrg     }
    941  1.1  mrg 
    942  1.1  mrg   return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
    943  1.1  mrg }
    944  1.1  mrg 
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if a new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Callers must only consult the caches when they exist.  */
  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      /* Without USE_DEPS_LIST a pair of insns carries a single
	 dependence type; probe the caches to recover it.  */
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      /* With USE_DEPS_LIST the dependence status is a bit set; gather
	 every type currently recorded for this pair of insns.  */
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  /* The existing dependence is not speculative.  */
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
   1024  1.1  mrg 
   1025  1.1  mrg /* Set dependency caches according to DEP.  */
   1026  1.1  mrg static void
   1027  1.1  mrg set_dependency_caches (dep_t dep)
   1028  1.1  mrg {
   1029  1.1  mrg   int elem_luid = INSN_LUID (DEP_PRO (dep));
   1030  1.1  mrg   int insn_luid = INSN_LUID (DEP_CON (dep));
   1031  1.1  mrg 
   1032  1.1  mrg   if (!(current_sched_info->flags & USE_DEPS_LIST))
   1033  1.1  mrg     {
   1034  1.1  mrg       switch (DEP_TYPE (dep))
   1035  1.1  mrg 	{
   1036  1.1  mrg 	case REG_DEP_TRUE:
   1037  1.1  mrg 	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
   1038  1.1  mrg 	  break;
   1039  1.1  mrg 
   1040  1.1  mrg 	case REG_DEP_OUTPUT:
   1041  1.1  mrg 	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
   1042  1.1  mrg 	  break;
   1043  1.1  mrg 
   1044  1.1  mrg 	case REG_DEP_ANTI:
   1045  1.1  mrg 	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
   1046  1.1  mrg 	  break;
   1047  1.1  mrg 
   1048  1.1  mrg 	case REG_DEP_CONTROL:
   1049  1.1  mrg 	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
   1050  1.1  mrg 	  break;
   1051  1.1  mrg 
   1052  1.1  mrg 	default:
   1053  1.1  mrg 	  gcc_unreachable ();
   1054  1.1  mrg 	}
   1055  1.1  mrg     }
   1056  1.1  mrg   else
   1057  1.1  mrg     {
   1058  1.1  mrg       ds_t ds = DEP_STATUS (dep);
   1059  1.1  mrg 
   1060  1.1  mrg       if (ds & DEP_TRUE)
   1061  1.1  mrg 	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
   1062  1.1  mrg       if (ds & DEP_OUTPUT)
   1063  1.1  mrg 	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
   1064  1.1  mrg       if (ds & DEP_ANTI)
   1065  1.1  mrg 	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
   1066  1.1  mrg       if (ds & DEP_CONTROL)
   1067  1.1  mrg 	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
   1068  1.1  mrg 
   1069  1.1  mrg       if (ds & SPECULATIVE)
   1070  1.1  mrg 	{
   1071  1.1  mrg 	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
   1072  1.1  mrg 	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
   1073  1.1  mrg 	}
   1074  1.1  mrg     }
   1075  1.1  mrg }
   1076  1.1  mrg 
   1077  1.1  mrg /* Type of dependence DEP have changed from OLD_TYPE.  Update dependency
   1078  1.1  mrg    caches accordingly.  */
   1079  1.1  mrg static void
   1080  1.1  mrg update_dependency_caches (dep_t dep, enum reg_note old_type)
   1081  1.1  mrg {
   1082  1.1  mrg   int elem_luid = INSN_LUID (DEP_PRO (dep));
   1083  1.1  mrg   int insn_luid = INSN_LUID (DEP_CON (dep));
   1084  1.1  mrg 
   1085  1.1  mrg   /* Clear corresponding cache entry because type of the link
   1086  1.1  mrg      may have changed.  Keep them if we use_deps_list.  */
   1087  1.1  mrg   if (!(current_sched_info->flags & USE_DEPS_LIST))
   1088  1.1  mrg     {
   1089  1.1  mrg       switch (old_type)
   1090  1.1  mrg 	{
   1091  1.1  mrg 	case REG_DEP_OUTPUT:
   1092  1.1  mrg 	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
   1093  1.1  mrg 	  break;
   1094  1.1  mrg 
   1095  1.1  mrg 	case REG_DEP_ANTI:
   1096  1.1  mrg 	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
   1097  1.1  mrg 	  break;
   1098  1.1  mrg 
   1099  1.1  mrg 	case REG_DEP_CONTROL:
   1100  1.1  mrg 	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
   1101  1.1  mrg 	  break;
   1102  1.1  mrg 
   1103  1.1  mrg 	default:
   1104  1.1  mrg 	  gcc_unreachable ();
   1105  1.1  mrg 	}
   1106  1.1  mrg     }
   1107  1.1  mrg 
   1108  1.1  mrg   set_dependency_caches (dep);
   1109  1.1  mrg }
   1110  1.1  mrg 
   1111  1.1  mrg /* Convert a dependence pointed to by SD_IT to be non-speculative.  */
   1112  1.1  mrg static void
   1113  1.1  mrg change_spec_dep_to_hard (sd_iterator_def sd_it)
   1114  1.1  mrg {
   1115  1.1  mrg   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
   1116  1.1  mrg   dep_link_t link = DEP_NODE_BACK (node);
   1117  1.1  mrg   dep_t dep = DEP_NODE_DEP (node);
   1118  1.1  mrg   rtx_insn *elem = DEP_PRO (dep);
   1119  1.1  mrg   rtx_insn *insn = DEP_CON (dep);
   1120  1.1  mrg 
   1121  1.1  mrg   move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
   1122  1.1  mrg 
   1123  1.1  mrg   DEP_STATUS (dep) &= ~SPECULATIVE;
   1124  1.1  mrg 
   1125  1.1  mrg   if (true_dependency_cache != NULL)
   1126  1.1  mrg     /* Clear the cache entry.  */
   1127  1.1  mrg     bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
   1128  1.1  mrg 		      INSN_LUID (elem));
   1129  1.1  mrg }
   1130  1.1  mrg 
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.
   Return DEP_CHANGED if DEP was modified, DEP_PRESENT otherwise.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  /* Remember speculativeness before the merge so a downgrade to a hard
     dep can be detected afterwards.  */
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  /* Re-estimate the data-speculation weakness from the
		     two memory references.  */
		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  Move it to the
       hard back-deps list and clear the speculation cache bit.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
   1206  1.1  mrg 
/* Add or update a dependence described by NEW_DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  /* Self-dependences must have been filtered out by
     maybe_add_or_update_dep_1 already.  */
  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  /* First let the bitmap caches, when available, decide whether a list
     search is needed at all.  */
  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  /* The existing dep already subsumes NEW_DEP; just note that
	     the pair now depends for more than one reason.  */
	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the
	   cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      /* Data-speculative dependence: record its weakness, estimated
	 from the two memory references.  */
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
   1293  1.1  mrg 
   1294  1.1  mrg /* Initialize BACK_LIST_PTR with consumer's backward list and
   1295  1.1  mrg    FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   1296  1.1  mrg    initialize with lists that hold resolved deps.  */
   1297  1.1  mrg static void
   1298  1.1  mrg get_back_and_forw_lists (dep_t dep, bool resolved_p,
   1299  1.1  mrg 			 deps_list_t *back_list_ptr,
   1300  1.1  mrg 			 deps_list_t *forw_list_ptr)
   1301  1.1  mrg {
   1302  1.1  mrg   rtx_insn *con = DEP_CON (dep);
   1303  1.1  mrg 
   1304  1.1  mrg   if (!resolved_p)
   1305  1.1  mrg     {
   1306  1.1  mrg       if (dep_spec_p (dep))
   1307  1.1  mrg 	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
   1308  1.1  mrg       else
   1309  1.1  mrg 	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
   1310  1.1  mrg 
   1311  1.1  mrg       *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
   1312  1.1  mrg     }
   1313  1.1  mrg   else
   1314  1.1  mrg     {
   1315  1.1  mrg       *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
   1316  1.1  mrg       *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
   1317  1.1  mrg     }
   1318  1.1  mrg }
   1319  1.1  mrg 
   1320  1.1  mrg /* Add dependence described by DEP.
   1321  1.1  mrg    If RESOLVED_P is true treat the dependence as a resolved one.  */
   1322  1.1  mrg void
   1323  1.1  mrg sd_add_dep (dep_t dep, bool resolved_p)
   1324  1.1  mrg {
   1325  1.1  mrg   dep_node_t n = create_dep_node ();
   1326  1.1  mrg   deps_list_t con_back_deps;
   1327  1.1  mrg   deps_list_t pro_forw_deps;
   1328  1.1  mrg   rtx_insn *elem = DEP_PRO (dep);
   1329  1.1  mrg   rtx_insn *insn = DEP_CON (dep);
   1330  1.1  mrg 
   1331  1.1  mrg   gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
   1332  1.1  mrg 
   1333  1.1  mrg   if ((current_sched_info->flags & DO_SPECULATION) == 0
   1334  1.1  mrg       || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
   1335  1.1  mrg     DEP_STATUS (dep) &= ~SPECULATIVE;
   1336  1.1  mrg 
   1337  1.1  mrg   copy_dep (DEP_NODE_DEP (n), dep);
   1338  1.1  mrg 
   1339  1.1  mrg   get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
   1340  1.1  mrg 
   1341  1.1  mrg   add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
   1342  1.1  mrg 
   1343  1.1  mrg   if (flag_checking)
   1344  1.1  mrg     check_dep (dep, false);
   1345  1.1  mrg 
   1346  1.1  mrg   add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
   1347  1.1  mrg 
   1348  1.1  mrg   /* If we are adding a dependency to INSN's LOG_LINKs, then note that
   1349  1.1  mrg      in the bitmap caches of dependency information.  */
   1350  1.1  mrg   if (true_dependency_cache != NULL)
   1351  1.1  mrg     set_dependency_caches (dep);
   1352  1.1  mrg }
   1353  1.1  mrg 
/* Add or update the (possibly resolved, if RESOLVED_P) dependence
   described by DEP, with no memory references for data speculation.
   This function is a convenience wrapper around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
   1362  1.1  mrg 
   1363  1.1  mrg /* Resolved dependence pointed to by SD_IT.
   1364  1.1  mrg    SD_IT will advance to the next element.  */
   1365  1.1  mrg void
   1366  1.1  mrg sd_resolve_dep (sd_iterator_def sd_it)
   1367  1.1  mrg {
   1368  1.1  mrg   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
   1369  1.1  mrg   dep_t dep = DEP_NODE_DEP (node);
   1370  1.1  mrg   rtx_insn *pro = DEP_PRO (dep);
   1371  1.1  mrg   rtx_insn *con = DEP_CON (dep);
   1372  1.1  mrg 
   1373  1.1  mrg   if (dep_spec_p (dep))
   1374  1.1  mrg     move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
   1375  1.1  mrg 		   INSN_RESOLVED_BACK_DEPS (con));
   1376  1.1  mrg   else
   1377  1.1  mrg     move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
   1378  1.1  mrg 		   INSN_RESOLVED_BACK_DEPS (con));
   1379  1.1  mrg 
   1380  1.1  mrg   move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
   1381  1.1  mrg 		 INSN_RESOLVED_FORW_DEPS (pro));
   1382  1.1  mrg }
   1383  1.1  mrg 
   1384  1.1  mrg /* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   1385  1.1  mrg    pointed to by SD_IT to unresolved state.  */
   1386  1.1  mrg void
   1387  1.1  mrg sd_unresolve_dep (sd_iterator_def sd_it)
   1388  1.1  mrg {
   1389  1.1  mrg   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
   1390  1.1  mrg   dep_t dep = DEP_NODE_DEP (node);
   1391  1.1  mrg   rtx_insn *pro = DEP_PRO (dep);
   1392  1.1  mrg   rtx_insn *con = DEP_CON (dep);
   1393  1.1  mrg 
   1394  1.1  mrg   if (dep_spec_p (dep))
   1395  1.1  mrg     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
   1396  1.1  mrg 		   INSN_SPEC_BACK_DEPS (con));
   1397  1.1  mrg   else
   1398  1.1  mrg     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
   1399  1.1  mrg 		   INSN_HARD_BACK_DEPS (con));
   1400  1.1  mrg 
   1401  1.1  mrg   move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
   1402  1.1  mrg 		 INSN_FORW_DEPS (pro));
   1403  1.1  mrg }
   1404  1.1  mrg 
   1405  1.1  mrg /* Make TO depend on all the FROM's producers.
   1406  1.1  mrg    If RESOLVED_P is true add dependencies to the resolved lists.  */
   1407  1.1  mrg void
   1408  1.1  mrg sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
   1409  1.1  mrg {
   1410  1.1  mrg   sd_list_types_def list_type;
   1411  1.1  mrg   sd_iterator_def sd_it;
   1412  1.1  mrg   dep_t dep;
   1413  1.1  mrg 
   1414  1.1  mrg   list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
   1415  1.1  mrg 
   1416  1.1  mrg   FOR_EACH_DEP (from, list_type, sd_it, dep)
   1417  1.1  mrg     {
   1418  1.1  mrg       dep_def _new_dep, *new_dep = &_new_dep;
   1419  1.1  mrg 
   1420  1.1  mrg       copy_dep (new_dep, dep);
   1421  1.1  mrg       DEP_CON (new_dep) = to;
   1422  1.1  mrg       sd_add_dep (new_dep, resolved_p);
   1423  1.1  mrg     }
   1424  1.1  mrg }
   1425  1.1  mrg 
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  /* Keep the dependency caches in sync: clear the (PRO, CON) bit in
     every per-type cache, since the dep being deleted may be recorded
     in any of them.  */
  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  /* Locate the lists (resolved or active, per SD_IT.resolved_p) that
     this dependence currently lives on.  */
  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  /* Unlinking the backward link leaves SD_IT pointing at the next
     dependence, per the contract above.  */
  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
   1460  1.1  mrg 
   1461  1.1  mrg /* Dump size of the lists.  */
   1462  1.1  mrg #define DUMP_LISTS_SIZE (2)
   1463  1.1  mrg 
   1464  1.1  mrg /* Dump dependencies of the lists.  */
   1465  1.1  mrg #define DUMP_LISTS_DEPS (4)
   1466  1.1  mrg 
   1467  1.1  mrg /* Dump all information about the lists.  */
   1468  1.1  mrg #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
   1469  1.1  mrg 
   1470  1.1  mrg /* Dump deps_lists of INSN specified by TYPES to DUMP.
   1471  1.1  mrg    FLAGS is a bit mask specifying what information about the lists needs
   1472  1.1  mrg    to be printed.
   1473  1.1  mrg    If FLAGS has the very first bit set, then dump all information about
   1474  1.1  mrg    the lists and propagate this bit into the callee dump functions.  */
   1475  1.1  mrg static void
   1476  1.1  mrg dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
   1477  1.1  mrg {
   1478  1.1  mrg   sd_iterator_def sd_it;
   1479  1.1  mrg   dep_t dep;
   1480  1.1  mrg   int all;
   1481  1.1  mrg 
   1482  1.1  mrg   all = (flags & 1);
   1483  1.1  mrg 
   1484  1.1  mrg   if (all)
   1485  1.1  mrg     flags |= DUMP_LISTS_ALL;
   1486  1.1  mrg 
   1487  1.1  mrg   fprintf (dump, "[");
   1488  1.1  mrg 
   1489  1.1  mrg   if (flags & DUMP_LISTS_SIZE)
   1490  1.1  mrg     fprintf (dump, "%d; ", sd_lists_size (insn, types));
   1491  1.1  mrg 
   1492  1.1  mrg   if (flags & DUMP_LISTS_DEPS)
   1493  1.1  mrg     {
   1494  1.1  mrg       FOR_EACH_DEP (insn, types, sd_it, dep)
   1495  1.1  mrg 	{
   1496  1.1  mrg 	  dump_dep (dump, dep, dump_dep_flags | all);
   1497  1.1  mrg 	  fprintf (dump, " ");
   1498  1.1  mrg 	}
   1499  1.1  mrg     }
   1500  1.1  mrg }
   1501  1.1  mrg 
   1502  1.1  mrg /* Dump all information about deps_lists of INSN specified by TYPES
   1503  1.1  mrg    to STDERR.  */
   1504  1.1  mrg void
   1505  1.1  mrg sd_debug_lists (rtx insn, sd_list_types_def types)
   1506  1.1  mrg {
   1507  1.1  mrg   dump_lists (stderr, insn, types, 1);
   1508  1.1  mrg   fprintf (stderr, "\n");
   1509  1.1  mrg }
   1510  1.1  mrg 
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  /* Without predication support there is no point in keeping the
     weaker control form; demote it to an anti dependence.  */
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      /* If PRO is a shadow, analyze the real insn instead.  */
      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  /* CON must also wait for whatever computes the jump's
	     condition, since predication substitutes that condition
	     into CON.  */
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
   1561  1.1  mrg 
   1562  1.1  mrg /* A convenience wrapper to operate on an entire list.  HARD should be
   1563  1.1  mrg    true if DEP_NONREG should be set on newly created dependencies.  */
   1564  1.1  mrg 
   1565  1.1  mrg static void
   1566  1.1  mrg add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
   1567  1.1  mrg 		     enum reg_note dep_type, bool hard)
   1568  1.1  mrg {
   1569  1.1  mrg   mark_as_hard = hard;
   1570  1.1  mrg   for (; list; list = list->next ())
   1571  1.1  mrg     {
   1572  1.1  mrg       if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
   1573  1.1  mrg 	add_dependence (insn, list->insn (), dep_type);
   1574  1.1  mrg     }
   1575  1.1  mrg   mark_as_hard = false;
   1576  1.1  mrg }
   1577  1.1  mrg 
   1578  1.1  mrg /* Similar, but free *LISTP at the same time, when the context
   1579  1.1  mrg    is not readonly.  HARD should be true if DEP_NONREG should be set on
   1580  1.1  mrg    newly created dependencies.  */
   1581  1.1  mrg 
   1582  1.1  mrg static void
   1583  1.1  mrg add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
   1584  1.1  mrg 			      rtx_insn_list **listp,
   1585  1.1  mrg                               int uncond, enum reg_note dep_type, bool hard)
   1586  1.1  mrg {
   1587  1.1  mrg   add_dependence_list (insn, *listp, uncond, dep_type, hard);
   1588  1.1  mrg 
   1589  1.1  mrg   /* We don't want to short-circuit dependencies involving debug
   1590  1.1  mrg      insns, because they may cause actual dependencies to be
   1591  1.1  mrg      disregarded.  */
   1592  1.1  mrg   if (deps->readonly || DEBUG_INSN_P (insn))
   1593  1.1  mrg     return;
   1594  1.1  mrg 
   1595  1.1  mrg   free_INSN_LIST_list (listp);
   1596  1.1  mrg }
   1597  1.1  mrg 
   1598  1.1  mrg /* Remove all occurrences of INSN from LIST.  Return the number of
   1599  1.1  mrg    occurrences removed.  */
   1600  1.1  mrg 
   1601  1.1  mrg static int
   1602  1.1  mrg remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
   1603  1.1  mrg {
   1604  1.1  mrg   int removed = 0;
   1605  1.1  mrg 
   1606  1.1  mrg   while (*listp)
   1607  1.1  mrg     {
   1608  1.1  mrg       if ((*listp)->insn () == insn)
   1609  1.1  mrg         {
   1610  1.1  mrg           remove_free_INSN_LIST_node (listp);
   1611  1.1  mrg           removed++;
   1612  1.1  mrg           continue;
   1613  1.1  mrg         }
   1614  1.1  mrg 
   1615  1.1  mrg       listp = (rtx_insn_list **)&XEXP (*listp, 1);
   1616  1.1  mrg     }
   1617  1.1  mrg 
   1618  1.1  mrg   return removed;
   1619  1.1  mrg }
   1620  1.1  mrg 
   1621  1.1  mrg /* Same as above, but process two lists at once.  */
   1622  1.1  mrg static int
   1623  1.1  mrg remove_from_both_dependence_lists (rtx_insn *insn,
   1624  1.1  mrg 				   rtx_insn_list **listp,
   1625  1.1  mrg 				   rtx_expr_list **exprp)
   1626  1.1  mrg {
   1627  1.1  mrg   int removed = 0;
   1628  1.1  mrg 
   1629  1.1  mrg   while (*listp)
   1630  1.1  mrg     {
   1631  1.1  mrg       if (XEXP (*listp, 0) == insn)
   1632  1.1  mrg         {
   1633  1.1  mrg           remove_free_INSN_LIST_node (listp);
   1634  1.1  mrg           remove_free_EXPR_LIST_node (exprp);
   1635  1.1  mrg           removed++;
   1636  1.1  mrg           continue;
   1637  1.1  mrg         }
   1638  1.1  mrg 
   1639  1.1  mrg       listp = (rtx_insn_list **)&XEXP (*listp, 1);
   1640  1.1  mrg       exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
   1641  1.1  mrg     }
   1642  1.1  mrg 
   1643  1.1  mrg   return removed;
   1644  1.1  mrg }
   1645  1.1  mrg 
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  /* Note there is no explicit iterator advance: sd_delete_dep leaves
     SD_IT pointing at the next dependence after the deletion.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
   1661  1.1  mrg 
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards. Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      /* Walk backwards through the scheduling group (and any debug
	 insns inside it).  If the producer itself is part of the
	 group, the dependence needs no rechaining.  */
      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      /* Re-attach the dependence to the first insn of the group.  */
      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  /* Tie the group member to its predecessor with an anti dependence,
     but only when both are in the same basic block.  */
  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
   1700  1.1  mrg 
   1701  1.1  mrg /* Process an insn's memory dependencies.  There are four kinds of
   1703  1.1  mrg    dependencies:
   1704  1.1  mrg 
   1705  1.1  mrg    (0) read dependence: read follows read
   1706  1.1  mrg    (1) true dependence: read follows write
   1707  1.1  mrg    (2) output dependence: write follows write
   1708  1.1  mrg    (3) anti dependence: write follows read
   1709  1.1  mrg 
   1710  1.1  mrg    We are careful to build only dependencies which actually exist, and
   1711  1.1  mrg    use transitivity to avoid building too many links.  */
   1712  1.1  mrg 
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (class deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      /* Debug insns do not count against the read-list length.  */
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      /* Substitute cselib values into the address so later alias
	 queries see a canonical form.  Copy the MEM first so the
	 insn itself is left unmodified.  */
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
   1753  1.1  mrg 
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  /* Pending reads conflict only with a write, so the read lists are
     flushed just in the FOR_WRITE case.  */
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
                                    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
        {
          free_EXPR_LIST_list (&deps->pending_read_mems);
          deps->pending_read_list_length = 0;
        }
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  /* add_dependence_list_and_free does not free the lists for a debug
     insn (see that function), so free them here explicitly.  */
  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      /* INSN becomes the single barrier that later memory references
	 must depend on.  */
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
   1804  1.1  mrg 
   1805  1.1  mrg /* Instruction which dependencies we are analyzing.  */
   1807  1.1  mrg static rtx_insn *cur_insn = NULL;
   1808  1.1  mrg 
   1809  1.1  mrg /* Implement hooks for haifa scheduler.  */
   1810  1.1  mrg 
   1811  1.1  mrg static void
   1812  1.1  mrg haifa_start_insn (rtx_insn *insn)
   1813  1.1  mrg {
   1814  1.1  mrg   gcc_assert (insn && !cur_insn);
   1815  1.1  mrg 
   1816  1.1  mrg   cur_insn = insn;
   1817  1.1  mrg }
   1818  1.1  mrg 
   1819  1.1  mrg static void
   1820  1.1  mrg haifa_finish_insn (void)
   1821  1.1  mrg {
   1822  1.1  mrg   cur_insn = NULL;
   1823  1.1  mrg }
   1824  1.1  mrg 
   1825  1.1  mrg void
   1826  1.1  mrg haifa_note_reg_set (int regno)
   1827  1.1  mrg {
   1828  1.1  mrg   SET_REGNO_REG_SET (reg_pending_sets, regno);
   1829  1.1  mrg }
   1830  1.1  mrg 
   1831  1.1  mrg void
   1832  1.1  mrg haifa_note_reg_clobber (int regno)
   1833  1.1  mrg {
   1834  1.1  mrg   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
   1835  1.1  mrg }
   1836  1.1  mrg 
   1837  1.1  mrg void
   1838  1.1  mrg haifa_note_reg_use (int regno)
   1839  1.1  mrg {
   1840  1.1  mrg   SET_REGNO_REG_SET (reg_pending_uses, regno);
   1841  1.1  mrg }
   1842  1.1  mrg 
/* Record a memory dependence of CUR_INSN on PENDING_INSN with status DS.
   MEM and PENDING_MEM are the memory references involved.  */
static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  /* The MEMs are only kept for speculative dependencies; otherwise
     drop them before building the dep.  */
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    /* The full status is recorded only when the scheduler keeps
       statuses on its dep lists.  */
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
                current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    /* A memory dependence is by definition not a register one.  */
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }

}
   1864  1.1  mrg 
   1865  1.1  mrg static void
   1866  1.1  mrg haifa_note_dep (rtx_insn *elem, ds_t ds)
   1867  1.1  mrg {
   1868  1.1  mrg   dep_def _dep;
   1869  1.1  mrg   dep_t dep = &_dep;
   1870  1.1  mrg 
   1871  1.1  mrg   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
   1872  1.1  mrg   if (mark_as_hard)
   1873  1.1  mrg     DEP_NONREG (dep) = 1;
   1874  1.1  mrg   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
   1875  1.1  mrg }
   1876  1.1  mrg 
   1877  1.1  mrg static void
   1878  1.1  mrg note_reg_use (int r)
   1879  1.1  mrg {
   1880  1.1  mrg   if (sched_deps_info->note_reg_use)
   1881  1.1  mrg     sched_deps_info->note_reg_use (r);
   1882  1.1  mrg }
   1883  1.1  mrg 
   1884  1.1  mrg static void
   1885  1.1  mrg note_reg_set (int r)
   1886  1.1  mrg {
   1887  1.1  mrg   if (sched_deps_info->note_reg_set)
   1888  1.1  mrg     sched_deps_info->note_reg_set (r);
   1889  1.1  mrg }
   1890  1.1  mrg 
   1891  1.1  mrg static void
   1892  1.1  mrg note_reg_clobber (int r)
   1893  1.1  mrg {
   1894  1.1  mrg   if (sched_deps_info->note_reg_clobber)
   1895  1.1  mrg     sched_deps_info->note_reg_clobber (r);
   1896  1.1  mrg }
   1897  1.1  mrg 
   1898  1.1  mrg static void
   1899  1.1  mrg note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
   1900  1.1  mrg {
   1901  1.1  mrg   if (sched_deps_info->note_mem_dep)
   1902  1.1  mrg     sched_deps_info->note_mem_dep (m1, m2, e, ds);
   1903  1.1  mrg }
   1904  1.1  mrg 
   1905  1.1  mrg static void
   1906  1.1  mrg note_dep (rtx_insn *e, ds_t ds)
   1907  1.1  mrg {
   1908  1.1  mrg   if (sched_deps_info->note_dep)
   1909  1.1  mrg     sched_deps_info->note_dep (e, ds);
   1910  1.1  mrg }
   1911  1.1  mrg 
   1912  1.1  mrg /* Return corresponding to DS reg_note.  */
   1913  1.1  mrg enum reg_note
   1914  1.1  mrg ds_to_dt (ds_t ds)
   1915  1.1  mrg {
   1916  1.1  mrg   if (ds & DEP_TRUE)
   1917  1.1  mrg     return REG_DEP_TRUE;
   1918  1.1  mrg   else if (ds & DEP_OUTPUT)
   1919  1.1  mrg     return REG_DEP_OUTPUT;
   1920  1.1  mrg   else if (ds & DEP_ANTI)
   1921  1.1  mrg     return REG_DEP_ANTI;
   1922  1.1  mrg   else
   1923  1.1  mrg     {
   1924  1.1  mrg       gcc_assert (ds & DEP_CONTROL);
   1925  1.1  mrg       return REG_DEP_CONTROL;
   1926  1.1  mrg     }
   1927  1.1  mrg }
   1928  1.1  mrg 
   1929  1.1  mrg 
   1930  1.1  mrg 
   1932  1.1  mrg /* Functions for computation of info needed for register pressure
   1933  1.1  mrg    sensitive insn scheduling.  */
   1934  1.1  mrg 
   1935  1.1  mrg 
   1936  1.1  mrg /* Allocate and return reg_use_data structure for REGNO and INSN.  */
   1937  1.1  mrg static struct reg_use_data *
   1938  1.1  mrg create_insn_reg_use (int regno, rtx_insn *insn)
   1939  1.1  mrg {
   1940  1.1  mrg   struct reg_use_data *use;
   1941  1.1  mrg 
   1942  1.1  mrg   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
   1943  1.1  mrg   use->regno = regno;
   1944  1.1  mrg   use->insn = insn;
   1945  1.1  mrg   use->next_insn_use = INSN_REG_USE_LIST (insn);
   1946  1.1  mrg   INSN_REG_USE_LIST (insn) = use;
   1947  1.1  mrg   return use;
   1948  1.1  mrg }
   1949  1.1  mrg 
   1950  1.1  mrg /* Allocate reg_set_data structure for REGNO and INSN.  */
   1951  1.1  mrg static void
   1952  1.1  mrg create_insn_reg_set (int regno, rtx insn)
   1953  1.1  mrg {
   1954  1.1  mrg   struct reg_set_data *set;
   1955  1.1  mrg 
   1956  1.1  mrg   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
   1957  1.1  mrg   set->regno = regno;
   1958  1.1  mrg   set->insn = insn;
   1959  1.1  mrg   set->next_insn_set = INSN_REG_SET_LIST (insn);
   1960  1.1  mrg   INSN_REG_SET_LIST (insn) = set;
   1961  1.1  mrg }
   1962  1.1  mrg 
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      /* Hard registers that cannot be allocated do not contribute
	 to register pressure.  */
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      /* Start a singleton circular list; earlier uses of the same
	 regno are spliced in below.  */
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  /* Splice USE2 into the cycle right after USE.  */
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
   1998  1.1  mrg 
   1999  1.1  mrg /* Register pressure info for the currently processed insn.  */
   2000  1.1  mrg static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
   2001  1.1  mrg 
   2002  1.1  mrg /* Return TRUE if INSN has the use structure for REGNO.  */
   2003  1.1  mrg static bool
   2004  1.1  mrg insn_use_p (rtx insn, int regno)
   2005  1.1  mrg {
   2006  1.1  mrg   struct reg_use_data *use;
   2007  1.1  mrg 
   2008  1.1  mrg   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
   2009  1.1  mrg     if (use->regno == regno)
   2010  1.1  mrg       return true;
   2011  1.1  mrg   return false;
   2012  1.1  mrg }
   2013  1.1  mrg 
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      /* A pseudo occupies as many hard registers as its mode needs in
	 its pressure class.  */
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  /* Count the set toward the net pressure change only when
	     INSN does not also use REGNO.  */
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      /* The counters live in INCREASE_BITS-wide bitfields; make sure
	 they have not overflowed.  */
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
   2049  1.1  mrg 
/* Like mark_insn_pseudo_regno_birth except that NREGS saying how many
   hard registers involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  /* Each hard register counts as one toward its pressure class,
     mirroring the accounting in mark_insn_pseudo_birth.  */
  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  /* Count toward the net change only when INSN does
		     not also use REGNO.  */
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      /* Guard the INCREASE_BITS-wide bitfield counters.  */
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
   2091  1.1  mrg 
   2092  1.1  mrg /* Update the register pressure info after birth of pseudo or hard
   2093  1.1  mrg    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   2094  1.1  mrg    correspondingly that the register is in clobber or unused after the
   2095  1.1  mrg    insn.  */
   2096  1.1  mrg static void
   2097  1.1  mrg mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
   2098  1.1  mrg {
   2099  1.1  mrg   int regno;
   2100  1.1  mrg 
   2101  1.1  mrg   if (GET_CODE (reg) == SUBREG)
   2102  1.1  mrg     reg = SUBREG_REG (reg);
   2103  1.1  mrg 
   2104  1.1  mrg   if (! REG_P (reg))
   2105  1.1  mrg     return;
   2106  1.1  mrg 
   2107  1.1  mrg   regno = REGNO (reg);
   2108  1.1  mrg   if (regno < FIRST_PSEUDO_REGISTER)
   2109  1.1  mrg     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
   2110  1.1  mrg 				clobber_p, unused_p);
   2111  1.1  mrg   else
   2112  1.1  mrg     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
   2113  1.1  mrg }
   2114  1.1  mrg 
   2115  1.1  mrg /* Update the register pressure info after death of pseudo register
   2116  1.1  mrg    REGNO.  */
   2117  1.1  mrg static void
   2118  1.1  mrg mark_pseudo_death (int regno)
   2119  1.1  mrg {
   2120  1.1  mrg   int incr;
   2121  1.1  mrg   enum reg_class cl;
   2122  1.1  mrg 
   2123  1.1  mrg   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
   2124  1.1  mrg   cl = sched_regno_pressure_class[regno];
   2125  1.1  mrg   if (cl != NO_REGS)
   2126  1.1  mrg     {
   2127  1.1  mrg       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
   2128  1.1  mrg       reg_pressure_info[cl].change -= incr;
   2129  1.1  mrg     }
   2130  1.1  mrg }
   2131  1.1  mrg 
   2132  1.1  mrg /* Like mark_pseudo_death except that NREGS saying how many hard
   2133  1.1  mrg    registers involved in the death.  */
   2134  1.1  mrg static void
   2135  1.1  mrg mark_hard_regno_death (int regno, int nregs)
   2136  1.1  mrg {
   2137  1.1  mrg   enum reg_class cl;
   2138  1.1  mrg   int last = regno + nregs;
   2139  1.1  mrg 
   2140  1.1  mrg   while (regno < last)
   2141  1.1  mrg     {
   2142  1.1  mrg       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
   2143  1.1  mrg       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
   2144  1.1  mrg 	{
   2145  1.1  mrg 	  cl = sched_regno_pressure_class[regno];
   2146  1.1  mrg 	  if (cl != NO_REGS)
   2147  1.1  mrg 	    reg_pressure_info[cl].change -= 1;
   2148  1.1  mrg 	}
   2149  1.1  mrg       regno++;
   2150  1.1  mrg     }
   2151  1.1  mrg }
   2152  1.1  mrg 
   2153  1.1  mrg /* Update the register pressure info after death of pseudo or hard
   2154  1.1  mrg    register REG.  */
   2155  1.1  mrg static void
   2156  1.1  mrg mark_reg_death (rtx reg)
   2157  1.1  mrg {
   2158  1.1  mrg   int regno;
   2159  1.1  mrg 
   2160  1.1  mrg   if (GET_CODE (reg) == SUBREG)
   2161  1.1  mrg     reg = SUBREG_REG (reg);
   2162  1.1  mrg 
   2163  1.1  mrg   if (! REG_P (reg))
   2164  1.1  mrg     return;
   2165  1.1  mrg 
   2166  1.1  mrg   regno = REGNO (reg);
   2167  1.1  mrg   if (regno < FIRST_PSEUDO_REGISTER)
   2168  1.1  mrg     mark_hard_regno_death (regno, REG_NREGS (reg));
   2169  1.1  mrg   else
   2170  1.1  mrg     mark_pseudo_death (regno);
   2171  1.1  mrg }
   2172  1.1  mrg 
   2173  1.1  mrg /* Process SETTER of REG.  DATA is an insn containing the setter.  */
   2174  1.1  mrg static void
   2175  1.1  mrg mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
   2176  1.1  mrg {
   2177  1.1  mrg   if (setter != NULL_RTX && GET_CODE (setter) != SET)
   2178  1.1  mrg     return;
   2179  1.1  mrg   mark_insn_reg_birth
   2180  1.1  mrg     ((rtx) data, reg, false,
   2181  1.1  mrg      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
   2182  1.1  mrg }
   2183  1.1  mrg 
   2184  1.1  mrg /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
   2185  1.1  mrg static void
   2186  1.1  mrg mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
   2187  1.1  mrg {
   2188  1.1  mrg   if (GET_CODE (setter) == CLOBBER)
   2189  1.1  mrg     mark_insn_reg_birth ((rtx) data, reg, true, false);
   2190  1.1  mrg }
   2191  1.1  mrg 
   2192  1.1  mrg /* Set up reg pressure info related to INSN.  */
   2193  1.1  mrg void
   2194  1.1  mrg init_insn_reg_pressure_info (rtx_insn *insn)
   2195  1.1  mrg {
   2196  1.1  mrg   int i, len;
   2197  1.1  mrg   enum reg_class cl;
   2198  1.1  mrg   static struct reg_pressure_data *pressure_info;
   2199  1.1  mrg   rtx link;
   2200  1.1  mrg 
   2201  1.1  mrg   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
   2202  1.1  mrg 
   2203  1.1  mrg   if (! INSN_P (insn))
   2204  1.1  mrg     return;
   2205  1.1  mrg 
   2206  1.1  mrg   for (i = 0; i < ira_pressure_classes_num; i++)
   2207  1.1  mrg     {
   2208  1.1  mrg       cl = ira_pressure_classes[i];
   2209  1.1  mrg       reg_pressure_info[cl].clobber_increase = 0;
   2210  1.1  mrg       reg_pressure_info[cl].set_increase = 0;
   2211  1.1  mrg       reg_pressure_info[cl].unused_set_increase = 0;
   2212  1.1  mrg       reg_pressure_info[cl].change = 0;
   2213  1.1  mrg     }
   2214  1.1  mrg 
   2215  1.1  mrg   note_stores (insn, mark_insn_reg_clobber, insn);
   2216  1.1  mrg 
   2217  1.1  mrg   note_stores (insn, mark_insn_reg_store, insn);
   2218  1.1  mrg 
   2219  1.1  mrg   if (AUTO_INC_DEC)
   2220  1.1  mrg     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
   2221  1.1  mrg       if (REG_NOTE_KIND (link) == REG_INC)
   2222  1.1  mrg 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
   2223  1.1  mrg 
   2224  1.1  mrg   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
   2225  1.1  mrg     if (REG_NOTE_KIND (link) == REG_DEAD)
   2226  1.1  mrg       mark_reg_death (XEXP (link, 0));
   2227  1.1  mrg 
   2228  1.1  mrg   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
   2229  1.1  mrg   pressure_info
   2230  1.1  mrg     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
   2231  1.1  mrg   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
   2232  1.1  mrg     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
   2233  1.1  mrg 						    * sizeof (int), 1);
   2234  1.1  mrg   for (i = 0; i < ira_pressure_classes_num; i++)
   2235  1.1  mrg     {
   2236  1.1  mrg       cl = ira_pressure_classes[i];
   2237  1.1  mrg       pressure_info[i].clobber_increase
   2238  1.1  mrg 	= reg_pressure_info[cl].clobber_increase;
   2239  1.1  mrg       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
   2240  1.1  mrg       pressure_info[i].unused_set_increase
   2241  1.1  mrg 	= reg_pressure_info[cl].unused_set_increase;
   2242  1.1  mrg       pressure_info[i].change = reg_pressure_info[cl].change;
   2243  1.1  mrg     }
   2244  1.1  mrg }
   2245  1.1  mrg 
   2246  1.1  mrg 
   2247  1.1  mrg 
   2248  1.1  mrg 
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  Used to decide whether the start_lhs /
   start_rhs and finish_lhs / finish_rhs callbacks of sched_deps_info
   should fire for the expression being analyzed.  */
static bool can_start_lhs_rhs_p;
   2254  1.1  mrg 
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (class deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  /* NOTE(review): with assertion checking enabled this branch is
     unreachable because of the gcc_assert above; it only matters in
     builds where gcc_assert is compiled out.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  /* Grow reg_last to cover REGNO, zero-filling the new tail so the
     new entries start with empty dependence lists.  */
  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
                                   max_regno);
      memset (&deps->reg_last[deps->max_reg],
              0, (max_regno - deps->max_reg)
              * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
   2282  1.1  mrg 
   2283  1.1  mrg /* Extends REG_INFO_P if needed.  */
   2284  1.1  mrg void
   2285  1.1  mrg maybe_extend_reg_info_p (void)
   2286  1.1  mrg {
   2287  1.1  mrg   /* Extend REG_INFO_P, if needed.  */
   2288  1.1  mrg   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
   2289  1.1  mrg     {
   2290  1.1  mrg       size_t new_reg_info_p_size = max_regno + 128;
   2291  1.1  mrg 
   2292  1.1  mrg       gcc_assert (!reload_completed && sel_sched_p ());
   2293  1.1  mrg 
   2294  1.1  mrg       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
   2295  1.1  mrg                                                     new_reg_info_p_size,
   2296  1.1  mrg                                                     reg_info_p_size,
   2297  1.1  mrg                                                     sizeof (*reg_info_p));
   2298  1.1  mrg       reg_info_p_size = new_reg_info_p_size;
   2299  1.1  mrg     }
   2300  1.1  mrg }
   2301  1.1  mrg 
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
		   enum rtx_code ref, rtx_insn *insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs (regno, mode);
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  /* CLOBBER and the auto-inc/dec codes are all recorded as
	     clobbers.  */
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      /* An in-range pseudo: record the reference kind.  */
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  /* Uses may safely be scheduled before the next call; sets and
	     clobbers (and debug insns) must instead depend on the last
	     function call.  */
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI, false);
	}
    }
}
   2380  1.1  mrg 
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  /* Save and clear the toplevel-SET flag; only the outermost call may
     fire the start_lhs/start_rhs callbacks.  */
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  /* NOTE(review): the null check below is redundant when assertion
     checking is enabled; it guards builds where gcc_assert is
     compiled out.  */
  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  /* A PARALLEL destination: treat each element's destination as
     clobbered, then analyze the source if this was a SET.  */
  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }

  /* Peel wrappers off the destination until we reach the REG or MEM
     actually written.  */
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || read_modify_subreg_p (dest))
        {
	  /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
			       FIRST_STACK_REG);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  /* Work on a copy so cselib value substitution does not
	     modify the insn's own MEM.  */
	  machine_mode address_mode = get_address_mode (dest);

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				   GET_MODE (t), insn);
	  XEXP (t, 0)
	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						insn);
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      >= param_max_pending_list_length))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx_insn_list *pending;
	  rtx_expr_list *pending_mem;

	  /* Anti-dependences on earlier pending reads of memory that
	     may overlap this store.  */
	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (pending_mem->element (), t)
		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		note_mem_dep (t, pending_mem->element (), pending->insn (),
			      DEP_ANTI);

	      pending = pending->next ();
	      pending_mem = pending_mem->next ();
	    }

	  /* Output dependences on earlier pending writes to possibly
	     overlapping memory.  */
	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (pending_mem->element (), t)
		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		note_mem_dep (t, pending_mem->element (),
			      pending->insn (),
			      DEP_OUTPUT);

	      pending = pending->next ();
	      pending_mem = pending_mem-> next ();
	    }

	  /* Stores must not move above barriers or pending jumps.  */
	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI, true);
	  add_dependence_list (insn, deps->pending_jump_insns, 1,
			       REG_DEP_CONTROL, true);

          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
	}
      /* The address itself is read by this insn.  */
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
   2557  1.1  mrg 
/* Analyze the uses of memory and registers in rtx X in INSN.  */
static void
sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  /* Save and clear the toplevel-SET flag; see can_start_lhs_rhs_p.  */
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  /* NOTE(review): the null check below is redundant when assertion
     checking is enabled; it guards builds where gcc_assert is
     compiled out.  */
  gcc_assert (x);
  if (x == 0)
    return;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    case REG:
      {
	int regno = REGNO (x);
	machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
      /* Treat all reads of a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	}
#endif

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }

    case MEM:
      {
	/* Reading memory.  */
	rtx_insn_list *u;
	rtx_insn_list *pending;
	rtx_expr_list *pending_mem;
	rtx t = x;

	if (sched_deps_info->use_cselib)
	  {
	    /* Work on a copy so cselib value substitution does not
	       modify the insn's own MEM.  */
	    machine_mode address_mode = get_address_mode (t);

	    t = shallow_copy_rtx (t);
	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				     GET_MODE (t), insn);
	    XEXP (t, 0)
	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						  insn);
	  }

	/* Debug insns get no dependencies here; they only register
	   the read below.  */
	if (!DEBUG_INSN_P (insn))
	  {
	    t = canon_rtx (t);
	    /* Anti-dependences on earlier pending reads (read-after-read
	       of volatile or otherwise interfering memory).  */
	    pending = deps->pending_read_insns;
	    pending_mem = deps->pending_read_mems;
	    while (pending)
	      {
		if (read_dependence (pending_mem->element (), t)
		    && ! sched_insns_conditions_mutex_p (insn,
							 pending->insn ()))
		  note_mem_dep (t, pending_mem->element (),
				pending->insn (),
				DEP_ANTI);

		pending = pending->next ();
		pending_mem = pending_mem->next ();
	      }

	    /* True dependences on earlier pending writes.  When
	       speculative dependencies are generated, mark these as
	       data-speculative candidates.  */
	    pending = deps->pending_write_insns;
	    pending_mem = deps->pending_write_mems;
	    while (pending)
	      {
		if (true_dependence (pending_mem->element (), VOIDmode, t)
		    && ! sched_insns_conditions_mutex_p (insn,
							 pending->insn ()))
		  note_mem_dep (t, pending_mem->element (),
				pending->insn (),
				sched_deps_info->generate_spec_deps
				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

		pending = pending->next ();
		pending_mem = pending_mem->next ();
	      }

	    /* Loads must not move above memory-flush barriers.  */
	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
	      add_dependence (insn, u->insn (), REG_DEP_ANTI);

	    /* A trapping load must not move above a pending jump; as a
	       control-speculative dependence when the selective
	       scheduler supports it, a hard one otherwise.  */
	    for (u = deps->pending_jump_insns; u; u = u->next ())
	      if (deps_may_trap_p (x))
		{
		  if ((sched_deps_info->generate_spec_deps)
		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
		    {
		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
					      MAX_DEP_WEAK);

		      note_dep (u->insn (), ds);
		    }
		  else
		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
		}
	  }

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	if (!deps->readonly)
	  {
	    if ((deps->pending_read_list_length
		 + deps->pending_write_list_length)
		>= param_max_pending_list_length
		&& !DEBUG_INSN_P (insn))
	      flush_pending_lists (deps, insn, true, true);
	    add_insn_mem_dependence (deps, true, insn, x);
	  }

	/* The address itself is also read.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }

    /* Force pending stores to memory in case a trap handler needs them.
       Also force pending loads from memory; loads and stores can segfault
       and the signal handler won't be triggered if the trap insn was moved
       above load or store insn.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, true);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
	reg_pending_barrier = TRUE_BARRIER;
      /* Prefetch insn contains addresses only.  So if the prefetch
	 address has no registers, there will be no dependencies on
	 the prefetch insn.  This is wrong with result code
	 correctness point of view as such prefetch can be moved below
	 a jump insn which usually generates MOVE_BARRIER preventing
	 to move insns containing registers or memories through the
	 barrier.  It is also wrong with generated code performance
	 point of view as prefetch withouth dependecies will have a
	 tendency to be issued later instead of earlier.  It is hard
	 to generate accurate dependencies for prefetch insns as
	 prefetch has only the start address but it is better to have
	 something than nothing.  */
      if (!deps->readonly)
	{
	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
	  if (sched_deps_info->use_cselib)
	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
	  add_insn_mem_dependence (deps, true, insn, x);
	}
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */

    case ASM_OPERANDS:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	    && !DEBUG_INSN_P (insn))
	  reg_pending_barrier = TRUE_BARRIER;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We cannot just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

	    if (cslr_p && sched_deps_info->finish_rhs)
	      sched_deps_info->finish_rhs ();

	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}
   2824  1.1  mrg 
   2825  1.1  mrg /* Try to group two fusible insns together to prevent scheduler
   2826  1.1  mrg    from scheduling them apart.  */
   2827  1.1  mrg 
   2828  1.1  mrg static void
   2829  1.1  mrg sched_macro_fuse_insns (rtx_insn *insn)
   2830  1.1  mrg {
   2831  1.1  mrg   rtx_insn *prev;
   2832  1.1  mrg   /* No target hook would return true for debug insn as any of the
   2833  1.1  mrg      hook operand, and with very large sequences of only debug insns
   2834  1.1  mrg      where on each we call sched_macro_fuse_insns it has quadratic
   2835  1.1  mrg      compile time complexity.  */
   2836  1.1  mrg   if (DEBUG_INSN_P (insn))
   2837  1.1  mrg     return;
   2838  1.1  mrg   prev = prev_nonnote_nondebug_insn (insn);
   2839  1.1  mrg   if (!prev)
   2840  1.1  mrg     return;
   2841  1.1  mrg 
   2842  1.1  mrg   if (any_condjump_p (insn))
   2843  1.1  mrg     {
   2844  1.1  mrg       unsigned int condreg1, condreg2;
   2845  1.1  mrg       rtx cc_reg_1;
   2846  1.1  mrg       if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
   2847  1.1  mrg 	{
   2848  1.1  mrg 	  cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
   2849  1.1  mrg 	  if (reg_referenced_p (cc_reg_1, PATTERN (insn))
   2850  1.1  mrg 	      && modified_in_p (cc_reg_1, prev))
   2851  1.1  mrg 	    {
   2852  1.1  mrg 	      if (targetm.sched.macro_fusion_pair_p (prev, insn))
   2853  1.1  mrg 		SCHED_GROUP_P (insn) = 1;
   2854  1.1  mrg 	      return;
   2855  1.1  mrg 	    }
   2856  1.1  mrg 	}
   2857  1.1  mrg     }
   2858  1.1  mrg 
   2859  1.1  mrg   if (single_set (insn) && single_set (prev))
   2860  1.1  mrg     {
   2861  1.1  mrg       if (targetm.sched.macro_fusion_pair_p (prev, insn))
   2862  1.1  mrg 	SCHED_GROUP_P (insn) = 1;
   2863  1.1  mrg     }
   2864  1.1  mrg }
   2865  1.1  mrg 
   2866  1.1  mrg /* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
   2867  1.1  mrg void
   2868  1.1  mrg get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
   2869  1.1  mrg {
   2870  1.1  mrg   extract_insn (insn);
   2871  1.1  mrg   preprocess_constraints (insn);
   2872  1.1  mrg   alternative_mask preferred = get_preferred_alternatives (insn);
   2873  1.1  mrg   ira_implicitly_set_insn_hard_regs (temp, preferred);
   2874  1.1  mrg   *temp &= ~ira_no_alloc_regs;
   2875  1.1  mrg }
   2876  1.1  mrg 
   2877  1.1  mrg /* Analyze an INSN with pattern X to find all dependencies.  */
   2878  1.1  mrg static void
   2879  1.1  mrg sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
   2880  1.1  mrg {
   2881  1.1  mrg   RTX_CODE code = GET_CODE (x);
   2882  1.1  mrg   rtx link;
   2883  1.1  mrg   unsigned i;
   2884  1.1  mrg   reg_set_iterator rsi;
   2885  1.1  mrg 
   2886  1.1  mrg   if (! reload_completed)
   2887  1.1  mrg     {
   2888  1.1  mrg       HARD_REG_SET temp;
   2889  1.1  mrg       get_implicit_reg_pending_clobbers (&temp, insn);
   2890  1.1  mrg       implicit_reg_pending_clobbers |= temp;
   2891  1.1  mrg     }
   2892  1.1  mrg 
   2893  1.1  mrg   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
   2894  1.1  mrg 			 && code == SET);
   2895  1.1  mrg 
   2896  1.1  mrg   /* Group compare and branch insns for macro-fusion.  */
   2897  1.1  mrg   if (!deps->readonly
   2898  1.1  mrg       && targetm.sched.macro_fusion_p
   2899  1.1  mrg       && targetm.sched.macro_fusion_p ())
   2900  1.1  mrg     sched_macro_fuse_insns (insn);
   2901  1.1  mrg 
   2902  1.1  mrg   if (may_trap_p (x))
   2903  1.1  mrg     /* Avoid moving trapping instructions across function calls that might
   2904  1.1  mrg        not always return.  */
   2905  1.1  mrg     add_dependence_list (insn, deps->last_function_call_may_noreturn,
   2906  1.1  mrg 			 1, REG_DEP_ANTI, true);
   2907  1.1  mrg 
   2908  1.1  mrg   /* We must avoid creating a situation in which two successors of the
   2909  1.1  mrg      current block have different unwind info after scheduling.  If at any
   2910  1.1  mrg      point the two paths re-join this leads to incorrect unwind info.  */
   2911  1.1  mrg   /* ??? There are certain situations involving a forced frame pointer in
   2912  1.1  mrg      which, with extra effort, we could fix up the unwind info at a later
   2913  1.1  mrg      CFG join.  However, it seems better to notice these cases earlier
   2914  1.1  mrg      during prologue generation and avoid marking the frame pointer setup
   2915  1.1  mrg      as frame-related at all.  */
   2916  1.1  mrg   if (RTX_FRAME_RELATED_P (insn))
   2917  1.1  mrg     {
   2918  1.1  mrg       /* Make sure prologue insn is scheduled before next jump.  */
   2919  1.1  mrg       deps->sched_before_next_jump
   2920  1.1  mrg 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
   2921  1.1  mrg 
   2922  1.1  mrg       /* Make sure epilogue insn is scheduled after preceding jumps.  */
   2923  1.1  mrg       add_dependence_list (insn, deps->last_pending_memory_flush, 1,
   2924  1.1  mrg 			   REG_DEP_ANTI, true);
   2925  1.1  mrg       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
   2926  1.1  mrg 			   true);
   2927  1.1  mrg     }
   2928  1.1  mrg 
   2929  1.1  mrg   if (code == COND_EXEC)
   2930  1.1  mrg     {
   2931  1.1  mrg       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
   2932  1.1  mrg 
   2933  1.1  mrg       /* ??? Should be recording conditions so we reduce the number of
   2934  1.1  mrg 	 false dependencies.  */
   2935  1.1  mrg       x = COND_EXEC_CODE (x);
   2936  1.1  mrg       code = GET_CODE (x);
   2937  1.1  mrg     }
   2938  1.1  mrg   if (code == SET || code == CLOBBER)
   2939  1.1  mrg     {
   2940  1.1  mrg       sched_analyze_1 (deps, x, insn);
   2941  1.1  mrg 
   2942  1.1  mrg       /* Bare clobber insns are used for letting life analysis, reg-stack
   2943  1.1  mrg 	 and others know that a value is dead.  Depend on the last call
   2944  1.1  mrg 	 instruction so that reg-stack won't get confused.  */
   2945  1.1  mrg       if (code == CLOBBER)
   2946  1.1  mrg 	add_dependence_list (insn, deps->last_function_call, 1,
   2947  1.1  mrg 			     REG_DEP_OUTPUT, true);
   2948  1.1  mrg     }
   2949  1.1  mrg   else if (code == PARALLEL)
   2950  1.1  mrg     {
   2951  1.1  mrg       for (i = XVECLEN (x, 0); i--;)
   2952  1.1  mrg 	{
   2953  1.1  mrg 	  rtx sub = XVECEXP (x, 0, i);
   2954  1.1  mrg 	  code = GET_CODE (sub);
   2955  1.1  mrg 
   2956  1.1  mrg 	  if (code == COND_EXEC)
   2957  1.1  mrg 	    {
   2958  1.1  mrg 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
   2959  1.1  mrg 	      sub = COND_EXEC_CODE (sub);
   2960  1.1  mrg 	      code = GET_CODE (sub);
   2961  1.1  mrg 	    }
   2962  1.1  mrg 	  else if (code == SET || code == CLOBBER)
   2963  1.1  mrg 	    sched_analyze_1 (deps, sub, insn);
   2964  1.1  mrg 	  else
   2965  1.1  mrg 	    sched_analyze_2 (deps, sub, insn);
   2966  1.1  mrg 	}
   2967  1.1  mrg     }
   2968  1.1  mrg   else
   2969  1.1  mrg     sched_analyze_2 (deps, x, insn);
   2970  1.1  mrg 
   2971  1.1  mrg   /* Mark registers CLOBBERED or used by called function.  */
   2972  1.1  mrg   if (CALL_P (insn))
   2973  1.1  mrg     {
   2974  1.1  mrg       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
   2975  1.1  mrg 	{
   2976  1.1  mrg 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
   2977  1.1  mrg 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
   2978  1.1  mrg 	  else if (GET_CODE (XEXP (link, 0)) != SET)
   2979  1.1  mrg 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
   2980  1.1  mrg 	}
   2981  1.1  mrg       /* Don't schedule anything after a tail call, tail call needs
   2982  1.1  mrg 	 to use at least all call-saved registers.  */
   2983  1.1  mrg       if (SIBLING_CALL_P (insn))
   2984  1.1  mrg 	reg_pending_barrier = TRUE_BARRIER;
   2985  1.1  mrg       else if (find_reg_note (insn, REG_SETJMP, NULL))
   2986  1.1  mrg 	reg_pending_barrier = MOVE_BARRIER;
   2987  1.1  mrg     }
   2988  1.1  mrg 
   2989  1.1  mrg   if (JUMP_P (insn))
   2990  1.1  mrg     {
   2991  1.1  mrg       rtx_insn *next = next_nonnote_nondebug_insn (insn);
   2992  1.1  mrg       /* ??? For tablejumps, the barrier may appear not immediately after
   2993  1.1  mrg          the jump, but after a label and a jump_table_data insn.  */
   2994  1.1  mrg       if (next && LABEL_P (next) && NEXT_INSN (next)
   2995  1.1  mrg 	  && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
   2996  1.1  mrg 	next = NEXT_INSN (NEXT_INSN (next));
   2997  1.1  mrg       if (next && BARRIER_P (next))
   2998  1.1  mrg 	reg_pending_barrier = MOVE_BARRIER;
   2999  1.1  mrg       else
   3000  1.1  mrg 	{
   3001  1.1  mrg 	  rtx_insn_list *pending;
   3002  1.1  mrg 	  rtx_expr_list *pending_mem;
   3003  1.1  mrg 
   3004  1.1  mrg           if (sched_deps_info->compute_jump_reg_dependencies)
   3005  1.1  mrg             {
   3006  1.1  mrg               (*sched_deps_info->compute_jump_reg_dependencies)
   3007  1.1  mrg 		(insn, reg_pending_control_uses);
   3008  1.1  mrg 
   3009  1.1  mrg               /* Make latency of jump equal to 0 by using anti-dependence.  */
   3010  1.1  mrg               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
   3011  1.1  mrg                 {
   3012  1.1  mrg                   struct deps_reg *reg_last = &deps->reg_last[i];
   3013  1.1  mrg                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
   3014  1.1  mrg 				       false);
   3015  1.1  mrg                   add_dependence_list (insn, reg_last->implicit_sets,
   3016  1.1  mrg 				       0, REG_DEP_ANTI, false);
   3017  1.1  mrg                   add_dependence_list (insn, reg_last->clobbers, 0,
   3018  1.1  mrg 				       REG_DEP_ANTI, false);
   3019  1.1  mrg                 }
   3020  1.1  mrg             }
   3021  1.1  mrg 
   3022  1.1  mrg 	  /* All memory writes and volatile reads must happen before the
   3023  1.1  mrg 	     jump.  Non-volatile reads must happen before the jump iff
   3024  1.1  mrg 	     the result is needed by the above register used mask.  */
   3025  1.1  mrg 
   3026  1.1  mrg 	  pending = deps->pending_write_insns;
   3027  1.1  mrg 	  pending_mem = deps->pending_write_mems;
   3028  1.1  mrg 	  while (pending)
   3029  1.1  mrg 	    {
   3030  1.1  mrg 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
   3031  1.1  mrg 		add_dependence (insn, pending->insn (),
   3032  1.1  mrg 				REG_DEP_OUTPUT);
   3033  1.1  mrg 	      pending = pending->next ();
   3034  1.1  mrg 	      pending_mem = pending_mem->next ();
   3035  1.1  mrg 	    }
   3036  1.1  mrg 
   3037  1.1  mrg 	  pending = deps->pending_read_insns;
   3038  1.1  mrg 	  pending_mem = deps->pending_read_mems;
   3039  1.1  mrg 	  while (pending)
   3040  1.1  mrg 	    {
   3041  1.1  mrg 	      if (MEM_VOLATILE_P (pending_mem->element ())
   3042  1.1  mrg 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
   3043  1.1  mrg 		add_dependence (insn, pending->insn (),
   3044  1.1  mrg 				REG_DEP_OUTPUT);
   3045  1.1  mrg 	      pending = pending->next ();
   3046  1.1  mrg 	      pending_mem = pending_mem->next ();
   3047  1.1  mrg 	    }
   3048  1.1  mrg 
   3049  1.1  mrg 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
   3050  1.1  mrg 			       REG_DEP_ANTI, true);
   3051  1.1  mrg 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
   3052  1.1  mrg 			       REG_DEP_ANTI, true);
   3053  1.1  mrg 	}
   3054  1.1  mrg     }
   3055  1.1  mrg 
   3056  1.1  mrg   /* If this instruction can throw an exception, then moving it changes
   3057  1.1  mrg      where block boundaries fall.  This is mighty confusing elsewhere.
   3058  1.1  mrg      Therefore, prevent such an instruction from being moved.  Same for
   3059  1.1  mrg      non-jump instructions that define block boundaries.
   3060  1.1  mrg      ??? Unclear whether this is still necessary in EBB mode.  If not,
   3061  1.1  mrg      add_branch_dependences should be adjusted for RGN mode instead.  */
   3062  1.1  mrg   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
   3063  1.1  mrg       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
   3064  1.1  mrg     reg_pending_barrier = MOVE_BARRIER;
   3065  1.1  mrg 
   3066  1.1  mrg   if (sched_pressure != SCHED_PRESSURE_NONE)
   3067  1.1  mrg     {
   3068  1.1  mrg       setup_insn_reg_uses (deps, insn);
   3069  1.1  mrg       init_insn_reg_pressure_info (insn);
   3070  1.1  mrg     }
   3071  1.1  mrg 
   3072  1.1  mrg   /* Add register dependencies for insn.  */
   3073  1.1  mrg   if (DEBUG_INSN_P (insn))
   3074  1.1  mrg     {
   3075  1.1  mrg       rtx_insn *prev = deps->last_debug_insn;
   3076  1.1  mrg       rtx_insn_list *u;
   3077  1.1  mrg 
   3078  1.1  mrg       if (!deps->readonly)
   3079  1.1  mrg 	deps->last_debug_insn = insn;
   3080  1.1  mrg 
   3081  1.1  mrg       if (prev)
   3082  1.1  mrg 	add_dependence (insn, prev, REG_DEP_ANTI);
   3083  1.1  mrg 
   3084  1.1  mrg       add_dependence_list (insn, deps->last_function_call, 1,
   3085  1.1  mrg 			   REG_DEP_ANTI, false);
   3086  1.1  mrg 
   3087  1.1  mrg       if (!sel_sched_p ())
   3088  1.1  mrg 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
   3089  1.1  mrg 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
   3090  1.1  mrg 
   3091  1.1  mrg       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
   3092  1.1  mrg 	{
   3093  1.1  mrg 	  struct deps_reg *reg_last = &deps->reg_last[i];
   3094  1.1  mrg 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
   3095  1.1  mrg 	  /* There's no point in making REG_DEP_CONTROL dependencies for
   3096  1.1  mrg 	     debug insns.  */
   3097  1.1  mrg 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
   3098  1.1  mrg 			       false);
   3099  1.1  mrg 
   3100  1.1  mrg 	  if (!deps->readonly)
   3101  1.1  mrg 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
   3102  1.1  mrg 	}
   3103  1.1  mrg       CLEAR_REG_SET (reg_pending_uses);
   3104  1.1  mrg 
   3105  1.1  mrg       /* Quite often, a debug insn will refer to stuff in the
   3106  1.1  mrg 	 previous instruction, but the reason we want this
   3107  1.1  mrg 	 dependency here is to make sure the scheduler doesn't
   3108  1.1  mrg 	 gratuitously move a debug insn ahead.  This could dirty
   3109  1.1  mrg 	 DF flags and cause additional analysis that wouldn't have
   3110  1.1  mrg 	 occurred in compilation without debug insns, and such
   3111  1.1  mrg 	 additional analysis can modify the generated code.  */
   3112  1.1  mrg       prev = PREV_INSN (insn);
   3113  1.1  mrg 
   3114  1.1  mrg       if (prev && NONDEBUG_INSN_P (prev))
   3115  1.1  mrg 	add_dependence (insn, prev, REG_DEP_ANTI);
   3116  1.1  mrg     }
   3117  1.1  mrg   else
   3118  1.1  mrg     {
   3119  1.1  mrg       regset_head set_or_clobbered;
   3120  1.1  mrg 
   3121  1.1  mrg       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
   3122  1.1  mrg 	{
   3123  1.1  mrg 	  struct deps_reg *reg_last = &deps->reg_last[i];
   3124  1.1  mrg 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
   3125  1.1  mrg 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
   3126  1.1  mrg 			       false);
   3127  1.1  mrg 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
   3128  1.1  mrg 			       false);
   3129  1.1  mrg 
   3130  1.1  mrg 	  if (!deps->readonly)
   3131  1.1  mrg 	    {
   3132  1.1  mrg 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
   3133  1.1  mrg 	      reg_last->uses_length++;
   3134  1.1  mrg 	    }
   3135  1.1  mrg 	}
   3136  1.1  mrg 
   3137  1.1  mrg       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
   3138  1.1  mrg 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
   3139  1.1  mrg 	  {
   3140  1.1  mrg 	    struct deps_reg *reg_last = &deps->reg_last[i];
   3141  1.1  mrg 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
   3142  1.1  mrg 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
   3143  1.1  mrg 				 REG_DEP_ANTI, false);
   3144  1.1  mrg 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
   3145  1.1  mrg 				 false);
   3146  1.1  mrg 
   3147  1.1  mrg 	    if (!deps->readonly)
   3148  1.1  mrg 	      {
   3149  1.1  mrg 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
   3150  1.1  mrg 		reg_last->uses_length++;
   3151  1.1  mrg 	      }
   3152  1.1  mrg 	  }
   3153  1.1  mrg 
   3154  1.1  mrg       if (targetm.sched.exposed_pipeline)
   3155  1.1  mrg 	{
   3156  1.1  mrg 	  INIT_REG_SET (&set_or_clobbered);
   3157  1.1  mrg 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
   3158  1.1  mrg 		      reg_pending_sets);
   3159  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
   3160  1.1  mrg 	    {
   3161  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3162  1.1  mrg 	      rtx list;
   3163  1.1  mrg 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
   3164  1.1  mrg 		{
   3165  1.1  mrg 		  rtx other = XEXP (list, 0);
   3166  1.1  mrg 		  if (INSN_CACHED_COND (other) != const_true_rtx
   3167  1.1  mrg 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
   3168  1.1  mrg 		    INSN_CACHED_COND (other) = const_true_rtx;
   3169  1.1  mrg 		}
   3170  1.1  mrg 	    }
   3171  1.1  mrg 	}
   3172  1.1  mrg 
   3173  1.1  mrg       /* If the current insn is conditional, we can't free any
   3174  1.1  mrg 	 of the lists.  */
   3175  1.1  mrg       if (sched_has_condition_p (insn))
   3176  1.1  mrg 	{
   3177  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
   3178  1.1  mrg 	    {
   3179  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3180  1.1  mrg 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
   3181  1.1  mrg 				   false);
   3182  1.1  mrg 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
   3183  1.1  mrg 				   REG_DEP_ANTI, false);
   3184  1.1  mrg 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
   3185  1.1  mrg 				   false);
   3186  1.1  mrg 	      add_dependence_list (insn, reg_last->control_uses, 0,
   3187  1.1  mrg 				   REG_DEP_CONTROL, false);
   3188  1.1  mrg 
   3189  1.1  mrg 	      if (!deps->readonly)
   3190  1.1  mrg 		{
   3191  1.1  mrg 		  reg_last->clobbers
   3192  1.1  mrg 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
   3193  1.1  mrg 		  reg_last->clobbers_length++;
   3194  1.1  mrg 		}
   3195  1.1  mrg 	    }
   3196  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
   3197  1.1  mrg 	    {
   3198  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3199  1.1  mrg 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
   3200  1.1  mrg 				   false);
   3201  1.1  mrg 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
   3202  1.1  mrg 				   REG_DEP_ANTI, false);
   3203  1.1  mrg 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
   3204  1.1  mrg 				   false);
   3205  1.1  mrg 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
   3206  1.1  mrg 				   false);
   3207  1.1  mrg 	      add_dependence_list (insn, reg_last->control_uses, 0,
   3208  1.1  mrg 				   REG_DEP_CONTROL, false);
   3209  1.1  mrg 
   3210  1.1  mrg 	      if (!deps->readonly)
   3211  1.1  mrg 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
   3212  1.1  mrg 	    }
   3213  1.1  mrg 	}
   3214  1.1  mrg       else
   3215  1.1  mrg 	{
   3216  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
   3217  1.1  mrg 	    {
   3218  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3219  1.1  mrg 	      if (reg_last->uses_length >= param_max_pending_list_length
   3220  1.1  mrg 		  || reg_last->clobbers_length >= param_max_pending_list_length)
   3221  1.1  mrg 		{
   3222  1.1  mrg 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
   3223  1.1  mrg 						REG_DEP_OUTPUT, false);
   3224  1.1  mrg 		  add_dependence_list_and_free (deps, insn,
   3225  1.1  mrg 						&reg_last->implicit_sets, 0,
   3226  1.1  mrg 						REG_DEP_ANTI, false);
   3227  1.1  mrg 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
   3228  1.1  mrg 						REG_DEP_ANTI, false);
   3229  1.1  mrg 		  add_dependence_list_and_free (deps, insn,
   3230  1.1  mrg 						&reg_last->control_uses, 0,
   3231  1.1  mrg 						REG_DEP_ANTI, false);
   3232  1.1  mrg 		  add_dependence_list_and_free (deps, insn,
   3233  1.1  mrg 						&reg_last->clobbers, 0,
   3234  1.1  mrg 						REG_DEP_OUTPUT, false);
   3235  1.1  mrg 
   3236  1.1  mrg 		  if (!deps->readonly)
   3237  1.1  mrg 		    {
   3238  1.1  mrg 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
   3239  1.1  mrg 		      reg_last->clobbers_length = 0;
   3240  1.1  mrg 		      reg_last->uses_length = 0;
   3241  1.1  mrg 		    }
   3242  1.1  mrg 		}
   3243  1.1  mrg 	      else
   3244  1.1  mrg 		{
   3245  1.1  mrg 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
   3246  1.1  mrg 				       false);
   3247  1.1  mrg 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
   3248  1.1  mrg 				       REG_DEP_ANTI, false);
   3249  1.1  mrg 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
   3250  1.1  mrg 				       false);
   3251  1.1  mrg 		  add_dependence_list (insn, reg_last->control_uses, 0,
   3252  1.1  mrg 				       REG_DEP_CONTROL, false);
   3253  1.1  mrg 		}
   3254  1.1  mrg 
   3255  1.1  mrg 	      if (!deps->readonly)
   3256  1.1  mrg 		{
   3257  1.1  mrg 		  reg_last->clobbers_length++;
   3258  1.1  mrg 		  reg_last->clobbers
   3259  1.1  mrg 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
   3260  1.1  mrg 		}
   3261  1.1  mrg 	    }
   3262  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
   3263  1.1  mrg 	    {
   3264  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3265  1.1  mrg 
   3266  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
   3267  1.1  mrg 					    REG_DEP_OUTPUT, false);
   3268  1.1  mrg 	      add_dependence_list_and_free (deps, insn,
   3269  1.1  mrg 					    &reg_last->implicit_sets,
   3270  1.1  mrg 					    0, REG_DEP_ANTI, false);
   3271  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
   3272  1.1  mrg 					    REG_DEP_OUTPUT, false);
   3273  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
   3274  1.1  mrg 					    REG_DEP_ANTI, false);
   3275  1.1  mrg 	      add_dependence_list (insn, reg_last->control_uses, 0,
   3276  1.1  mrg 				   REG_DEP_CONTROL, false);
   3277  1.1  mrg 
   3278  1.1  mrg 	      if (!deps->readonly)
   3279  1.1  mrg 		{
   3280  1.1  mrg 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
   3281  1.1  mrg 		  reg_last->uses_length = 0;
   3282  1.1  mrg 		  reg_last->clobbers_length = 0;
   3283  1.1  mrg 		}
   3284  1.1  mrg 	    }
   3285  1.1  mrg 	}
   3286  1.1  mrg       if (!deps->readonly)
   3287  1.1  mrg 	{
   3288  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
   3289  1.1  mrg 	    {
   3290  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3291  1.1  mrg 	      reg_last->control_uses
   3292  1.1  mrg 		= alloc_INSN_LIST (insn, reg_last->control_uses);
   3293  1.1  mrg 	    }
   3294  1.1  mrg 	}
   3295  1.1  mrg     }
   3296  1.1  mrg 
   3297  1.1  mrg   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
   3298  1.1  mrg     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
   3299  1.1  mrg       {
   3300  1.1  mrg 	struct deps_reg *reg_last = &deps->reg_last[i];
   3301  1.1  mrg 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
   3302  1.1  mrg 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
   3303  1.1  mrg 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
   3304  1.1  mrg 	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
   3305  1.1  mrg 			     false);
   3306  1.1  mrg 
   3307  1.1  mrg 	if (!deps->readonly)
   3308  1.1  mrg 	  reg_last->implicit_sets
   3309  1.1  mrg 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
   3310  1.1  mrg       }
   3311  1.1  mrg 
   3312  1.1  mrg   if (!deps->readonly)
   3313  1.1  mrg     {
   3314  1.1  mrg       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
   3315  1.1  mrg       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
   3316  1.1  mrg       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
   3317  1.1  mrg       IOR_REG_SET_HRS (&deps->reg_last_in_use,
   3318  1.1  mrg 		       implicit_reg_pending_uses
   3319  1.1  mrg 		       | implicit_reg_pending_clobbers);
   3320  1.1  mrg 
   3321  1.1  mrg       /* Set up the pending barrier found.  */
   3322  1.1  mrg       deps->last_reg_pending_barrier = reg_pending_barrier;
   3323  1.1  mrg     }
   3324  1.1  mrg 
   3325  1.1  mrg   CLEAR_REG_SET (reg_pending_uses);
   3326  1.1  mrg   CLEAR_REG_SET (reg_pending_clobbers);
   3327  1.1  mrg   CLEAR_REG_SET (reg_pending_sets);
   3328  1.1  mrg   CLEAR_REG_SET (reg_pending_control_uses);
   3329  1.1  mrg   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
   3330  1.1  mrg   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
   3331  1.1  mrg 
   3332  1.1  mrg   /* Add dependencies if a scheduling barrier was found.  */
   3333  1.1  mrg   if (reg_pending_barrier)
   3334  1.1  mrg     {
   3335  1.1  mrg       /* In the case of barrier the most added dependencies are not
   3336  1.1  mrg          real, so we use anti-dependence here.  */
   3337  1.1  mrg       if (sched_has_condition_p (insn))
   3338  1.1  mrg 	{
   3339  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
   3340  1.1  mrg 	    {
   3341  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3342  1.1  mrg 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
   3343  1.1  mrg 				   true);
   3344  1.1  mrg 	      add_dependence_list (insn, reg_last->sets, 0,
   3345  1.1  mrg 				   reg_pending_barrier == TRUE_BARRIER
   3346  1.1  mrg 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
   3347  1.1  mrg 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
   3348  1.1  mrg 				   REG_DEP_ANTI, true);
   3349  1.1  mrg 	      add_dependence_list (insn, reg_last->clobbers, 0,
   3350  1.1  mrg 				   reg_pending_barrier == TRUE_BARRIER
   3351  1.1  mrg 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
   3352  1.1  mrg 	    }
   3353  1.1  mrg 	}
   3354  1.1  mrg       else
   3355  1.1  mrg 	{
   3356  1.1  mrg 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
   3357  1.1  mrg 	    {
   3358  1.1  mrg 	      struct deps_reg *reg_last = &deps->reg_last[i];
   3359  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
   3360  1.1  mrg 					    REG_DEP_ANTI, true);
   3361  1.1  mrg 	      add_dependence_list_and_free (deps, insn,
   3362  1.1  mrg 					    &reg_last->control_uses, 0,
   3363  1.1  mrg 					    REG_DEP_CONTROL, true);
   3364  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
   3365  1.1  mrg 					    reg_pending_barrier == TRUE_BARRIER
   3366  1.1  mrg 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
   3367  1.1  mrg 					    true);
   3368  1.1  mrg 	      add_dependence_list_and_free (deps, insn,
   3369  1.1  mrg 					    &reg_last->implicit_sets, 0,
   3370  1.1  mrg 					    REG_DEP_ANTI, true);
   3371  1.1  mrg 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
   3372  1.1  mrg 					    reg_pending_barrier == TRUE_BARRIER
   3373  1.1  mrg 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
   3374  1.1  mrg 					    true);
   3375  1.1  mrg 
   3376  1.1  mrg               if (!deps->readonly)
   3377  1.1  mrg                 {
   3378  1.1  mrg                   reg_last->uses_length = 0;
   3379  1.1  mrg                   reg_last->clobbers_length = 0;
   3380  1.1  mrg                 }
   3381  1.1  mrg 	    }
   3382  1.1  mrg 	}
   3383  1.1  mrg 
   3384  1.1  mrg       if (!deps->readonly)
   3385  1.1  mrg         for (i = 0; i < (unsigned)deps->max_reg; i++)
   3386  1.1  mrg           {
   3387  1.1  mrg             struct deps_reg *reg_last = &deps->reg_last[i];
   3388  1.1  mrg             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
   3389  1.1  mrg             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
   3390  1.1  mrg           }
   3391  1.1  mrg 
   3392  1.1  mrg       /* Don't flush pending lists on speculative checks for
   3393  1.1  mrg 	 selective scheduling.  */
   3394  1.1  mrg       if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
   3395  1.1  mrg 	flush_pending_lists (deps, insn, true, true);
   3396  1.1  mrg 
   3397  1.1  mrg       reg_pending_barrier = NOT_A_BARRIER;
   3398  1.1  mrg     }
   3399  1.1  mrg 
   3400  1.1  mrg   /* If a post-call group is still open, see if it should remain so.
   3401  1.1  mrg      This insn must be a simple move of a hard reg to a pseudo or
   3402  1.1  mrg      vice-versa.
   3403  1.1  mrg 
   3404  1.1  mrg      We must avoid moving these insns for correctness on targets
   3405  1.1  mrg      with small register classes, and for special registers like
   3406  1.1  mrg      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
   3407  1.1  mrg      hard regs for all targets.  */
   3408  1.1  mrg 
   3409  1.1  mrg   if (deps->in_post_call_group_p)
   3410  1.1  mrg     {
   3411  1.1  mrg       rtx tmp, set = single_set (insn);
   3412  1.1  mrg       int src_regno, dest_regno;
   3413  1.1  mrg 
   3414  1.1  mrg       if (set == NULL)
   3415  1.1  mrg 	{
   3416  1.1  mrg 	  if (DEBUG_INSN_P (insn))
   3417  1.1  mrg 	    /* We don't want to mark debug insns as part of the same
   3418  1.1  mrg 	       sched group.  We know they really aren't, but if we use
   3419  1.1  mrg 	       debug insns to tell that a call group is over, we'll
   3420  1.1  mrg 	       get different code if debug insns are not there and
   3421  1.1  mrg 	       instructions that follow seem like they should be part
   3422  1.1  mrg 	       of the call group.
   3423  1.1  mrg 
   3424  1.1  mrg 	       Also, if we did, chain_to_prev_insn would move the
   3425  1.1  mrg 	       deps of the debug insn to the call insn, modifying
   3426  1.1  mrg 	       non-debug post-dependency counts of the debug insn
   3427  1.1  mrg 	       dependencies and otherwise messing with the scheduling
   3428  1.1  mrg 	       order.
   3429  1.1  mrg 
   3430  1.1  mrg 	       Instead, let such debug insns be scheduled freely, but
   3431  1.1  mrg 	       keep the call group open in case there are insns that
   3432  1.1  mrg 	       should be part of it afterwards.  Since we grant debug
   3433  1.1  mrg 	       insns higher priority than even sched group insns, it
   3434  1.1  mrg 	       will all turn out all right.  */
   3435  1.1  mrg 	    goto debug_dont_end_call_group;
   3436  1.1  mrg 	  else
   3437  1.1  mrg 	    goto end_call_group;
   3438  1.1  mrg 	}
   3439  1.1  mrg 
   3440  1.1  mrg       tmp = SET_DEST (set);
   3441  1.1  mrg       if (GET_CODE (tmp) == SUBREG)
   3442  1.1  mrg 	tmp = SUBREG_REG (tmp);
   3443  1.1  mrg       if (REG_P (tmp))
   3444  1.1  mrg 	dest_regno = REGNO (tmp);
   3445  1.1  mrg       else
   3446  1.1  mrg 	goto end_call_group;
   3447  1.1  mrg 
   3448  1.1  mrg       tmp = SET_SRC (set);
   3449  1.1  mrg       if (GET_CODE (tmp) == SUBREG)
   3450  1.1  mrg 	tmp = SUBREG_REG (tmp);
   3451  1.1  mrg       if ((GET_CODE (tmp) == PLUS
   3452  1.1  mrg 	   || GET_CODE (tmp) == MINUS)
   3453  1.1  mrg 	  && REG_P (XEXP (tmp, 0))
   3454  1.1  mrg 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
   3455  1.1  mrg 	  && dest_regno == STACK_POINTER_REGNUM)
   3456  1.1  mrg 	src_regno = STACK_POINTER_REGNUM;
   3457  1.1  mrg       else if (REG_P (tmp))
   3458  1.1  mrg 	src_regno = REGNO (tmp);
   3459  1.1  mrg       else
   3460  1.1  mrg 	goto end_call_group;
   3461  1.1  mrg 
   3462  1.1  mrg       if (src_regno < FIRST_PSEUDO_REGISTER
   3463  1.1  mrg 	  || dest_regno < FIRST_PSEUDO_REGISTER)
   3464  1.1  mrg 	{
   3465  1.1  mrg 	  if (!deps->readonly
   3466  1.1  mrg               && deps->in_post_call_group_p == post_call_initial)
   3467  1.1  mrg 	    deps->in_post_call_group_p = post_call;
   3468  1.1  mrg 
   3469  1.1  mrg           if (!sel_sched_p () || sched_emulate_haifa_p)
   3470  1.1  mrg             {
   3471  1.1  mrg               SCHED_GROUP_P (insn) = 1;
   3472  1.1  mrg               CANT_MOVE (insn) = 1;
   3473  1.1  mrg             }
   3474  1.1  mrg 	}
   3475  1.1  mrg       else
   3476  1.1  mrg 	{
   3477  1.1  mrg 	end_call_group:
   3478  1.1  mrg           if (!deps->readonly)
   3479  1.1  mrg             deps->in_post_call_group_p = not_post_call;
   3480  1.1  mrg 	}
   3481  1.1  mrg     }
   3482  1.1  mrg 
   3483  1.1  mrg  debug_dont_end_call_group:
   3484  1.1  mrg   if ((current_sched_info->flags & DO_SPECULATION)
   3485  1.1  mrg       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
   3486  1.1  mrg     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
   3487  1.1  mrg        be speculated.  */
   3488  1.1  mrg     {
   3489  1.1  mrg       if (sel_sched_p ())
   3490  1.1  mrg         sel_mark_hard_insn (insn);
   3491  1.1  mrg       else
   3492  1.1  mrg         {
   3493  1.1  mrg           sd_iterator_def sd_it;
   3494  1.1  mrg           dep_t dep;
   3495  1.1  mrg 
   3496  1.1  mrg           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
   3497  1.1  mrg                sd_iterator_cond (&sd_it, &dep);)
   3498  1.1  mrg             change_spec_dep_to_hard (sd_it);
   3499  1.1  mrg         }
   3500  1.1  mrg     }
   3501  1.1  mrg 
   3502  1.1  mrg   /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
   3503  1.1  mrg      honor their original ordering.  */
   3504  1.1  mrg   if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
   3505  1.1  mrg     {
   3506  1.1  mrg       if (deps->last_args_size)
   3507  1.1  mrg 	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
   3508  1.1  mrg       if (!deps->readonly)
   3509  1.1  mrg 	deps->last_args_size = insn;
   3510  1.1  mrg     }
   3511  1.1  mrg 
   3512  1.1  mrg   /* We must not mix prologue and epilogue insns.  See PR78029.  */
   3513  1.1  mrg   if (prologue_contains (insn))
   3514  1.1  mrg     {
   3515  1.1  mrg       add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
   3516  1.1  mrg       if (!deps->readonly)
   3517  1.1  mrg 	{
   3518  1.1  mrg 	  if (deps->last_logue_was_epilogue)
   3519  1.1  mrg 	    free_INSN_LIST_list (&deps->last_prologue);
   3520  1.1  mrg 	  deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
   3521  1.1  mrg 	  deps->last_logue_was_epilogue = false;
   3522  1.1  mrg 	}
   3523  1.1  mrg     }
   3524  1.1  mrg 
   3525  1.1  mrg   if (epilogue_contains (insn))
   3526  1.1  mrg     {
   3527  1.1  mrg       add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
   3528  1.1  mrg       if (!deps->readonly)
   3529  1.1  mrg 	{
   3530  1.1  mrg 	  if (!deps->last_logue_was_epilogue)
   3531  1.1  mrg 	    free_INSN_LIST_list (&deps->last_epilogue);
   3532  1.1  mrg 	  deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
   3533  1.1  mrg 	  deps->last_logue_was_epilogue = true;
   3534  1.1  mrg 	}
   3535  1.1  mrg     }
   3536  1.1  mrg }
   3537  1.1  mrg 
/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  Used to decide how far trapping insns
   may be moved relative to this call.  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  /* If the call target is a SYMBOL_REF with a known FUNCTION_DECL,
     we can whitelist specific normal builtins that are known to
     return.  */
  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
	{
	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
	      == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
	      {
	      case BUILT_IN_BCMP:
	      case BUILT_IN_BCOPY:
	      case BUILT_IN_BZERO:
	      case BUILT_IN_INDEX:
	      case BUILT_IN_MEMCHR:
	      case BUILT_IN_MEMCMP:
	      case BUILT_IN_MEMCPY:
	      case BUILT_IN_MEMMOVE:
	      case BUILT_IN_MEMPCPY:
	      case BUILT_IN_MEMSET:
	      case BUILT_IN_RINDEX:
	      case BUILT_IN_STPCPY:
	      case BUILT_IN_STPNCPY:
	      case BUILT_IN_STRCAT:
	      case BUILT_IN_STRCHR:
	      case BUILT_IN_STRCMP:
	      case BUILT_IN_STRCPY:
	      case BUILT_IN_STRCSPN:
	      case BUILT_IN_STRLEN:
	      case BUILT_IN_STRNCAT:
	      case BUILT_IN_STRNCMP:
	      case BUILT_IN_STRNCPY:
	      case BUILT_IN_STRPBRK:
	      case BUILT_IN_STRRCHR:
	      case BUILT_IN_STRSPN:
	      case BUILT_IN_STRSTR:
		/* Assume certain string/memory builtins always return.  */
		return false;
	      default:
		/* Any other builtin falls through to the conservative
		   answer below.  */
		break;
	      }
	}
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
   3600  1.1  mrg 
   3601  1.1  mrg /* Return true if INSN should be made dependent on the previous instruction
   3602  1.1  mrg    group, and if all INSN's dependencies should be moved to the first
   3603  1.1  mrg    instruction of that group.  */
   3604  1.1  mrg 
   3605  1.1  mrg static bool
   3606  1.1  mrg chain_to_prev_insn_p (rtx_insn *insn)
   3607  1.1  mrg {
   3608  1.1  mrg   /* INSN forms a group with the previous instruction.  */
   3609  1.1  mrg   if (SCHED_GROUP_P (insn))
   3610  1.1  mrg     return true;
   3611  1.1  mrg 
   3612  1.1  mrg   /* If the previous instruction clobbers a register R and this one sets
   3613  1.1  mrg      part of R, the clobber was added specifically to help us track the
   3614  1.1  mrg      liveness of R.  There's no point scheduling the clobber and leaving
   3615  1.1  mrg      INSN behind, especially if we move the clobber to another block.  */
   3616  1.1  mrg   rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
   3617  1.1  mrg   if (prev
   3618  1.1  mrg       && INSN_P (prev)
   3619  1.1  mrg       && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
   3620  1.1  mrg       && GET_CODE (PATTERN (prev)) == CLOBBER)
   3621  1.1  mrg     {
   3622  1.1  mrg       rtx x = XEXP (PATTERN (prev), 0);
   3623  1.1  mrg       if (set_of (x, insn))
   3624  1.1  mrg 	return true;
   3625  1.1  mrg     }
   3626  1.1  mrg 
   3627  1.1  mrg   return false;
   3628  1.1  mrg }
   3629  1.1  mrg 
/* Analyze INSN with DEPS as a context, recording the backward
   dependencies it creates and updating the pending lists in DEPS.
   Dispatches on the insn class (jump / plain insn or debug insn /
   call) and invokes the start_insn/finish_insn hooks around the
   analysis.  */
void
deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
  /* Let the back end / scheduler front end observe the insn first.  */
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      /* For predication after reload, remember which insns last wrote
	 the registers the condition reads, so predication can check
	 them later.  Only simple "reg <cmp> constant" conditions are
	 handled.  */
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  /* Collect sets, clobbers and implicit sets of every hard reg
	     covered by the condition register.  */
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
	  if (deps->pending_flush_length++ >= param_max_pending_list_length)
	    flush_pending_lists (deps, insn, true, true);
          else
	    deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
	  function_abi callee_abi = insn_callee_abi (insn);
	  /* Record, per hard register, how the call may interact
	     with it.  The branches below are mutually exclusive and
	     are checked in priority order.  */
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
          /* Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  */
	    else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, but be conservative.  */
            else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
          /* The frame pointer is normally not used by the function
             itself, but by the debugger.  */
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}
   3795  1.1  mrg 
   3796  1.1  mrg /* Initialize DEPS for the new block beginning with HEAD.  */
   3797  1.1  mrg void
   3798  1.1  mrg deps_start_bb (class deps_desc *deps, rtx_insn *head)
   3799  1.1  mrg {
   3800  1.1  mrg   gcc_assert (!deps->readonly);
   3801  1.1  mrg 
   3802  1.1  mrg   /* Before reload, if the previous block ended in a call, show that
   3803  1.1  mrg      we are inside a post-call group, so as to keep the lifetimes of
   3804  1.1  mrg      hard registers correct.  */
   3805  1.1  mrg   if (! reload_completed && !LABEL_P (head))
   3806  1.1  mrg     {
   3807  1.1  mrg       rtx_insn *insn = prev_nonnote_nondebug_insn (head);
   3808  1.1  mrg 
   3809  1.1  mrg       if (insn && CALL_P (insn))
   3810  1.1  mrg 	deps->in_post_call_group_p = post_call_initial;
   3811  1.1  mrg     }
   3812  1.1  mrg }
   3813  1.1  mrg 
   3814  1.1  mrg /* Analyze every insn between HEAD and TAIL inclusive, creating backward
   3815  1.1  mrg    dependencies for each insn.  */
   3816  1.1  mrg void
   3817  1.1  mrg sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
   3818  1.1  mrg {
   3819  1.1  mrg   rtx_insn *insn;
   3820  1.1  mrg 
   3821  1.1  mrg   if (sched_deps_info->use_cselib)
   3822  1.1  mrg     cselib_init (CSELIB_RECORD_MEMORY);
   3823  1.1  mrg 
   3824  1.1  mrg   deps_start_bb (deps, head);
   3825  1.1  mrg 
   3826  1.1  mrg   for (insn = head;; insn = NEXT_INSN (insn))
   3827  1.1  mrg     {
   3828  1.1  mrg       if (INSN_P (insn))
   3829  1.1  mrg 	{
   3830  1.1  mrg 	  /* And initialize deps_lists.  */
   3831  1.1  mrg 	  sd_init_insn (insn);
   3832  1.1  mrg 	  /* Clean up SCHED_GROUP_P which may be set by last
   3833  1.1  mrg 	     scheduler pass.  */
   3834  1.1  mrg 	  if (SCHED_GROUP_P (insn))
   3835  1.1  mrg 	    SCHED_GROUP_P (insn) = 0;
   3836  1.1  mrg 	}
   3837  1.1  mrg 
   3838  1.1  mrg       deps_analyze_insn (deps, insn);
   3839  1.1  mrg 
   3840  1.1  mrg       if (insn == tail)
   3841  1.1  mrg 	{
   3842  1.1  mrg 	  if (sched_deps_info->use_cselib)
   3843  1.1  mrg 	    cselib_finish ();
   3844  1.1  mrg 	  return;
   3845  1.1  mrg 	}
   3846  1.1  mrg     }
   3847  1.1  mrg }
   3848  1.1  mrg 
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  /* Choose the resolved or unresolved back-dependency list.  */
  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  /* NOTE(review): the loop deliberately has no increment step —
     removing the current link from the list appears to advance
     *sd_it.linkp to the next element; confirm against the sd_iterator
     implementation before restructuring.  */
  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      /* Unlink the node from its back list and free it; the forward
	 side was cleared by the first pass in sched_free_deps.  */
      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
   3876  1.1  mrg 
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));
      }
  /* Second pass: now that no forward list references the nodes, the
     back lists own them and can free them.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with its dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	/* Release the insn's dependency lists entirely.  */
	sd_finish_insn (insn);
      }
}
   3906  1.1  mrg 
   3907  1.1  mrg /* Initialize variables for region data dependence analysis.
   3909  1.1  mrg    When LAZY_REG_LAST is true, do not allocate reg_last array
   3910  1.1  mrg    of class deps_desc immediately.  */
   3911  1.1  mrg 
   3912  1.1  mrg void
   3913  1.1  mrg init_deps (class deps_desc *deps, bool lazy_reg_last)
   3914  1.1  mrg {
   3915  1.1  mrg   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
   3916  1.1  mrg 
   3917  1.1  mrg   deps->max_reg = max_reg;
   3918  1.1  mrg   if (lazy_reg_last)
   3919  1.1  mrg     deps->reg_last = NULL;
   3920  1.1  mrg   else
   3921  1.1  mrg     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
   3922  1.1  mrg   INIT_REG_SET (&deps->reg_last_in_use);
   3923  1.1  mrg 
   3924  1.1  mrg   deps->pending_read_insns = 0;
   3925  1.1  mrg   deps->pending_read_mems = 0;
   3926  1.1  mrg   deps->pending_write_insns = 0;
   3927  1.1  mrg   deps->pending_write_mems = 0;
   3928  1.1  mrg   deps->pending_jump_insns = 0;
   3929  1.1  mrg   deps->pending_read_list_length = 0;
   3930  1.1  mrg   deps->pending_write_list_length = 0;
   3931  1.1  mrg   deps->pending_flush_length = 0;
   3932  1.1  mrg   deps->last_pending_memory_flush = 0;
   3933  1.1  mrg   deps->last_function_call = 0;
   3934  1.1  mrg   deps->last_function_call_may_noreturn = 0;
   3935  1.1  mrg   deps->sched_before_next_call = 0;
   3936  1.1  mrg   deps->sched_before_next_jump = 0;
   3937  1.1  mrg   deps->in_post_call_group_p = not_post_call;
   3938  1.1  mrg   deps->last_debug_insn = 0;
   3939  1.1  mrg   deps->last_args_size = 0;
   3940  1.1  mrg   deps->last_prologue = 0;
   3941  1.1  mrg   deps->last_epilogue = 0;
   3942  1.1  mrg   deps->last_logue_was_epilogue = false;
   3943  1.1  mrg   deps->last_reg_pending_barrier = NOT_A_BARRIER;
   3944  1.1  mrg   deps->readonly = 0;
   3945  1.1  mrg }
   3946  1.1  mrg 
   3947  1.1  mrg /* Init only reg_last field of DEPS, which was not allocated before as
   3948  1.1  mrg    we inited DEPS lazily.  */
   3949  1.1  mrg void
   3950  1.1  mrg init_deps_reg_last (class deps_desc *deps)
   3951  1.1  mrg {
   3952  1.1  mrg   gcc_assert (deps && deps->max_reg > 0);
   3953  1.1  mrg   gcc_assert (deps->reg_last == NULL);
   3954  1.1  mrg 
   3955  1.1  mrg   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
   3956  1.1  mrg }
   3957  1.1  mrg 
   3958  1.1  mrg 
   3959  1.1  mrg /* Free insn lists found in DEPS.  */
   3960  1.1  mrg 
   3961  1.1  mrg void
   3962  1.1  mrg free_deps (class deps_desc *deps)
   3963  1.1  mrg {
   3964  1.1  mrg   unsigned i;
   3965  1.1  mrg   reg_set_iterator rsi;
   3966  1.1  mrg 
   3967  1.1  mrg   /* We set max_reg to 0 when this context was already freed.  */
   3968  1.1  mrg   if (deps->max_reg == 0)
   3969  1.1  mrg     {
   3970  1.1  mrg       gcc_assert (deps->reg_last == NULL);
   3971  1.1  mrg       return;
   3972  1.1  mrg     }
   3973  1.1  mrg   deps->max_reg = 0;
   3974  1.1  mrg 
   3975  1.1  mrg   free_INSN_LIST_list (&deps->pending_read_insns);
   3976  1.1  mrg   free_EXPR_LIST_list (&deps->pending_read_mems);
   3977  1.1  mrg   free_INSN_LIST_list (&deps->pending_write_insns);
   3978  1.1  mrg   free_EXPR_LIST_list (&deps->pending_write_mems);
   3979  1.1  mrg   free_INSN_LIST_list (&deps->last_pending_memory_flush);
   3980  1.1  mrg 
   3981  1.1  mrg   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
   3982  1.1  mrg      times.  For a testcase with 42000 regs and 8000 small basic blocks,
   3983  1.1  mrg      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
   3984  1.1  mrg   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
   3985  1.1  mrg     {
   3986  1.1  mrg       struct deps_reg *reg_last = &deps->reg_last[i];
   3987  1.1  mrg       if (reg_last->uses)
   3988  1.1  mrg 	free_INSN_LIST_list (&reg_last->uses);
   3989  1.1  mrg       if (reg_last->sets)
   3990  1.1  mrg 	free_INSN_LIST_list (&reg_last->sets);
   3991  1.1  mrg       if (reg_last->implicit_sets)
   3992  1.1  mrg 	free_INSN_LIST_list (&reg_last->implicit_sets);
   3993  1.1  mrg       if (reg_last->control_uses)
   3994  1.1  mrg 	free_INSN_LIST_list (&reg_last->control_uses);
   3995  1.1  mrg       if (reg_last->clobbers)
   3996  1.1  mrg 	free_INSN_LIST_list (&reg_last->clobbers);
   3997  1.1  mrg     }
   3998  1.1  mrg   CLEAR_REG_SET (&deps->reg_last_in_use);
   3999  1.1  mrg 
   4000  1.1  mrg   /* As we initialize reg_last lazily, it is possible that we didn't allocate
   4001  1.1  mrg      it at all.  */
   4002  1.1  mrg   free (deps->reg_last);
   4003  1.1  mrg   deps->reg_last = NULL;
   4004  1.1  mrg 
   4005  1.1  mrg   deps = NULL;
   4006  1.1  mrg }
   4007  1.1  mrg 
/* Remove INSN from dependence contexts DEPS, dropping it from every
   pending list and per-register list it may appear on, and keeping the
   list-length counters consistent.  */
void
remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  /* Debug insns are not counted in pending_read_list_length, so only
     adjust the counter for non-debug insns.  */
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  /* A register whose lists all become empty has its bit cleared from
     reg_last_in_use.  The clearing is deferred by one iteration so we
     never clear the bit the iterator is currently positioned on.  */
  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
	  to_clear = -1U;
	}
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	to_clear = i;
    }
  /* Clear the bit deferred from the final iteration, if any.  */
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
   4061  1.1  mrg 
   4062  1.1  mrg /* Init deps data vector.  */
   4063  1.1  mrg static void
   4064  1.1  mrg init_deps_data_vector (void)
   4065  1.1  mrg {
   4066  1.1  mrg   int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
   4067  1.1  mrg   if (reserve > 0 && ! h_d_i_d.space (reserve))
   4068  1.1  mrg     h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true);
   4069  1.1  mrg }
   4070  1.1  mrg 
/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  Only enable the bitmap dependency caches
     when blocks are large enough (average over 500 insns) for the
     memory cost to pay off.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new object_allocator<_deps_list> ("deps_list");
				/* Allocate lists for one block at a time.  */
      dn_pool = new object_allocator<_dep_node> ("dep_node");
				/* Allocate nodes for one block at a time.  */
    }
}
   4104  1.1  mrg 
   4105  1.1  mrg 
   4106  1.1  mrg /* Create or extend (depending on CREATE_P) dependency caches to
   4107  1.1  mrg    size N.  */
   4108  1.1  mrg void
   4109  1.1  mrg extend_dependency_caches (int n, bool create_p)
   4110  1.1  mrg {
   4111  1.1  mrg   if (create_p || true_dependency_cache)
   4112  1.1  mrg     {
   4113  1.1  mrg       int i, luid = cache_size + n;
   4114  1.1  mrg 
   4115  1.1  mrg       true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
   4116  1.1  mrg 					  luid);
   4117  1.1  mrg       output_dependency_cache = XRESIZEVEC (bitmap_head,
   4118  1.1  mrg 					    output_dependency_cache, luid);
   4119  1.1  mrg       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
   4120  1.1  mrg 					  luid);
   4121  1.1  mrg       control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
   4122  1.1  mrg 					  luid);
   4123  1.1  mrg 
   4124  1.1  mrg       if (current_sched_info->flags & DO_SPECULATION)
   4125  1.1  mrg         spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
   4126  1.1  mrg 					    luid);
   4127  1.1  mrg 
   4128  1.1  mrg       for (i = cache_size; i < luid; i++)
   4129  1.1  mrg 	{
   4130  1.1  mrg 	  bitmap_initialize (&true_dependency_cache[i], 0);
   4131  1.1  mrg 	  bitmap_initialize (&output_dependency_cache[i], 0);
   4132  1.1  mrg 	  bitmap_initialize (&anti_dependency_cache[i], 0);
   4133  1.1  mrg 	  bitmap_initialize (&control_dependency_cache[i], 0);
   4134  1.1  mrg 
   4135  1.1  mrg           if (current_sched_info->flags & DO_SPECULATION)
   4136  1.1  mrg             bitmap_initialize (&spec_dependency_cache[i], 0);
   4137  1.1  mrg 	}
   4138  1.1  mrg       cache_size = luid;
   4139  1.1  mrg     }
   4140  1.1  mrg }
   4141  1.1  mrg 
   4142  1.1  mrg /* Finalize dependency information for the whole function.  */
   4143  1.1  mrg void
   4144  1.1  mrg sched_deps_finish (void)
   4145  1.1  mrg {
   4146  1.1  mrg   gcc_assert (deps_pools_are_empty_p ());
   4147  1.1  mrg   delete dn_pool;
   4148  1.1  mrg   delete dl_pool;
   4149  1.1  mrg   dn_pool = NULL;
   4150  1.1  mrg   dl_pool = NULL;
   4151  1.1  mrg 
   4152  1.1  mrg   h_d_i_d.release ();
   4153  1.1  mrg   cache_size = 0;
   4154  1.1  mrg 
   4155  1.1  mrg   if (true_dependency_cache)
   4156  1.1  mrg     {
   4157  1.1  mrg       int i;
   4158  1.1  mrg 
   4159  1.1  mrg       for (i = 0; i < cache_size; i++)
   4160  1.1  mrg 	{
   4161  1.1  mrg 	  bitmap_clear (&true_dependency_cache[i]);
   4162  1.1  mrg 	  bitmap_clear (&output_dependency_cache[i]);
   4163  1.1  mrg 	  bitmap_clear (&anti_dependency_cache[i]);
   4164  1.1  mrg 	  bitmap_clear (&control_dependency_cache[i]);
   4165  1.1  mrg 
   4166  1.1  mrg           if (sched_deps_info->generate_spec_deps)
   4167  1.1  mrg             bitmap_clear (&spec_dependency_cache[i]);
   4168  1.1  mrg 	}
   4169  1.1  mrg       free (true_dependency_cache);
   4170  1.1  mrg       true_dependency_cache = NULL;
   4171  1.1  mrg       free (output_dependency_cache);
   4172  1.1  mrg       output_dependency_cache = NULL;
   4173  1.1  mrg       free (anti_dependency_cache);
   4174  1.1  mrg       anti_dependency_cache = NULL;
   4175  1.1  mrg       free (control_dependency_cache);
   4176  1.1  mrg       control_dependency_cache = NULL;
   4177  1.1  mrg 
   4178  1.1  mrg       if (sched_deps_info->generate_spec_deps)
   4179  1.1  mrg         {
   4180  1.1  mrg           free (spec_dependency_cache);
   4181  1.1  mrg           spec_dependency_cache = NULL;
   4182  1.1  mrg         }
   4183  1.1  mrg 
   4184  1.1  mrg     }
   4185  1.1  mrg }
   4186  1.1  mrg 
   4187  1.1  mrg /* Initialize some global variables needed by the dependency analysis
   4188  1.1  mrg    code.  */
   4189  1.1  mrg 
   4190  1.1  mrg void
   4191  1.1  mrg init_deps_global (void)
   4192  1.1  mrg {
   4193  1.1  mrg   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
   4194  1.1  mrg   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
   4195  1.1  mrg   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
   4196  1.1  mrg   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
   4197  1.1  mrg   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
   4198  1.1  mrg   reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
   4199  1.1  mrg   reg_pending_barrier = NOT_A_BARRIER;
   4200  1.1  mrg 
   4201  1.1  mrg   if (!sel_sched_p () || sched_emulate_haifa_p)
   4202  1.1  mrg     {
   4203  1.1  mrg       sched_deps_info->start_insn = haifa_start_insn;
   4204  1.1  mrg       sched_deps_info->finish_insn = haifa_finish_insn;
   4205  1.1  mrg 
   4206  1.1  mrg       sched_deps_info->note_reg_set = haifa_note_reg_set;
   4207  1.1  mrg       sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
   4208  1.1  mrg       sched_deps_info->note_reg_use = haifa_note_reg_use;
   4209  1.1  mrg 
   4210  1.1  mrg       sched_deps_info->note_mem_dep = haifa_note_mem_dep;
   4211  1.1  mrg       sched_deps_info->note_dep = haifa_note_dep;
   4212  1.1  mrg    }
   4213  1.1  mrg }
   4214  1.1  mrg 
   4215  1.1  mrg /* Free everything used by the dependency analysis code.  */
   4216  1.1  mrg 
   4217  1.1  mrg void
   4218  1.1  mrg finish_deps_global (void)
   4219  1.1  mrg {
   4220  1.1  mrg   FREE_REG_SET (reg_pending_sets);
   4221  1.1  mrg   FREE_REG_SET (reg_pending_clobbers);
   4222  1.1  mrg   FREE_REG_SET (reg_pending_uses);
   4223  1.1  mrg   FREE_REG_SET (reg_pending_control_uses);
   4224  1.1  mrg }
   4225  1.1  mrg 
   4226  1.1  mrg /* Estimate the weakness of dependence between MEM1 and MEM2.  */
   4227  1.1  mrg dw_t
   4228  1.1  mrg estimate_dep_weak (rtx mem1, rtx mem2)
   4229  1.1  mrg {
   4230  1.1  mrg   if (mem1 == mem2)
   4231  1.1  mrg     /* MEMs are the same - don't speculate.  */
   4232  1.1  mrg     return MIN_DEP_WEAK;
   4233  1.1  mrg 
   4234  1.1  mrg   rtx r1 = XEXP (mem1, 0);
   4235  1.1  mrg   rtx r2 = XEXP (mem2, 0);
   4236  1.1  mrg 
   4237  1.1  mrg   if (sched_deps_info->use_cselib)
   4238  1.1  mrg     {
   4239  1.1  mrg       /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
   4240  1.1  mrg 	 dangling at this point, since we never preserve them.  Instead we
   4241  1.1  mrg 	 canonicalize manually to get stable VALUEs out of hashing.  */
   4242  1.1  mrg       if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
   4243  1.1  mrg 	r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
   4244  1.1  mrg       if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
   4245  1.1  mrg 	r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
   4246  1.1  mrg     }
   4247  1.1  mrg 
   4248  1.1  mrg   if (r1 == r2
   4249  1.1  mrg       || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
   4250  1.1  mrg     /* Again, MEMs are the same.  */
   4251  1.1  mrg     return MIN_DEP_WEAK;
   4252  1.1  mrg   else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
   4253  1.1  mrg     /* Different addressing modes - reason to be more speculative,
   4254  1.1  mrg        than usual.  */
   4255  1.1  mrg     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
   4256  1.1  mrg   else
   4257  1.1  mrg     /* We can't say anything about the dependence.  */
   4258  1.1  mrg     return UNCERTAIN_DEP_WEAK;
   4259  1.1  mrg }
   4260  1.1  mrg 
   4261  1.1  mrg /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   4262  1.1  mrg    This function can handle same INSN and ELEM (INSN == ELEM).
   4263  1.1  mrg    It is a convenience wrapper.  */
   4264  1.1  mrg static void
   4265  1.1  mrg add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
   4266  1.1  mrg {
   4267  1.1  mrg   ds_t ds;
   4268  1.1  mrg   bool internal;
   4269  1.1  mrg 
   4270  1.1  mrg   if (dep_type == REG_DEP_TRUE)
   4271  1.1  mrg     ds = DEP_TRUE;
   4272  1.1  mrg   else if (dep_type == REG_DEP_OUTPUT)
   4273  1.1  mrg     ds = DEP_OUTPUT;
   4274  1.1  mrg   else if (dep_type == REG_DEP_CONTROL)
   4275  1.1  mrg     ds = DEP_CONTROL;
   4276  1.1  mrg   else
   4277  1.1  mrg     {
   4278  1.1  mrg       gcc_assert (dep_type == REG_DEP_ANTI);
   4279  1.1  mrg       ds = DEP_ANTI;
   4280  1.1  mrg     }
   4281  1.1  mrg 
   4282  1.1  mrg   /* When add_dependence is called from inside sched-deps.cc, we expect
   4283  1.1  mrg      cur_insn to be non-null.  */
   4284  1.1  mrg   internal = cur_insn != NULL;
   4285  1.1  mrg   if (internal)
   4286  1.1  mrg     gcc_assert (insn == cur_insn);
   4287  1.1  mrg   else
   4288  1.1  mrg     cur_insn = insn;
   4289  1.1  mrg 
   4290  1.1  mrg   note_dep (elem, ds);
   4291  1.1  mrg   if (!internal)
   4292  1.1  mrg     cur_insn = NULL;
   4293  1.1  mrg }
   4294  1.1  mrg 
   4295  1.1  mrg /* Return weakness of speculative type TYPE in the dep_status DS,
   4296  1.1  mrg    without checking to prevent ICEs on malformed input.  */
   4297  1.1  mrg static dw_t
   4298  1.1  mrg get_dep_weak_1 (ds_t ds, ds_t type)
   4299  1.1  mrg {
   4300  1.1  mrg   ds = ds & type;
   4301  1.1  mrg 
   4302  1.1  mrg   switch (type)
   4303  1.1  mrg     {
   4304  1.1  mrg     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
   4305  1.1  mrg     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
   4306  1.1  mrg     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
   4307  1.1  mrg     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
   4308  1.1  mrg     default: gcc_unreachable ();
   4309  1.1  mrg     }
   4310  1.1  mrg 
   4311  1.1  mrg   return (dw_t) ds;
   4312  1.1  mrg }
   4313  1.1  mrg 
/* Return weakness of speculative type TYPE in the dep_status DS.
   Checked variant of get_dep_weak_1: asserts the extracted weakness
   is within [MIN_DEP_WEAK, MAX_DEP_WEAK].  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
   4323  1.1  mrg 
   4324  1.1  mrg /* Return the dep_status, which has the same parameters as DS, except for
   4325  1.1  mrg    speculative type TYPE, that will have weakness DW.  */
   4326  1.1  mrg ds_t
   4327  1.1  mrg set_dep_weak (ds_t ds, ds_t type, dw_t dw)
   4328  1.1  mrg {
   4329  1.1  mrg   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
   4330  1.1  mrg 
   4331  1.1  mrg   ds &= ~type;
   4332  1.1  mrg   switch (type)
   4333  1.1  mrg     {
   4334  1.1  mrg     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
   4335  1.1  mrg     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
   4336  1.1  mrg     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
   4337  1.1  mrg     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
   4338  1.1  mrg     default: gcc_unreachable ();
   4339  1.1  mrg     }
   4340  1.1  mrg   return ds;
   4341  1.1  mrg }
   4342  1.1  mrg 
   4343  1.1  mrg /* Return the join of two dep_statuses DS1 and DS2.
   4344  1.1  mrg    If MAX_P is true then choose the greater probability,
   4345  1.1  mrg    otherwise multiply probabilities.
   4346  1.1  mrg    This function assumes that both DS1 and DS2 contain speculative bits.  */
   4347  1.1  mrg static ds_t
   4348  1.1  mrg ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
   4349  1.1  mrg {
   4350  1.1  mrg   ds_t ds, t;
   4351  1.1  mrg 
   4352  1.1  mrg   gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
   4353  1.1  mrg 
   4354  1.1  mrg   ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
   4355  1.1  mrg 
   4356  1.1  mrg   t = FIRST_SPEC_TYPE;
   4357  1.1  mrg   do
   4358  1.1  mrg     {
   4359  1.1  mrg       if ((ds1 & t) && !(ds2 & t))
   4360  1.1  mrg 	ds |= ds1 & t;
   4361  1.1  mrg       else if (!(ds1 & t) && (ds2 & t))
   4362  1.1  mrg 	ds |= ds2 & t;
   4363  1.1  mrg       else if ((ds1 & t) && (ds2 & t))
   4364  1.1  mrg 	{
   4365  1.1  mrg 	  dw_t dw1 = get_dep_weak (ds1, t);
   4366  1.1  mrg 	  dw_t dw2 = get_dep_weak (ds2, t);
   4367  1.1  mrg 	  ds_t dw;
   4368  1.1  mrg 
   4369  1.1  mrg 	  if (!max_p)
   4370  1.1  mrg 	    {
   4371  1.1  mrg 	      dw = ((ds_t) dw1) * ((ds_t) dw2);
   4372  1.1  mrg 	      dw /= MAX_DEP_WEAK;
   4373  1.1  mrg 	      if (dw < MIN_DEP_WEAK)
   4374  1.1  mrg 		dw = MIN_DEP_WEAK;
   4375  1.1  mrg 	    }
   4376  1.1  mrg 	  else
   4377  1.1  mrg 	    {
   4378  1.1  mrg 	      if (dw1 >= dw2)
   4379  1.1  mrg 		dw = dw1;
   4380  1.1  mrg 	      else
   4381  1.1  mrg 		dw = dw2;
   4382  1.1  mrg 	    }
   4383  1.1  mrg 
   4384  1.1  mrg 	  ds = set_dep_weak (ds, t, (dw_t) dw);
   4385  1.1  mrg 	}
   4386  1.1  mrg 
   4387  1.1  mrg       if (t == LAST_SPEC_TYPE)
   4388  1.1  mrg 	break;
   4389  1.1  mrg       t <<= SPEC_TYPE_SHIFT;
   4390  1.1  mrg     }
   4391  1.1  mrg   while (1);
   4392  1.1  mrg 
   4393  1.1  mrg   return ds;
   4394  1.1  mrg }
   4395  1.1  mrg 
/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  /* Multiplicative merge: the probabilities of DS1 and DS2 are combined
     by multiplication (max_p == false).  */
  return ds_merge_1 (ds1, ds2, false);
}
   4403  1.1  mrg 
   4404  1.1  mrg /* Return the join of two dep_statuses DS1 and DS2.  */
   4405  1.1  mrg ds_t
   4406  1.1  mrg ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
   4407  1.1  mrg {
   4408  1.1  mrg   ds_t new_status = ds | ds2;
   4409  1.1  mrg 
   4410  1.1  mrg   if (new_status & SPECULATIVE)
   4411  1.1  mrg     {
   4412  1.1  mrg       if ((ds && !(ds & SPECULATIVE))
   4413  1.1  mrg 	  || (ds2 && !(ds2 & SPECULATIVE)))
   4414  1.1  mrg 	/* Then this dep can't be speculative.  */
   4415  1.1  mrg 	new_status &= ~SPECULATIVE;
   4416  1.1  mrg       else
   4417  1.1  mrg 	{
   4418  1.1  mrg 	  /* Both are speculative.  Merging probabilities.  */
   4419  1.1  mrg 	  if (mem1)
   4420  1.1  mrg 	    {
   4421  1.1  mrg 	      dw_t dw;
   4422  1.1  mrg 
   4423  1.1  mrg 	      dw = estimate_dep_weak (mem1, mem2);
   4424  1.1  mrg 	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
   4425  1.1  mrg 	    }
   4426  1.1  mrg 
   4427  1.1  mrg 	  if (!ds)
   4428  1.1  mrg 	    new_status = ds2;
   4429  1.1  mrg 	  else if (!ds2)
   4430  1.1  mrg 	    new_status = ds;
   4431  1.1  mrg 	  else
   4432  1.1  mrg 	    new_status = ds_merge (ds2, ds);
   4433  1.1  mrg 	}
   4434  1.1  mrg     }
   4435  1.1  mrg 
   4436  1.1  mrg   return new_status;
   4437  1.1  mrg }
   4438  1.1  mrg 
   4439  1.1  mrg /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   4440  1.1  mrg    probabilities.  */
   4441  1.1  mrg ds_t
   4442  1.1  mrg ds_max_merge (ds_t ds1, ds_t ds2)
   4443  1.1  mrg {
   4444  1.1  mrg   if (ds1 == 0 && ds2 == 0)
   4445  1.1  mrg     return 0;
   4446  1.1  mrg 
   4447  1.1  mrg   if (ds1 == 0 && ds2 != 0)
   4448  1.1  mrg     return ds2;
   4449  1.1  mrg 
   4450  1.1  mrg   if (ds1 != 0 && ds2 == 0)
   4451  1.1  mrg     return ds1;
   4452  1.1  mrg 
   4453  1.1  mrg   return ds_merge_1 (ds1, ds2, true);
   4454  1.1  mrg }
   4455  1.1  mrg 
   4456  1.1  mrg /* Return the probability of speculation success for the speculation
   4457  1.1  mrg    status DS.  */
   4458  1.1  mrg dw_t
   4459  1.1  mrg ds_weak (ds_t ds)
   4460  1.1  mrg {
   4461  1.1  mrg   ds_t res = 1, dt;
   4462  1.1  mrg   int n = 0;
   4463  1.1  mrg 
   4464  1.1  mrg   dt = FIRST_SPEC_TYPE;
   4465  1.1  mrg   do
   4466  1.1  mrg     {
   4467  1.1  mrg       if (ds & dt)
   4468  1.1  mrg 	{
   4469  1.1  mrg 	  res *= (ds_t) get_dep_weak (ds, dt);
   4470  1.1  mrg 	  n++;
   4471  1.1  mrg 	}
   4472  1.1  mrg 
   4473  1.1  mrg       if (dt == LAST_SPEC_TYPE)
   4474  1.1  mrg 	break;
   4475  1.1  mrg       dt <<= SPEC_TYPE_SHIFT;
   4476  1.1  mrg     }
   4477  1.1  mrg   while (1);
   4478  1.1  mrg 
   4479  1.1  mrg   gcc_assert (n);
   4480  1.1  mrg   while (--n)
   4481  1.1  mrg     res /= MAX_DEP_WEAK;
   4482  1.1  mrg 
   4483  1.1  mrg   if (res < MIN_DEP_WEAK)
   4484  1.1  mrg     res = MIN_DEP_WEAK;
   4485  1.1  mrg 
   4486  1.1  mrg   gcc_assert (res <= MAX_DEP_WEAK);
   4487  1.1  mrg 
   4488  1.1  mrg   return (dw_t) res;
   4489  1.1  mrg }
   4490  1.1  mrg 
   4491  1.1  mrg /* Return a dep status that contains all speculation types of DS.  */
   4492  1.1  mrg ds_t
   4493  1.1  mrg ds_get_speculation_types (ds_t ds)
   4494  1.1  mrg {
   4495  1.1  mrg   if (ds & BEGIN_DATA)
   4496  1.1  mrg     ds |= BEGIN_DATA;
   4497  1.1  mrg   if (ds & BE_IN_DATA)
   4498  1.1  mrg     ds |= BE_IN_DATA;
   4499  1.1  mrg   if (ds & BEGIN_CONTROL)
   4500  1.1  mrg     ds |= BEGIN_CONTROL;
   4501  1.1  mrg   if (ds & BE_IN_CONTROL)
   4502  1.1  mrg     ds |= BE_IN_CONTROL;
   4503  1.1  mrg 
   4504  1.1  mrg   return ds & SPECULATIVE;
   4505  1.1  mrg }
   4506  1.1  mrg 
   4507  1.1  mrg /* Return a dep status that contains maximal weakness for each speculation
   4508  1.1  mrg    type present in DS.  */
   4509  1.1  mrg ds_t
   4510  1.1  mrg ds_get_max_dep_weak (ds_t ds)
   4511  1.1  mrg {
   4512  1.1  mrg   if (ds & BEGIN_DATA)
   4513  1.1  mrg     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
   4514  1.1  mrg   if (ds & BE_IN_DATA)
   4515  1.1  mrg     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
   4516  1.1  mrg   if (ds & BEGIN_CONTROL)
   4517  1.1  mrg     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
   4518  1.1  mrg   if (ds & BE_IN_CONTROL)
   4519  1.1  mrg     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
   4520  1.1  mrg 
   4521  1.1  mrg   return ds;
   4522  1.1  mrg }
   4523  1.1  mrg 
   4524  1.1  mrg /* Dump information about the dependence status S.  */
   4525  1.1  mrg static void
   4526  1.1  mrg dump_ds (FILE *f, ds_t s)
   4527  1.1  mrg {
   4528  1.1  mrg   fprintf (f, "{");
   4529  1.1  mrg 
   4530  1.1  mrg   if (s & BEGIN_DATA)
   4531  1.1  mrg     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
   4532  1.1  mrg   if (s & BE_IN_DATA)
   4533  1.1  mrg     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
   4534  1.1  mrg   if (s & BEGIN_CONTROL)
   4535  1.1  mrg     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
   4536  1.1  mrg   if (s & BE_IN_CONTROL)
   4537  1.1  mrg     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
   4538  1.1  mrg 
   4539  1.1  mrg   if (s & HARD_DEP)
   4540  1.1  mrg     fprintf (f, "HARD_DEP; ");
   4541  1.1  mrg 
   4542  1.1  mrg   if (s & DEP_TRUE)
   4543  1.1  mrg     fprintf (f, "DEP_TRUE; ");
   4544  1.1  mrg   if (s & DEP_OUTPUT)
   4545  1.1  mrg     fprintf (f, "DEP_OUTPUT; ");
   4546  1.1  mrg   if (s & DEP_ANTI)
   4547  1.1  mrg     fprintf (f, "DEP_ANTI; ");
   4548  1.1  mrg   if (s & DEP_CONTROL)
   4549  1.1  mrg     fprintf (f, "DEP_CONTROL; ");
   4550  1.1  mrg 
   4551  1.1  mrg   fprintf (f, "}");
   4552  1.1  mrg }
   4553  1.1  mrg 
/* Print the dependence status S to stderr followed by a newline; intended
   to be called from the debugger.  */
DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
   4560  1.1  mrg 
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  /* Self-dependencies are never allowed.  */
  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  /* Without USE_DEPS_LIST the status word is unused and must be empty.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
		&& (ds & DEP_CONTROL)
		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.
	     get_dep_weak asserts internally, so calling it is the check.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
   4645  1.1  mrg 
/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  /* Location of the memory reference within the pattern of mem_insn;
     dereferenced and passed to validate_change when attempting the
     address rewrite.  */
  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from mem_reg0
     if the increment occurs before the memory address.  */
  rtx inc_input;
};
   4679  1.1  mrg 
   4680  1.1  mrg /* Verify that the memory location described in MII can be replaced with
   4681  1.1  mrg    one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   4682  1.1  mrg    insn remains unchanged by this function.  */
   4683  1.1  mrg 
   4684  1.1  mrg static rtx
   4685  1.1  mrg attempt_change (struct mem_inc_info *mii, rtx new_addr)
   4686  1.1  mrg {
   4687  1.1  mrg   rtx mem = *mii->mem_loc;
   4688  1.1  mrg   rtx new_mem;
   4689  1.1  mrg 
   4690  1.1  mrg   if (!targetm.new_address_profitable_p (mem, mii->mem_insn, new_addr))
   4691  1.1  mrg     return NULL_RTX;
   4692  1.1  mrg 
   4693  1.1  mrg   /* Jump through a lot of hoops to keep the attributes up to date.  We
   4694  1.1  mrg      do not want to call one of the change address variants that take
   4695  1.1  mrg      an offset even though we know the offset in many cases.  These
   4696  1.1  mrg      assume you are changing where the address is pointing by the
   4697  1.1  mrg      offset.  */
   4698  1.1  mrg   new_mem = replace_equiv_address_nv (mem, new_addr);
   4699  1.1  mrg   if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
   4700  1.1  mrg     {
   4701  1.1  mrg       if (sched_verbose >= 5)
   4702  1.1  mrg 	fprintf (sched_dump, "validation failure\n");
   4703  1.1  mrg       return NULL_RTX;
   4704  1.1  mrg     }
   4705  1.1  mrg 
   4706  1.1  mrg   /* Put back the old one.  */
   4707  1.1  mrg   validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
   4708  1.1  mrg 
   4709  1.1  mrg   return new_mem;
   4710  1.1  mrg }
   4711  1.1  mrg 
   4712  1.1  mrg /* Return true if INSN is of a form "a = b op c" where a and b are
   4713  1.1  mrg    regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   4714  1.1  mrg    informantion in MII about what is found.
   4715  1.1  mrg    BEFORE_MEM indicates whether the increment is found before or after
   4716  1.1  mrg    a corresponding memory reference.  */
   4717  1.1  mrg 
   4718  1.1  mrg static bool
   4719  1.1  mrg parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
   4720  1.1  mrg {
   4721  1.1  mrg   rtx pat = single_set (insn);
   4722  1.1  mrg   rtx src, cst;
   4723  1.1  mrg   bool regs_equal;
   4724  1.1  mrg 
   4725  1.1  mrg   if (RTX_FRAME_RELATED_P (insn) || !pat)
   4726  1.1  mrg     return false;
   4727  1.1  mrg 
   4728  1.1  mrg   /* Do not allow breaking data dependencies for insns that are marked
   4729  1.1  mrg      with REG_STACK_CHECK.  */
   4730  1.1  mrg   if (find_reg_note (insn, REG_STACK_CHECK, NULL))
   4731  1.1  mrg     return false;
   4732  1.1  mrg 
   4733  1.1  mrg   /* Result must be single reg.  */
   4734  1.1  mrg   if (!REG_P (SET_DEST (pat)))
   4735  1.1  mrg     return false;
   4736  1.1  mrg 
   4737  1.1  mrg   if (GET_CODE (SET_SRC (pat)) != PLUS)
   4738  1.1  mrg     return false;
   4739  1.1  mrg 
   4740  1.1  mrg   mii->inc_insn = insn;
   4741  1.1  mrg   src = SET_SRC (pat);
   4742  1.1  mrg   mii->inc_input = XEXP (src, 0);
   4743  1.1  mrg 
   4744  1.1  mrg   if (!REG_P (XEXP (src, 0)))
   4745  1.1  mrg     return false;
   4746  1.1  mrg 
   4747  1.1  mrg   if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
   4748  1.1  mrg     return false;
   4749  1.1  mrg 
   4750  1.1  mrg   cst = XEXP (src, 1);
   4751  1.1  mrg   if (!CONST_INT_P (cst))
   4752  1.1  mrg     return false;
   4753  1.1  mrg   mii->inc_constant = INTVAL (cst);
   4754  1.1  mrg 
   4755  1.1  mrg   regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
   4756  1.1  mrg 
   4757  1.1  mrg   if (!before_mem)
   4758  1.1  mrg     {
   4759  1.1  mrg       mii->inc_constant = -mii->inc_constant;
   4760  1.1  mrg       if (!regs_equal)
   4761  1.1  mrg 	return false;
   4762  1.1  mrg     }
   4763  1.1  mrg 
   4764  1.1  mrg   if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
   4765  1.1  mrg     {
   4766  1.1  mrg       /* Note that the sign has already been reversed for !before_mem.  */
   4767  1.1  mrg       if (STACK_GROWS_DOWNWARD)
   4768  1.1  mrg 	return mii->inc_constant > 0;
   4769  1.1  mrg       else
   4770  1.1  mrg 	return mii->inc_constant < 0;
   4771  1.1  mrg     }
   4772  1.1  mrg   return true;
   4773  1.1  mrg }
   4774  1.1  mrg 
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  Returns true if a replacement was recorded.  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Walk either the hard backward deps or the forward deps of the memory
     insn, depending on which direction the increment is expected in.  */
  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand = backwards ? pro : con;
      /* Only deps representing a single register dependence qualify.  */
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* Need to assure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  /* Build the replacement address:
	     inc_input [+ index] + (mem_constant + inc_constant).  */
	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  /* Record how to rewrite the memory reference, and move the dep
	     link from the hard to the speculative back-deps list.  */
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  /* Keep the memory insn ordered with respect to the insns
	     around the increment.  */
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
   4856  1.1  mrg 
   4857  1.1  mrg /* A recursive function that walks ADDRESS_OF_X to find memory references
   4858  1.1  mrg    which could be modified during scheduling.  We call find_inc for each
   4859  1.1  mrg    one we find that has a recognizable form.  MII holds information about
   4860  1.1  mrg    the pair of memory/increment instructions.
   4861  1.1  mrg    We ensure that every instruction with a memory reference (which will be
   4862  1.1  mrg    the location of the replacement) is assigned at most one breakable
   4863  1.1  mrg    dependency.  */
   4864  1.1  mrg 
   4865  1.1  mrg static bool
   4866  1.1  mrg find_mem (struct mem_inc_info *mii, rtx *address_of_x)
   4867  1.1  mrg {
   4868  1.1  mrg   rtx x = *address_of_x;
   4869  1.1  mrg   enum rtx_code code = GET_CODE (x);
   4870  1.1  mrg   const char *const fmt = GET_RTX_FORMAT (code);
   4871  1.1  mrg   int i;
   4872  1.1  mrg 
   4873  1.1  mrg   if (code == MEM)
   4874  1.1  mrg     {
   4875  1.1  mrg       rtx reg0 = XEXP (x, 0);
   4876  1.1  mrg 
   4877  1.1  mrg       mii->mem_loc = address_of_x;
   4878  1.1  mrg       mii->mem_index = NULL_RTX;
   4879  1.1  mrg       mii->mem_constant = 0;
   4880  1.1  mrg       if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
   4881  1.1  mrg 	{
   4882  1.1  mrg 	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
   4883  1.1  mrg 	  reg0 = XEXP (reg0, 0);
   4884  1.1  mrg 	}
   4885  1.1  mrg       if (GET_CODE (reg0) == PLUS)
   4886  1.1  mrg 	{
   4887  1.1  mrg 	  mii->mem_index = XEXP (reg0, 1);
   4888  1.1  mrg 	  reg0 = XEXP (reg0, 0);
   4889  1.1  mrg 	}
   4890  1.1  mrg       if (REG_P (reg0))
   4891  1.1  mrg 	{
   4892  1.1  mrg 	  df_ref use;
   4893  1.1  mrg 	  int occurrences = 0;
   4894  1.1  mrg 
   4895  1.1  mrg 	  /* Make sure this reg appears only once in this insn.  Can't use
   4896  1.1  mrg 	     count_occurrences since that only works for pseudos.  */
   4897  1.1  mrg 	  FOR_EACH_INSN_USE (use, mii->mem_insn)
   4898  1.1  mrg 	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
   4899  1.1  mrg 	      if (++occurrences > 1)
   4900  1.1  mrg 		{
   4901  1.1  mrg 		  if (sched_verbose >= 5)
   4902  1.1  mrg 		    fprintf (sched_dump, "mem count failure\n");
   4903  1.1  mrg 		  return false;
   4904  1.1  mrg 		}
   4905  1.1  mrg 
   4906  1.1  mrg 	  mii->mem_reg0 = reg0;
   4907  1.1  mrg 	  return find_inc (mii, true) || find_inc (mii, false);
   4908  1.1  mrg 	}
   4909  1.1  mrg       return false;
   4910  1.1  mrg     }
   4911  1.1  mrg 
   4912  1.1  mrg   if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
   4913  1.1  mrg     {
   4914  1.1  mrg       /* If REG occurs inside a MEM used in a bit-field reference,
   4915  1.1  mrg 	 that is unacceptable.  */
   4916  1.1  mrg       return false;
   4917  1.1  mrg     }
   4918  1.1  mrg 
   4919  1.1  mrg   /* Time for some deep diving.  */
   4920  1.1  mrg   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
   4921  1.1  mrg     {
   4922  1.1  mrg       if (fmt[i] == 'e')
   4923  1.1  mrg 	{
   4924  1.1  mrg 	  if (find_mem (mii, &XEXP (x, i)))
   4925  1.1  mrg 	    return true;
   4926  1.1  mrg 	}
   4927  1.1  mrg       else if (fmt[i] == 'E')
   4928  1.1  mrg 	{
   4929  1.1  mrg 	  int j;
   4930  1.1  mrg 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
   4931  1.1  mrg 	    if (find_mem (mii, &XVECEXP (x, i, j)))
   4932  1.1  mrg 	      return true;
   4933  1.1  mrg 	}
   4934  1.1  mrg     }
   4935  1.1  mrg   return false;
   4936  1.1  mrg }
   4937  1.1  mrg 
   4938  1.1  mrg 
   4939  1.1  mrg /* Examine the instructions between HEAD and TAIL and try to find
   4940  1.1  mrg    dependencies that can be broken by modifying one of the patterns.  */
   4941  1.1  mrg 
   4942  1.1  mrg void
   4943  1.1  mrg find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
   4944  1.1  mrg {
   4945  1.1  mrg   rtx_insn *insn, *next_tail = NEXT_INSN (tail);
   4946  1.1  mrg   int success_in_block = 0;
   4947  1.1  mrg 
   4948  1.1  mrg   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
   4949  1.1  mrg     {
   4950  1.1  mrg       struct mem_inc_info mii;
   4951  1.1  mrg 
   4952  1.1  mrg       if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
   4953  1.1  mrg 	continue;
   4954  1.1  mrg 
   4955                 mii.mem_insn = insn;
   4956                 if (find_mem (&mii, &PATTERN (insn)))
   4957           	success_in_block++;
   4958               }
   4959             if (success_in_block && sched_verbose >= 5)
   4960               fprintf (sched_dump, "%d candidates for address modification found.\n",
   4961           	     success_in_block);
   4962           }
   4963           
   4964           #endif /* INSN_SCHEDULING */
   4965