Home | History | Annotate | Line # | Download | only in gcc
      1  1.1  mrg /* Instruction scheduling pass.  Selective scheduler and pipeliner.
      2  1.1  mrg    Copyright (C) 2006-2022 Free Software Foundation, Inc.
      3  1.1  mrg 
      4  1.1  mrg This file is part of GCC.
      5  1.1  mrg 
      6  1.1  mrg GCC is free software; you can redistribute it and/or modify it under
      7  1.1  mrg the terms of the GNU General Public License as published by the Free
      8  1.1  mrg Software Foundation; either version 3, or (at your option) any later
      9  1.1  mrg version.
     10  1.1  mrg 
     11  1.1  mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
     12  1.1  mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
     13  1.1  mrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     14  1.1  mrg for more details.
     15  1.1  mrg 
     16  1.1  mrg You should have received a copy of the GNU General Public License
     17  1.1  mrg along with GCC; see the file COPYING3.  If not see
     18  1.1  mrg <http://www.gnu.org/licenses/>.  */
     19  1.1  mrg 
     20  1.1  mrg #include "config.h"
     21  1.1  mrg #include "system.h"
     22  1.1  mrg #include "coretypes.h"
     23  1.1  mrg #include "backend.h"
     24  1.1  mrg #include "tree.h"
     25  1.1  mrg #include "rtl.h"
     26  1.1  mrg #include "df.h"
     27  1.1  mrg #include "memmodel.h"
     28  1.1  mrg #include "tm_p.h"
     29  1.1  mrg #include "regs.h"
     30  1.1  mrg #include "cfgbuild.h"
     31  1.1  mrg #include "cfgcleanup.h"
     32  1.1  mrg #include "insn-config.h"
     33  1.1  mrg #include "insn-attr.h"
     34  1.1  mrg #include "target.h"
     35  1.1  mrg #include "sched-int.h"
     36  1.1  mrg #include "rtlhooks-def.h"
     37  1.1  mrg #include "ira.h"
     38  1.1  mrg #include "ira-int.h"
     39  1.1  mrg #include "rtl-iter.h"
     40  1.1  mrg 
     41  1.1  mrg #ifdef INSN_SCHEDULING
     42  1.1  mrg #include "regset.h"
     43  1.1  mrg #include "cfgloop.h"
     44  1.1  mrg #include "sel-sched-ir.h"
     45  1.1  mrg #include "sel-sched-dump.h"
     46  1.1  mrg #include "sel-sched.h"
     47  1.1  mrg #include "dbgcnt.h"
     48  1.1  mrg #include "function-abi.h"
     49  1.1  mrg 
     50  1.1  mrg /* Implementation of selective scheduling approach.
     51  1.1  mrg    The below implementation follows the original approach with the following
     52  1.1  mrg    changes:
     53  1.1  mrg 
     54  1.1  mrg    o the scheduler works after register allocation (but can be also tuned
     55  1.1  mrg    to work before RA);
     56  1.1  mrg    o some instructions are not copied or register renamed;
     57  1.1  mrg    o conditional jumps are not moved with code duplication;
     58  1.1  mrg    o several jumps in one parallel group are not supported;
     59  1.1  mrg    o when pipelining outer loops, code motion through inner loops
     60  1.1  mrg    is not supported;
     61  1.1  mrg    o control and data speculation are supported;
     62  1.1  mrg    o some improvements for better compile time/performance were made.
     63  1.1  mrg 
     64  1.1  mrg    Terminology
     65  1.1  mrg    ===========
     66  1.1  mrg 
     67  1.1  mrg    A vinsn, or virtual insn, is an insn with additional data characterizing
     68  1.1  mrg    insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
     69  1.1  mrg    Vinsns also act as smart pointers to save memory by reusing them in
     70  1.1  mrg    different expressions.  A vinsn is described by vinsn_t type.
     71  1.1  mrg 
     72  1.1  mrg    An expression is a vinsn with additional data characterizing its properties
     73  1.1  mrg    at some point in the control flow graph.  The data may be its usefulness,
     74  1.1  mrg    priority, speculative status, whether it was renamed/substituted, etc.
     75  1.1  mrg    An expression is described by expr_t type.
     76  1.1  mrg 
     77  1.1  mrg    Availability set (av_set) is a set of expressions at a given control flow
     78  1.1  mrg    point. It is represented as av_set_t.  The expressions in av sets are kept
     79  1.1  mrg    sorted in the terms of expr_greater_p function.  It allows to truncate
     80  1.1  mrg    the set while leaving the best expressions.
     81  1.1  mrg 
     82  1.1  mrg    A fence is a point through which code motion is prohibited.  On each step,
     83  1.1  mrg    we gather a parallel group of insns at a fence.  It is possible to have
     84  1.1  mrg    multiple fences. A fence is represented via fence_t.
     85  1.1  mrg 
     86  1.1  mrg    A boundary is the border between the fence group and the rest of the code.
     87  1.1  mrg    Currently, we never have more than one boundary per fence, as we finalize
     88  1.1  mrg    the fence group when a jump is scheduled. A boundary is represented
     89  1.1  mrg    via bnd_t.
     90  1.1  mrg 
     91  1.1  mrg    High-level overview
     92  1.1  mrg    ===================
     93  1.1  mrg 
     94  1.1  mrg    The scheduler finds regions to schedule, schedules each one, and finalizes.
     95  1.1  mrg    The regions are formed starting from innermost loops, so that when the inner
     96  1.1  mrg    loop is pipelined, its prologue can be scheduled together with yet unprocessed
     97  1.1  mrg    outer loop. The rest of acyclic regions are found using extend_rgns:
     98  1.1  mrg    the blocks that are not yet allocated to any regions are traversed in top-down
     99  1.1  mrg    order, and a block is added to a region to which all its predecessors belong;
    100  1.1  mrg    otherwise, the block starts its own region.
    101  1.1  mrg 
    102  1.1  mrg    The main scheduling loop (sel_sched_region_2) consists of just
    103  1.1  mrg    scheduling on each fence and updating fences.  For each fence,
    104  1.1  mrg    we fill a parallel group of insns (fill_insns) until some insns can be added.
    105  1.1  mrg    First, we compute available exprs (av-set) at the boundary of the current
    106  1.1  mrg    group.  Second, we choose the best expression from it.  If the stall is
    107  1.1  mrg    required to schedule any of the expressions, we advance the current cycle
    108  1.1  mrg    appropriately.  So, the final group does not exactly correspond to a VLIW
    109  1.1  mrg    word.  Third, we move the chosen expression to the boundary (move_op)
    110  1.1  mrg    and update the intermediate av sets and liveness sets.  We quit fill_insns
    111  1.1  mrg    when either no insns left for scheduling or we have scheduled enough insns
    112  1.1  mrg    so we feel like advancing a scheduling point.
    113  1.1  mrg 
    114  1.1  mrg    Computing available expressions
    115  1.1  mrg    ===============================
    116  1.1  mrg 
    117  1.1  mrg    The computation (compute_av_set) is a bottom-up traversal.  At each insn,
    118  1.1  mrg    we're moving the union of its successors' sets through it via
    119  1.1  mrg    moveup_expr_set.  The dependent expressions are removed.  Local
    120  1.1  mrg    transformations (substitution, speculation) are applied to move more
    121  1.1  mrg    exprs.  Then the expr corresponding to the current insn is added.
    122  1.1  mrg    The result is saved on each basic block header.
    123  1.1  mrg 
    124  1.1  mrg    When traversing the CFG, we're moving down for no more than max_ws insns.
    125  1.1  mrg    Also, we do not move down to ineligible successors (is_ineligible_successor),
    126  1.1  mrg    which include moving along a back-edge, moving to already scheduled code,
    127  1.1  mrg    and moving to another fence.  The first two restrictions are lifted during
    128  1.1  mrg    pipelining, which allows us to move insns along a back-edge.  We always have
    129  1.1  mrg    an acyclic region for scheduling because we forbid motion through fences.
    130  1.1  mrg 
    131  1.1  mrg    Choosing the best expression
    132  1.1  mrg    ============================
    133  1.1  mrg 
    134  1.1  mrg    We sort the final availability set via sel_rank_for_schedule, then we remove
    135  1.1  mrg    expressions which are not yet ready (tick_check_p) or which dest registers
    136  1.1  mrg    cannot be used.  For some of them, we choose another register via
    137  1.1  mrg    find_best_reg.  To do this, we run find_used_regs to calculate the set of
    138  1.1  mrg    registers which cannot be used.  The find_used_regs function performs
    139  1.1  mrg    a traversal of code motion paths for an expr.  We consider for renaming
    140  1.1  mrg    only registers which are from the same regclass as the original one and
    141  1.1  mrg    using which does not interfere with any live ranges.  Finally, we convert
    142  1.1  mrg    the resulting set to the ready list format and use max_issue and reorder*
    143  1.1  mrg    hooks similarly to the Haifa scheduler.
    144  1.1  mrg 
    145  1.1  mrg    Scheduling the best expression
    146  1.1  mrg    ==============================
    147  1.1  mrg 
    148  1.1  mrg    We run the move_op routine to perform the same type of code motion paths
    149  1.1  mrg    traversal as in find_used_regs.  (These are working via the same driver,
    150  1.1  mrg    code_motion_path_driver.)  When moving down the CFG, we look for original
    151  1.1  mrg    instruction that gave birth to a chosen expression.  We undo
    152  1.1  mrg    the transformations performed on an expression via the history saved in it.
    153  1.1  mrg    When found, we remove the instruction or leave a reg-reg copy/speculation
    154  1.1  mrg    check if needed.  On a way up, we insert bookkeeping copies at each join
    155  1.1  mrg    point.  If a copy is not needed, it will be removed later during this
    156  1.1  mrg    traversal.  We update the saved av sets and liveness sets on the way up, too.
    157  1.1  mrg 
    158  1.1  mrg    Finalizing the schedule
    159  1.1  mrg    =======================
    160  1.1  mrg 
    161  1.1  mrg    When pipelining, we reschedule the blocks from which insns were pipelined
    162  1.1  mrg    to get a tighter schedule.  On Itanium, we also perform bundling via
    163  1.1  mrg    the same routine from ia64.cc.
    164  1.1  mrg 
    165  1.1  mrg    Dependence analysis changes
    166  1.1  mrg    ===========================
    167  1.1  mrg 
    168  1.1  mrg    We augmented the sched-deps.cc with hooks that get called when a particular
    169  1.1  mrg    dependence is found in a particular part of an insn.  Using these hooks, we
    170  1.1  mrg    can do several actions such as: determine whether an insn can be moved through
    171  1.1  mrg    another (has_dependence_p, moveup_expr); find out whether an insn can be
    172  1.1  mrg    scheduled on the current cycle (tick_check_p); find out registers that
    173  1.1  mrg    are set/used/clobbered by an insn and find out all the strange stuff that
    174  1.1  mrg    restrict its movement, like SCHED_GROUP_P or CANT_MOVE (done in
    175  1.1  mrg    init_global_and_expr_for_insn).
    176  1.1  mrg 
    177  1.1  mrg    Initialization changes
    178  1.1  mrg    ======================
    179  1.1  mrg 
    180  1.1  mrg    There are parts of haifa-sched.cc, sched-deps.cc, and sched-rgn.cc that are
    181  1.1  mrg    reused in all of the schedulers.  We have split up the initialization of data
    182  1.1  mrg    of such parts into different functions prefixed with scheduler type and
    183  1.1  mrg    postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
    184  1.1  mrg    sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
    185  1.1  mrg    The same splitting is done with current_sched_info structure:
    186  1.1  mrg    dependence-related parts are in sched_deps_info, common part is in
    187  1.1  mrg    common_sched_info, and haifa/sel/etc part is in current_sched_info.
    188  1.1  mrg 
    189  1.1  mrg    Target contexts
    190  1.1  mrg    ===============
    191  1.1  mrg 
    192  1.1  mrg    As we now have multiple-point scheduling, this would not work with backends
    193  1.1  mrg    which save some of the scheduler state to use it in the target hooks.
    194  1.1  mrg    For this purpose, we introduce a concept of target contexts, which
    195  1.1  mrg    encapsulate such information.  The backend should implement simple routines
    196  1.1  mrg    of allocating/freeing/setting such a context.  The scheduler calls these
    197  1.1  mrg    as target hooks and handles the target context as an opaque pointer (similar
    198  1.1  mrg    to the DFA state type, state_t).
    199  1.1  mrg 
    200  1.1  mrg    Various speedups
    201  1.1  mrg    ================
    202  1.1  mrg 
    203  1.1  mrg    As the correct data dependence graph is not supported during scheduling (which
    204  1.1  mrg    is to be changed in mid-term), we cache as much of the dependence analysis
    205  1.1  mrg    results as possible to avoid reanalyzing.  This includes: bitmap caches on
    206  1.1  mrg    each insn in stream of the region saying yes/no for a query with a pair of
    207  1.1  mrg    UIDs; hashtables with the previously done transformations on each insn in
    208  1.1  mrg    stream; a vector keeping a history of transformations on each expr.
    209  1.1  mrg 
    210  1.1  mrg    Also, we try to minimize the dependence context used on each fence to check
    211  1.1  mrg    whether the given expression is ready for scheduling by removing from it
    212  1.1  mrg    insns that are definitely completed the execution.  The results of
    213  1.1  mrg    tick_check_p checks are also cached in a vector on each fence.
    214  1.1  mrg 
    215  1.1  mrg    We keep a valid liveness set on each insn in a region to avoid the high
    216  1.1  mrg    cost of recomputation on large basic blocks.
    217  1.1  mrg 
    218  1.1  mrg    Finally, we try to minimize the number of needed updates to the availability
    219  1.1  mrg    sets.  The updates happen in two cases: when fill_insns terminates,
    220  1.1  mrg    we advance all fences and increase the stage number to show that the region
    221  1.1  mrg    has changed and the sets are to be recomputed; and when the next iteration
    222  1.1  mrg    of a loop in fill_insns happens (but this one reuses the saved av sets
    223  1.1  mrg    on bb headers.)  Thus, we try to break the fill_insns loop only when
    224  1.1  mrg    "significant" number of insns from the current scheduling window was
    225  1.1  mrg    scheduled.  This should be made a target param.
    226  1.1  mrg 
    227  1.1  mrg 
    228  1.1  mrg    TODO: correctly support the data dependence graph at all stages and get rid
    229  1.1  mrg    of all caches.  This should speed up the scheduler.
    230  1.1  mrg    TODO: implement moving cond jumps with bookkeeping copies on both targets.
    231  1.1  mrg    TODO: tune the scheduler before RA so it does not create too many pseudos.
    232  1.1  mrg 
    233  1.1  mrg 
    234  1.1  mrg    References:
    235  1.1  mrg    S.-M. Moon and K. Ebcioglu. Parallelizing nonnumerical code with
    236  1.1  mrg    selective scheduling and software pipelining.
    237  1.1  mrg    ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.
    238  1.1  mrg 
    239  1.1  mrg    Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
    240  1.1  mrg    and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
    241  1.1  mrg    for GCC. In Proceedings of GCC Developers' Summit 2006.
    242  1.1  mrg 
    243  1.1  mrg    Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
    244  1.1  mrg    Scheduler and Software Pipeliner on the Itanium Platform.   EPIC-7 Workshop.
    245  1.1  mrg    http://rogue.colorado.edu/EPIC7/.
    246  1.1  mrg 
    247  1.1  mrg */
    248  1.1  mrg 
    249  1.1  mrg /* True when pipelining is enabled.  */
    250  1.1  mrg bool pipelining_p;
    251  1.1  mrg 
    252  1.1  mrg /* True if bookkeeping is enabled.  */
    253  1.1  mrg bool bookkeeping_p;
    254  1.1  mrg 
    255  1.1  mrg /* Maximum number of insns that are eligible for renaming.  */
    256  1.1  mrg int max_insns_to_rename;
    257  1.1  mrg 
    258  1.1  mrg 
    260  1.1  mrg /* Definitions of local types and macros.  */
    261  1.1  mrg 
/* Represents possible outcomes of moving an expression through an insn,
   as reported by the moveup_expr machinery (see the "Computing available
   expressions" overview above).  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved through the insn at all.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated) while being moved.  */
    MOVEUP_EXPR_CHANGED
  };
    277  1.1  mrg 
/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter — presumably the number of occurrences found
     or left to process; confirm against the callbacks that use it.  */
  int n;
};

/* Pointer shorthand for passing the argument through void* callbacks.  */
typedef struct rtx_search_arg *rtx_search_arg_p;
    289  1.1  mrg 
/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  The per-mode and
   per-register sets are filled lazily (see the _ok flag and the
   "always set" convention below).  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized; false means the
     corresponding entry above has not been computed yet.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, it has regs that are ok to rename into it.
     The register in question is always set.  If not, this means
     that the whole set is not computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers (only on targets that define STACK_REGS).  */
  HARD_REG_SET stack_regs;
#endif
};
    314  1.1  mrg 
/* Holds the results of computation of available for renaming and
   unavailable hard registers (see find_used_regs in the overview
   above).  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* The set of ABIs used by calls that the code motion path crosses.  */
  unsigned int crossed_call_abis : NUM_ABI_IDS;
};
    328  1.1  mrg 
    329  1.1  mrg /* A global structure that contains the needed information about hard
    330  1.1  mrg    regs.  */
    331  1.1  mrg static struct hard_regs_data sel_hrd;
    332  1.1  mrg 
    333  1.1  mrg 
/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in code_motion_path_driver routine itself, but only in
   its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};
    361  1.1  mrg 
/* Stores the static (read-only across the traversal) parameters
   for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* An UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

  /* This is initialized to the insn on which the driver stopped its
     traversal.  */
  insn_t failed_insn;

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};
    381  1.1  mrg 
/* Stores the static parameters for fur_* calls ("fur" pairs with
   find_used_regs, as move_op_* pairs with move_op; see the hooks
   declarations below).  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* The set of ABIs used by calls that the code motion path crosses.  */
  unsigned int crossed_call_abis : NUM_ABI_IDS;
};

/* Pointer typedefs used when the params travel through void* hook
   arguments.  */
typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;
    398  1.1  mrg 
/* Set of hooks and parameters that determine behavior specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on enter to the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending current basic block if current insn is not
     the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};
    435  1.1  mrg 
    436  1.1  mrg /* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
    437  1.1  mrg    FUR_HOOKS.  */
    438  1.1  mrg struct code_motion_path_driver_info_def *code_motion_path_driver_info;
    439  1.1  mrg 
    440  1.1  mrg /* Set of hooks for performing move_op and find_used_regs routines with
    441  1.1  mrg    code_motion_path_driver.  */
    442  1.1  mrg extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;
    443  1.1  mrg 
    444  1.1  mrg /* True if/when we want to emulate Haifa scheduler in the common code.
    445  1.1  mrg    This is used in sched_rgn_local_init and in various places in
    446  1.1  mrg    sched-deps.cc.  */
    447  1.1  mrg int sched_emulate_haifa_p;
    448  1.1  mrg 
    449  1.1  mrg /* GLOBAL_LEVEL is used to discard information stored in basic block headers
    450  1.1  mrg    av_sets.  Av_set of bb header is valid if its (bb header's) level is equal
    451  1.1  mrg    to GLOBAL_LEVEL.  And invalid if lesser.  This is primarily used to advance
    452  1.1  mrg    scheduling window.  */
    453  1.1  mrg int global_level;
    454  1.1  mrg 
    455  1.1  mrg /* Current fences.  */
    456  1.1  mrg flist_t fences;
    457  1.1  mrg 
    458  1.1  mrg /* True when separable insns should be scheduled as RHSes.  */
    459  1.1  mrg static bool enable_schedule_as_rhs_p;
    460  1.1  mrg 
    461  1.1  mrg /* Used in verify_target_availability to assert that target reg is reported
    462  1.1  mrg    unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
    463  1.1  mrg    we haven't scheduled anything on the previous fence.
    464  1.1  mrg    if scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
    465  1.1  mrg    have more conservative value than the one returned by the
    466  1.1  mrg    find_used_regs, thus we shouldn't assert that these values are equal.  */
    467  1.1  mrg static bool scheduled_something_on_previous_fence;
    468  1.1  mrg 
    469  1.1  mrg /* All newly emitted insns will have their uids greater than this value.  */
    470  1.1  mrg static int first_emitted_uid;
    471  1.1  mrg 
    472  1.1  mrg /* Set of basic blocks that are forced to start new ebbs.  This is a subset
    473  1.1  mrg    of all the ebb heads.  */
    474  1.1  mrg bitmap forced_ebb_heads;
    475  1.1  mrg 
    476  1.1  mrg /* Blocks that need to be rescheduled after pipelining.  */
    477  1.1  mrg bitmap blocks_to_reschedule = NULL;
    478  1.1  mrg 
    479  1.1  mrg /* True when the first lv set should be ignored when updating liveness.  */
    480  1.1  mrg static bool ignore_first = false;
    481  1.1  mrg 
    482  1.1  mrg /* Number of insns max_issue has initialized data structures for.  */
    483  1.1  mrg static int max_issue_size = 0;
    484  1.1  mrg 
    485  1.1  mrg /* Whether we can issue more instructions.  */
    486  1.1  mrg static int can_issue_more;
    487  1.1  mrg 
    488  1.1  mrg /* Maximum software lookahead window size, reduced when rescheduling after
    489  1.1  mrg    pipelining.  */
    490  1.1  mrg static int max_ws;
    491  1.1  mrg 
    492  1.1  mrg /* Number of insns scheduled in current region.  */
    493  1.1  mrg static int num_insns_scheduled;
    494  1.1  mrg 
    495  1.1  mrg /* A vector of expressions is used to be able to sort them.  */
    496  1.1  mrg static vec<expr_t> vec_av_set;
    497  1.1  mrg 
    498  1.1  mrg /* A vector of vinsns is used to hold temporary lists of vinsns.  */
    499  1.1  mrg typedef vec<vinsn_t> vinsn_vec_t;
    500  1.1  mrg 
    501  1.1  mrg /* This vector has the exprs which may still be present in av_sets, but actually
    502  1.1  mrg    can't be moved up due to bookkeeping created during code motion to another
    503  1.1  mrg    fence.  See comment near the call to update_and_record_unavailable_insns
    504  1.1  mrg    for the detailed explanations.  */
    505  1.1  mrg static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t ();
    506  1.1  mrg 
    507  1.1  mrg /* This vector has vinsns which are scheduled with renaming on the first fence
    508  1.1  mrg    and then seen on the second.  For expressions with such vinsns, target
    509  1.1  mrg    availability information may be wrong.  */
    510  1.1  mrg static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t ();
    511  1.1  mrg 
    512  1.1  mrg /* Vector to store temporary nops inserted in move_op to prevent removal
    513  1.1  mrg    of empty bbs.  */
    514  1.1  mrg static vec<insn_t> vec_temp_moveop_nops;
    515  1.1  mrg 
    516  1.1  mrg /* These bitmaps record original instructions scheduled on the current
    517  1.1  mrg    iteration and bookkeeping copies created by them.  */
    518  1.1  mrg static bitmap current_originators = NULL;
    519  1.1  mrg static bitmap current_copies = NULL;
    520  1.1  mrg 
    521  1.1  mrg /* This bitmap marks the blocks visited by code_motion_path_driver so we don't
    522  1.1  mrg    visit them afterwards.  */
    523  1.1  mrg static bitmap code_motion_visited_blocks = NULL;
    524  1.1  mrg 
    525  1.1  mrg /* Variables to accumulate different statistics.  */
    526  1.1  mrg 
    527  1.1  mrg /* The number of bookkeeping copies created.  */
    528  1.1  mrg static int stat_bookkeeping_copies;
    529  1.1  mrg 
    530  1.1  mrg /* The number of insns that required bookkeeping for their scheduling.  */
    531  1.1  mrg static int stat_insns_needed_bookkeeping;
    532  1.1  mrg 
    533  1.1  mrg /* The number of insns that got renamed.  */
    534  1.1  mrg static int stat_renamed_scheduled;
    535  1.1  mrg 
    536  1.1  mrg /* The number of substitutions made during scheduling.  */
    537  1.1  mrg static int stat_substitutions_total;
    538  1.1  mrg 
    539  1.1  mrg 
/* Forward declarations of static functions defined later in this file.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);
    559  1.1  mrg 
    560  1.1  mrg 
    562  1.1  mrg /* Functions that work with fences.  */
    563  1.1  mrg 
    564  1.1  mrg /* Advance one cycle on FENCE.  */
    565  1.1  mrg static void
    566  1.1  mrg advance_one_cycle (fence_t fence)
    567  1.1  mrg {
    568  1.1  mrg   unsigned i;
    569  1.1  mrg   int cycle;
    570  1.1  mrg   rtx_insn *insn;
    571  1.1  mrg 
    572  1.1  mrg   advance_state (FENCE_STATE (fence));
    573  1.1  mrg   cycle = ++FENCE_CYCLE (fence);
    574  1.1  mrg   FENCE_ISSUED_INSNS (fence) = 0;
    575  1.1  mrg   FENCE_STARTS_CYCLE_P (fence) = 1;
    576  1.1  mrg   can_issue_more = issue_rate;
    577  1.1  mrg   FENCE_ISSUE_MORE (fence) = can_issue_more;
    578  1.1  mrg 
    579  1.1  mrg   for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
    580  1.1  mrg     {
    581  1.1  mrg       if (INSN_READY_CYCLE (insn) < cycle)
    582  1.1  mrg         {
    583  1.1  mrg           remove_from_deps (FENCE_DC (fence), insn);
    584  1.1  mrg           FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
    585  1.1  mrg           continue;
    586  1.1  mrg         }
    587  1.1  mrg       i++;
    588  1.1  mrg     }
    589  1.1  mrg   if (sched_verbose >= 2)
    590  1.1  mrg     {
    591  1.1  mrg       sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
    592  1.1  mrg       debug_state (FENCE_STATE (fence));
    593  1.1  mrg     }
    594  1.1  mrg }
    595  1.1  mrg 
    596  1.1  mrg /* Returns true when SUCC in a fallthru bb of INSN, possibly
    597  1.1  mrg    skipping empty basic blocks.  */
    598  1.1  mrg static bool
    599  1.1  mrg in_fallthru_bb_p (rtx_insn *insn, rtx succ)
    600  1.1  mrg {
    601  1.1  mrg   basic_block bb = BLOCK_FOR_INSN (insn);
    602  1.1  mrg   edge e;
    603  1.1  mrg 
    604  1.1  mrg   if (bb == BLOCK_FOR_INSN (succ))
    605  1.1  mrg     return true;
    606  1.1  mrg 
    607  1.1  mrg   e = find_fallthru_edge_from (bb);
    608  1.1  mrg   if (e)
    609  1.1  mrg     bb = e->dest;
    610  1.1  mrg   else
    611  1.1  mrg     return false;
    612  1.1  mrg 
    613  1.1  mrg   while (sel_bb_empty_p (bb))
    614  1.1  mrg     bb = bb->next_bb;
    615  1.1  mrg 
    616  1.1  mrg   return bb == BLOCK_FOR_INSN (succ);
    617  1.1  mrg }
    618  1.1  mrg 
/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue a ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
			 int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      /* The fence continues either with the next insn of this block or
	 with the head of the single fall-through-like successor.  */
      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      /* Only continue onto code from before this round (seqno within the
	 original range) that has not yet been scheduled.  */
      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (seqno > 0 && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
	  /* A successor in the same ebb or in the fallthru block keeps the
	     fence's scheduling state; any other successor starts clean.  */
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}
    697  1.1  mrg 
    698  1.1  mrg 
    700  1.1  mrg /* Functions to support substitution.  */
    701  1.1  mrg 
    702  1.1  mrg /* Returns whether INSN with dependence status DS is eligible for
    703  1.1  mrg    substitution, i.e. it's a copy operation x := y, and RHS that is
    704  1.1  mrg    moved up through this insn should be substituted.  */
    705  1.1  mrg static bool
    706  1.1  mrg can_substitute_through_p (insn_t insn, ds_t ds)
    707  1.1  mrg {
    708  1.1  mrg   /* We can substitute only true dependencies.  */
    709  1.1  mrg   if ((ds & DEP_OUTPUT)
    710  1.1  mrg       || (ds & DEP_ANTI)
    711  1.1  mrg       || ! INSN_RHS (insn)
    712  1.1  mrg       || ! INSN_LHS (insn))
    713  1.1  mrg     return false;
    714  1.1  mrg 
    715  1.1  mrg   /* Now we just need to make sure the INSN_RHS consists of only one
    716  1.1  mrg      simple REG rtx.  */
    717  1.1  mrg   if (REG_P (INSN_LHS (insn))
    718  1.1  mrg       && REG_P (INSN_RHS (insn)))
    719  1.1  mrg     return true;
    720  1.1  mrg   return false;
    721  1.1  mrg }
    722  1.1  mrg 
/* Substitute all occurrences of INSN's destination in EXPR' vinsn with INSN's
   source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   be not performed because it's either EXPR' vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose new
     register for the RHS, we don't want to change RHS' original reg.
     If the insn is not SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
	   ? &VINSN_RHS (*vi)
	   : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx_insn *new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
	 used instead of SET_SRC.  */
      where_replace = (has_rhs
		       ? &SET_SRC (PATTERN (new_insn))
		       : &PATTERN (new_insn));

      /* Try the replacement on the copy; validity is checked against the
	 target's insn patterns without simplifying the result.  */
      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, constrain_operands result depends upon choice of
         destination register.  E.g. if we allow single register to be an rhs,
	 and if we try to move dx=ax(as rhs) through ax=dx, we'll result
	 in invalid insn dx=dx, so we'll lose this rhs here.
	 Just can't come up with significant testcase for this, so just
	 leaving it for now.  */
      if (new_insn_valid)
	{
	  change_vinsn_in_expr (expr,
				create_vinsn_from_insn_rtx (new_insn, false));

	  /* Do not allow clobbering the address register of speculative
             insns.  */
	  if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
					 expr_dest_reg (expr)))
	    EXPR_TARGET_AVAILABLE (expr) = false;

	  return true;
	}
      else
        return false;
    }
  else
    return false;
}
    796  1.1  mrg 
    797  1.1  mrg /* Return the number of places WHAT appears within WHERE.
    798  1.1  mrg    Bail out when we found a reference occupying several hard registers.  */
    799  1.1  mrg static int
    800  1.1  mrg count_occurrences_equiv (const_rtx what, const_rtx where)
    801  1.1  mrg {
    802  1.1  mrg   int count = 0;
    803  1.1  mrg   subrtx_iterator::array_type array;
    804  1.1  mrg   FOR_EACH_SUBRTX (iter, array, where, NONCONST)
    805  1.1  mrg     {
    806  1.1  mrg       const_rtx x = *iter;
    807  1.1  mrg       if (REG_P (x) && REGNO (x) == REGNO (what))
    808  1.1  mrg 	{
    809  1.1  mrg 	  /* Bail out if mode is different or more than one register is
    810  1.1  mrg 	     used.  */
    811  1.1  mrg 	  if (GET_MODE (x) != GET_MODE (what) || REG_NREGS (x) > 1)
    812  1.1  mrg 	    return 0;
    813  1.1  mrg 	  count += 1;
    814  1.1  mrg 	}
    815  1.1  mrg       else if (GET_CODE (x) == SUBREG
    816  1.1  mrg 	       && (!REG_P (SUBREG_REG (x))
    817  1.1  mrg 		   || REGNO (SUBREG_REG (x)) == REGNO (what)))
    818  1.1  mrg 	/* ??? Do not support substituting regs inside subregs.  In that case,
    819  1.1  mrg 	   simplify_subreg will be called by validate_replace_rtx, and
    820  1.1  mrg 	   unsubstitution will fail later.  */
    821  1.1  mrg 	return 0;
    822  1.1  mrg     }
    823  1.1  mrg   return count;
    824  1.1  mrg }
    825  1.1  mrg 
    826  1.1  mrg /* Returns TRUE if WHAT is found in WHERE rtx tree.  */
    827  1.1  mrg static bool
    828  1.1  mrg rtx_ok_for_substitution_p (rtx what, rtx where)
    829  1.1  mrg {
    830  1.1  mrg   return (count_occurrences_equiv (what, where) > 0);
    831  1.1  mrg }
    832  1.1  mrg 
    833  1.1  mrg 
    835  1.1  mrg /* Functions to support register renaming.  */
    836  1.1  mrg 
    837  1.1  mrg /* Substitute VI's set source with REGNO.  Returns newly created pattern
    838  1.1  mrg    that has REGNO as its source.  */
    839  1.1  mrg static rtx_insn *
    840  1.1  mrg create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
    841  1.1  mrg {
    842  1.1  mrg   rtx lhs_rtx;
    843  1.1  mrg   rtx pattern;
    844  1.1  mrg   rtx_insn *insn_rtx;
    845  1.1  mrg 
    846  1.1  mrg   lhs_rtx = copy_rtx (VINSN_LHS (vi));
    847  1.1  mrg 
    848  1.1  mrg   pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
    849  1.1  mrg   insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);
    850  1.1  mrg 
    851  1.1  mrg   return insn_rtx;
    852  1.1  mrg }
    853  1.1  mrg 
    854  1.1  mrg /* Returns whether INSN's src can be replaced with register number
    855  1.1  mrg    NEW_SRC_REG. E.g. the following insn is valid for i386:
    856  1.1  mrg 
    857  1.1  mrg     (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
    858  1.1  mrg       (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
    859  1.1  mrg 			(reg:SI 0 ax [orig:770 c1 ] [770]))
    860  1.1  mrg 		    (const_int 288 [0x120])) [0 str S1 A8])
    861  1.1  mrg 	    (const_int 0 [0x0])) 43 {*movqi_1} (nil)
    862  1.1  mrg 	(nil))
    863  1.1  mrg 
    864  1.1  mrg   But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
    865  1.1  mrg   because of operand constraints:
    866  1.1  mrg 
    867  1.1  mrg     (define_insn "*movqi_1"
    868  1.1  mrg       [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
    869  1.1  mrg 	    (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
    870  1.1  mrg 	    )]
    871  1.1  mrg 
    872  1.1  mrg   So do constrain_operands here, before choosing NEW_SRC_REG as best
    873  1.1  mrg   reg for rhs.  */
    874  1.1  mrg 
    875  1.1  mrg static bool
    876  1.1  mrg replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
    877  1.1  mrg {
    878  1.1  mrg   vinsn_t vi = INSN_VINSN (insn);
    879  1.1  mrg   machine_mode mode;
    880  1.1  mrg   rtx dst_loc;
    881  1.1  mrg   bool res;
    882  1.1  mrg 
    883  1.1  mrg   gcc_assert (VINSN_SEPARABLE_P (vi));
    884  1.1  mrg 
    885  1.1  mrg   get_dest_and_mode (insn, &dst_loc, &mode);
    886  1.1  mrg   gcc_assert (mode == GET_MODE (new_src_reg));
    887  1.1  mrg 
    888  1.1  mrg   if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    889  1.1  mrg     return true;
    890  1.1  mrg 
    891  1.1  mrg   /* See whether SET_SRC can be replaced with this register.  */
    892  1.1  mrg   validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
    893  1.1  mrg   res = verify_changes (0);
    894  1.1  mrg   cancel_changes (0);
    895  1.1  mrg 
    896  1.1  mrg   return res;
    897  1.1  mrg }
    898  1.1  mrg 
    899  1.1  mrg /* Returns whether INSN still be valid after replacing it's DEST with
    900  1.1  mrg    register NEW_REG.  */
    901  1.1  mrg static bool
    902  1.1  mrg replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
    903  1.1  mrg {
    904  1.1  mrg   vinsn_t vi = INSN_VINSN (insn);
    905  1.1  mrg   bool res;
    906  1.1  mrg 
    907  1.1  mrg   /* We should deal here only with separable insns.  */
    908  1.1  mrg   gcc_assert (VINSN_SEPARABLE_P (vi));
    909  1.1  mrg   gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));
    910  1.1  mrg 
    911  1.1  mrg   /* See whether SET_DEST can be replaced with this register.  */
    912  1.1  mrg   validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
    913  1.1  mrg   res = verify_changes (0);
    914  1.1  mrg   cancel_changes (0);
    915  1.1  mrg 
    916  1.1  mrg   return res;
    917  1.1  mrg }
    918  1.1  mrg 
    919  1.1  mrg /* Create a pattern with rhs of VI and lhs of LHS_RTX.  */
    920  1.1  mrg static rtx_insn *
    921  1.1  mrg create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
    922  1.1  mrg {
    923  1.1  mrg   rtx rhs_rtx;
    924  1.1  mrg   rtx pattern;
    925  1.1  mrg   rtx_insn *insn_rtx;
    926  1.1  mrg 
    927  1.1  mrg   rhs_rtx = copy_rtx (VINSN_RHS (vi));
    928  1.1  mrg 
    929  1.1  mrg   pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
    930  1.1  mrg   insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);
    931  1.1  mrg 
    932  1.1  mrg   return insn_rtx;
    933  1.1  mrg }
    934  1.1  mrg 
    935  1.1  mrg /* Substitute lhs in the given expression EXPR for the register with number
    936  1.1  mrg    NEW_REGNO.  SET_DEST may be arbitrary rtx, not only register.  */
    937  1.1  mrg static void
    938  1.1  mrg replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
    939  1.1  mrg {
    940  1.1  mrg   rtx_insn *insn_rtx;
    941  1.1  mrg   vinsn_t vinsn;
    942  1.1  mrg 
    943  1.1  mrg   insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
    944  1.1  mrg   vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);
    945  1.1  mrg 
    946  1.1  mrg   change_vinsn_in_expr (expr, vinsn);
    947  1.1  mrg   EXPR_WAS_RENAMED (expr) = 1;
    948  1.1  mrg   EXPR_TARGET_AVAILABLE (expr) = 1;
    949  1.1  mrg }
    950  1.1  mrg 
    951  1.1  mrg /* Returns whether VI writes either one of the USED_REGS registers or,
    952  1.1  mrg    if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
    953  1.1  mrg static bool
    954  1.1  mrg vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
    955  1.1  mrg                             HARD_REG_SET unavailable_hard_regs)
    956  1.1  mrg {
    957  1.1  mrg   unsigned regno;
    958  1.1  mrg   reg_set_iterator rsi;
    959  1.1  mrg 
    960  1.1  mrg   EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    961  1.1  mrg     {
    962  1.1  mrg       if (REGNO_REG_SET_P (used_regs, regno))
    963  1.1  mrg         return true;
    964  1.1  mrg       if (HARD_REGISTER_NUM_P (regno)
    965  1.1  mrg           && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
    966  1.1  mrg 	return true;
    967  1.1  mrg     }
    968  1.1  mrg 
    969  1.1  mrg   EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    970  1.1  mrg     {
    971  1.1  mrg       if (REGNO_REG_SET_P (used_regs, regno))
    972  1.1  mrg         return true;
    973  1.1  mrg       if (HARD_REGISTER_NUM_P (regno)
    974  1.1  mrg           && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
    975  1.1  mrg 	return true;
    976  1.1  mrg     }
    977  1.1  mrg 
    978  1.1  mrg   return false;
    979  1.1  mrg }
    980  1.1  mrg 
/* Returns register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   destination register of a call insn.

   Code adopted from regrename.cc::build_def_use.  */
static enum reg_class
get_reg_class (rtx_insn *insn)
{
  int i, n_ops;

  /* Populate recog_data and the operand-alternative tables for INSN.  */
  extract_constrain_insn (insn);
  preprocess_constraints (insn);
  n_ops = recog_data.n_operands;

  const operand_alternative *op_alt = which_op_alt ();
  if (asm_noperands (PATTERN (insn)) > 0)
    {
      /* For inline asm, return the class of the first output operand
	 whose register has actually been renamed already.  */
      for (i = 0; i < n_ops; i++)
	if (recog_data.operand_type[i] == OP_OUT)
	  {
	    rtx *loc = recog_data.operand_loc[i];
	    rtx op = *loc;
	    enum reg_class cl = alternative_class (op_alt, i);

	    /* Skip operands still carrying their original register.  */
	    if (REG_P (op)
		&& REGNO (op) == ORIGINAL_REGNO (op))
	      continue;

	    return cl;
	  }
    }
  else if (!CALL_P (insn))
    {
      /* For ordinary insns, return the class of the first output or
	 in/out operand (including match_dup copies).  */
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
       {
	 int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
	 enum reg_class cl = alternative_class (op_alt, opn);

	 if (recog_data.operand_type[opn] == OP_OUT ||
	     recog_data.operand_type[opn] == OP_INOUT)
	   return cl;
       }
    }

/*  Insns like
    (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
    may result in returning NO_REGS, cause flags is written implicitly through
    CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}
   1031  1.1  mrg 
   1032  1.1  mrg /* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
   1033  1.1  mrg static void
   1034  1.1  mrg init_hard_regno_rename (int regno)
   1035  1.1  mrg {
   1036  1.1  mrg   int cur_reg;
   1037  1.1  mrg 
   1038  1.1  mrg   SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);
   1039  1.1  mrg 
   1040  1.1  mrg   for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
   1041  1.1  mrg     {
   1042  1.1  mrg       /* We are not interested in renaming in other regs.  */
   1043  1.1  mrg       if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
   1044  1.1  mrg         continue;
   1045  1.1  mrg 
   1046  1.1  mrg       if (HARD_REGNO_RENAME_OK (regno, cur_reg))
   1047  1.1  mrg         SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
   1048  1.1  mrg     }
   1049  1.1  mrg }
   1050  1.1  mrg 
   1051  1.1  mrg /* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   1052  1.1  mrg    data first.  */
   1053  1.1  mrg static inline bool
   1054  1.1  mrg sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
   1055  1.1  mrg {
   1056  1.1  mrg   /* Check whether this is all calculated.  */
   1057  1.1  mrg   if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
   1058  1.1  mrg     return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
   1059  1.1  mrg 
   1060  1.1  mrg   init_hard_regno_rename (from);
   1061  1.1  mrg 
   1062  1.1  mrg   return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
   1063  1.1  mrg }
   1064  1.1  mrg 
   1065  1.1  mrg /* Calculate set of registers that are capable of holding MODE.  */
   1066  1.1  mrg static void
   1067  1.1  mrg init_regs_for_mode (machine_mode mode)
   1068  1.1  mrg {
   1069  1.1  mrg   int cur_reg;
   1070  1.1  mrg 
   1071  1.1  mrg   CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
   1072  1.1  mrg 
   1073  1.1  mrg   for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
   1074  1.1  mrg     {
   1075  1.1  mrg       int nregs;
   1076  1.1  mrg       int i;
   1077  1.1  mrg 
   1078  1.1  mrg       /* See whether it accepts all modes that occur in
   1079  1.1  mrg          original insns.  */
   1080  1.1  mrg       if (!targetm.hard_regno_mode_ok (cur_reg, mode))
   1081  1.1  mrg         continue;
   1082  1.1  mrg 
   1083  1.1  mrg       nregs = hard_regno_nregs (cur_reg, mode);
   1084  1.1  mrg 
   1085  1.1  mrg       for (i = nregs - 1; i >= 0; --i)
   1086  1.1  mrg         if (fixed_regs[cur_reg + i]
   1087  1.1  mrg                 || global_regs[cur_reg + i]
   1088  1.1  mrg             /* Can't use regs which aren't saved by
   1089  1.1  mrg                the prologue.  */
   1090  1.1  mrg             || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
   1091  1.1  mrg 	    /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
   1092  1.1  mrg 	       it affects aliasing globally and invalidates all AV sets.  */
   1093  1.1  mrg 	    || get_reg_base_value (cur_reg + i)
   1094  1.1  mrg #ifdef LEAF_REGISTERS
   1095  1.1  mrg             /* We can't use a non-leaf register if we're in a
   1096  1.1  mrg                leaf function.  */
   1097  1.1  mrg             || (crtl->is_leaf
   1098  1.1  mrg                 && !LEAF_REGISTERS[cur_reg + i])
   1099  1.1  mrg #endif
   1100  1.1  mrg             )
   1101  1.1  mrg           break;
   1102  1.1  mrg 
   1103  1.1  mrg       if (i >= 0)
   1104  1.1  mrg         continue;
   1105  1.1  mrg 
   1106  1.1  mrg       /* If the CUR_REG passed all the checks above,
   1107  1.1  mrg          then it's ok.  */
   1108  1.1  mrg       SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
   1109  1.1  mrg     }
   1110  1.1  mrg 
   1111  1.1  mrg   sel_hrd.regs_for_mode_ok[mode] = true;
   1112  1.1  mrg }
   1113  1.1  mrg 
   1114  1.1  mrg /* Init all register sets gathered in HRD.  */
   1115  1.1  mrg static void
   1116  1.1  mrg init_hard_regs_data (void)
   1117  1.1  mrg {
   1118  1.1  mrg   int cur_reg = 0;
   1119  1.1  mrg   int cur_mode = 0;
   1120  1.1  mrg 
   1121  1.1  mrg   CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
   1122  1.1  mrg   for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
   1123  1.1  mrg     if (df_regs_ever_live_p (cur_reg)
   1124  1.1  mrg 	|| crtl->abi->clobbers_full_reg_p (cur_reg))
   1125  1.1  mrg       SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);
   1126  1.1  mrg 
   1127  1.1  mrg   /* Initialize registers that are valid based on mode when this is
   1128  1.1  mrg      really needed.  */
   1129  1.1  mrg   for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
   1130  1.1  mrg     sel_hrd.regs_for_mode_ok[cur_mode] = false;
   1131  1.1  mrg 
   1132  1.1  mrg   /* Mark that all HARD_REGNO_RENAME_OK is not calculated.  */
   1133  1.1  mrg   for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
   1134  1.1  mrg     CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);
   1135  1.1  mrg 
   1136  1.1  mrg #ifdef STACK_REGS
   1137  1.1  mrg   CLEAR_HARD_REG_SET (sel_hrd.stack_regs);
   1138  1.1  mrg 
   1139  1.1  mrg   for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
   1140  1.1  mrg     SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
   1141  1.1  mrg #endif
   1142  1.1  mrg }
   1143  1.1  mrg 
   1144  1.1  mrg /* Mark hardware regs in REG_RENAME_P that are not suitable
   1145  1.1  mrg    for renaming rhs in INSN due to hardware restrictions (register class,
   1146  1.1  mrg    modes compatibility etc).  This doesn't affect original insn's dest reg,
   1147  1.1  mrg    if it isn't in USED_REGS.  DEF is a definition insn of rhs for which the
   1148  1.1  mrg    destination register is sought.  LHS (DEF->ORIG_INSN) may be REG or MEM.
   1149  1.1  mrg    Registers that are in used_regs are always marked in
   1150  1.1  mrg    unavailable_hard_regs as well.  */
   1151  1.1  mrg 
   1152  1.1  mrg static void
   1153  1.1  mrg mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
   1154  1.1  mrg                             regset used_regs ATTRIBUTE_UNUSED)
   1155  1.1  mrg {
   1156  1.1  mrg   machine_mode mode;
   1157  1.1  mrg   enum reg_class cl = NO_REGS;
   1158  1.1  mrg   rtx orig_dest;
   1159  1.1  mrg   unsigned cur_reg, regno;
   1160  1.1  mrg   hard_reg_set_iterator hrsi;
   1161  1.1  mrg 
   1162  1.1  mrg   gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
   1163  1.1  mrg   gcc_assert (reg_rename_p);
   1164  1.1  mrg 
   1165  1.1  mrg   orig_dest = SET_DEST (PATTERN (def->orig_insn));
   1166  1.1  mrg 
   1167  1.1  mrg   /* We have decided not to rename 'mem = something;' insns, as 'something'
   1168  1.1  mrg      is usually a register.  */
   1169  1.1  mrg   if (!REG_P (orig_dest))
   1170  1.1  mrg     return;
   1171  1.1  mrg 
   1172  1.1  mrg   regno = REGNO (orig_dest);
   1173  1.1  mrg 
   1174  1.1  mrg   /* If before reload, don't try to work with pseudos.  */
   1175  1.1  mrg   if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
   1176  1.1  mrg     return;
   1177  1.1  mrg 
   1178  1.1  mrg   if (reload_completed)
   1179  1.1  mrg     cl = get_reg_class (def->orig_insn);
   1180  1.1  mrg 
   1181  1.1  mrg   /* Stop if the original register is one of the fixed_regs, global_regs or
   1182  1.1  mrg      frame pointer, or we could not discover its class.  */
   1183  1.1  mrg   if (fixed_regs[regno]
   1184  1.1  mrg       || global_regs[regno]
   1185  1.1  mrg       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER && frame_pointer_needed
   1186  1.1  mrg 	  && regno == HARD_FRAME_POINTER_REGNUM)
   1187  1.1  mrg       || (HARD_FRAME_POINTER_IS_FRAME_POINTER && frame_pointer_needed
   1188  1.1  mrg 	  && regno == FRAME_POINTER_REGNUM)
   1189  1.1  mrg       || (reload_completed && cl == NO_REGS))
   1190  1.1  mrg     {
   1191  1.1  mrg       SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);
   1192  1.1  mrg 
   1193  1.1  mrg       /* Give a chance for original register, if it isn't in used_regs.  */
   1194  1.1  mrg       if (!def->crossed_call_abis)
   1195  1.1  mrg         CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);
   1196  1.1  mrg 
   1197  1.1  mrg       return;
   1198  1.1  mrg     }
   1199  1.1  mrg 
   1200  1.1  mrg   /* If something allocated on stack in this function, mark frame pointer
   1201  1.1  mrg      register unavailable, considering also modes.
   1202  1.1  mrg      FIXME: it is enough to do this once per all original defs.  */
   1203  1.1  mrg   if (frame_pointer_needed)
   1204  1.1  mrg     {
   1205  1.1  mrg       add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
   1206  1.1  mrg 			   Pmode, FRAME_POINTER_REGNUM);
   1207  1.1  mrg 
   1208  1.1  mrg       if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
   1209  1.1  mrg         add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
   1210  1.1  mrg 			     Pmode, HARD_FRAME_POINTER_REGNUM);
   1211  1.1  mrg     }
   1212  1.1  mrg 
   1213  1.1  mrg #ifdef STACK_REGS
   1214  1.1  mrg   /* For the stack registers the presence of FIRST_STACK_REG in USED_REGS
   1215  1.1  mrg      is equivalent to as if all stack regs were in this set.
   1216  1.1  mrg      I.e. no stack register can be renamed, and even if it's an original
   1217  1.1  mrg      register here we make sure it won't be lifted over it's previous def
   1218  1.1  mrg      (it's previous def will appear as if it's a FIRST_STACK_REG def.
   1219  1.1  mrg      The HARD_REGNO_RENAME_OK covers other cases in condition below.  */
   1220  1.1  mrg   if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
   1221  1.1  mrg       && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
   1222  1.1  mrg     reg_rename_p->unavailable_hard_regs |= sel_hrd.stack_regs;
   1223  1.1  mrg #endif
   1224  1.1  mrg 
   1225  1.1  mrg   mode = GET_MODE (orig_dest);
   1226  1.1  mrg 
   1227  1.1  mrg   /* If there's a call on this path, make regs from full_reg_clobbers
   1228  1.1  mrg      unavailable.
   1229  1.1  mrg 
   1230  1.1  mrg      ??? It would be better to track the set of clobbered registers
   1231  1.1  mrg      directly, but that would be quite expensive in a def_t.  */
   1232  1.1  mrg   if (def->crossed_call_abis)
   1233  1.1  mrg     reg_rename_p->unavailable_hard_regs
   1234  1.1  mrg       |= call_clobbers_in_region (def->crossed_call_abis,
   1235  1.1  mrg 				  reg_class_contents[ALL_REGS], mode);
   1236  1.1  mrg 
   1237  1.1  mrg   /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and
   1238  1.1  mrg      crossed_call_abis, but not register classes.  */
   1239  1.1  mrg   if (!reload_completed)
   1240  1.1  mrg     return;
   1241  1.1  mrg 
   1242  1.1  mrg   /* Leave regs as 'available' only from the current
   1243  1.1  mrg      register class.  */
   1244  1.1  mrg   reg_rename_p->available_for_renaming = reg_class_contents[cl];
   1245  1.1  mrg 
   1246  1.1  mrg   /* Leave only registers available for this mode.  */
   1247  1.1  mrg   if (!sel_hrd.regs_for_mode_ok[mode])
   1248  1.1  mrg     init_regs_for_mode (mode);
   1249  1.1  mrg   reg_rename_p->available_for_renaming &= sel_hrd.regs_for_mode[mode];
   1250  1.1  mrg 
   1251  1.1  mrg   /* Leave only those that are ok to rename.  */
   1252  1.1  mrg   EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
   1253  1.1  mrg                                   0, cur_reg, hrsi)
   1254  1.1  mrg     {
   1255  1.1  mrg       int nregs;
   1256  1.1  mrg       int i;
   1257  1.1  mrg 
   1258  1.1  mrg       nregs = hard_regno_nregs (cur_reg, mode);
   1259  1.1  mrg       gcc_assert (nregs > 0);
   1260  1.1  mrg 
   1261  1.1  mrg       for (i = nregs - 1; i >= 0; --i)
   1262  1.1  mrg         if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
   1263  1.1  mrg           break;
   1264  1.1  mrg 
   1265  1.1  mrg       if (i >= 0)
   1266  1.1  mrg         CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
   1267  1.1  mrg                             cur_reg);
   1268  1.1  mrg     }
   1269  1.1  mrg 
   1270  1.1  mrg   reg_rename_p->available_for_renaming &= ~reg_rename_p->unavailable_hard_regs;
   1271  1.1  mrg 
   1272  1.1  mrg   /* Regno is always ok from the renaming part of view, but it really
   1273  1.1  mrg      could be in *unavailable_hard_regs already, so set it here instead
   1274  1.1  mrg      of there.  */
   1275  1.1  mrg   SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
   1276  1.1  mrg }
   1277  1.1  mrg 
/* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
   best register more recently than REG2.  Used to spread renaming targets
   evenly across the register file instead of reusing the same register.  */
static int reg_rename_tick[FIRST_PSEUDO_REGISTER];

/* Indicates the number of times renaming happened before the current one.  */
static int reg_rename_this_tick;
   1284  1.1  mrg 
/* Choose the register among free, that is suitable for storing
   the rhs value.

   ORIGINAL_INSNS is the list of insns where the operation (rhs)
   originally appears.  There could be multiple original operations
   for a single rhs since we are moving it up and merging along
   different paths.

   Some code is adapted from regrename.cc (regrename_optimize).
   If the original register is available, the function returns it.
   Otherwise it performs the checks, so the new register should
   comply with the following:
    - it should not violate any live ranges (such registers are in
      REG_RENAME_P->available_for_renaming set);
    - it should not be in the HARD_REGS_USED regset;
    - it should be in the class compatible with original uses;
    - it should not be clobbered through reference with different mode;
    - if we're in the leaf function, then the new register should
      not be in the LEAF_REGISTERS;
    - etc.

   If several registers meet the conditions, the register with smallest
   tick is returned to achieve more even register allocation.

   If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR
   to true.

   If no register satisfies the above conditions, NULL_RTX is returned.  */
static rtx
choose_best_reg_1 (HARD_REG_SET hard_regs_used,
                   struct reg_rename *reg_rename_p,
                   def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  int best_new_reg;
  unsigned cur_reg;
  machine_mode mode = VOIDmode;
  unsigned regno, i, n;
  hard_reg_set_iterator hrsi;
  def_list_iterator di;
  def_t def;

  /* If original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, di, original_insns)
    {
      rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));

      gcc_assert (REG_P (orig_dest));

      /* Check that all original operations have the same mode.
         This is done for the next loop; if we'd return from this
         loop, we'd check only part of them, but in this case
         it doesn't matter.  */
      if (mode == VOIDmode)
        mode = GET_MODE (orig_dest);
      gcc_assert (mode == GET_MODE (orig_dest));

      regno = REGNO (orig_dest);
      /* A multi-word value occupies REG_NREGS consecutive hard regs;
	 every one of them must be free for the original to be usable.  */
      for (i = 0, n = REG_NREGS (orig_dest); i < n; i++)
        if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
          break;

      /* All hard registers are available.  */
      if (i == n)
        {
          gcc_assert (mode != VOIDmode);

          /* Hard registers should not be shared.  */
          return gen_rtx_REG (mode, regno);
        }
    }

  *is_orig_reg_p_ptr = false;
  best_new_reg = -1;

  /* Among all available regs choose the register that was
     allocated earliest.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
      {
	/* Check that all hard regs for mode are available.
	   (I starts at 1: CUR_REG itself was checked just above.)  */
	for (i = 1, n = hard_regno_nregs (cur_reg, mode); i < n; i++)
	  if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
	      || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
				     cur_reg + i))
	    break;

	if (i < n)
	  continue;

        /* All hard registers are available.  */
        if (best_new_reg < 0
            || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
          {
            best_new_reg = cur_reg;

            /* Return immediately when we know there's no better reg:
	       a zero tick means the register was never chosen yet.  */
            if (! reg_rename_tick[best_new_reg])
              break;
          }
      }

  if (best_new_reg >= 0)
    {
      /* Use the check from the above loop.  */
      gcc_assert (mode != VOIDmode);
      return gen_rtx_REG (mode, best_new_reg);
    }

  return NULL_RTX;
}
   1397  1.1  mrg 
   1398  1.1  mrg /* A wrapper around choose_best_reg_1 () to verify that we make correct
   1399  1.1  mrg    assumptions about available registers in the function.  */
   1400  1.1  mrg static rtx
   1401  1.1  mrg choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
   1402  1.1  mrg                  def_list_t original_insns, bool *is_orig_reg_p_ptr)
   1403  1.1  mrg {
   1404  1.1  mrg   rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
   1405  1.1  mrg                                     original_insns, is_orig_reg_p_ptr);
   1406  1.1  mrg 
   1407  1.1  mrg   /* FIXME loop over hard_regno_nregs here.  */
   1408  1.1  mrg   gcc_assert (best_reg == NULL_RTX
   1409  1.1  mrg 	      || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));
   1410  1.1  mrg 
   1411  1.1  mrg   return best_reg;
   1412  1.1  mrg }
   1413  1.1  mrg 
/* Choose the pseudo register for storing rhs value.  As this is supposed
   to work before reload, we return either the original register or make
   the new one.  The parameters are the same as in choose_best_reg_1,
   except that USED_REGS may contain pseudos.
   If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.

   TODO: take into account register pressure while doing this.  Up to this
   moment, this function would never return NULL for pseudos, but we should
   not rely on this.  */
static rtx
choose_best_pseudo_reg (regset used_regs,
                        struct reg_rename *reg_rename_p,
                        def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  def_list_iterator i;
  def_t def;
  machine_mode mode = VOIDmode;
  bool bad_hard_regs = false;

  /* We should not use this after reload.  */
  gcc_assert (!reload_completed);

  /* If original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, i, original_insns)
    {
      rtx dest = SET_DEST (PATTERN (def->orig_insn));
      int orig_regno;

      gcc_assert (REG_P (dest));

      /* Check that all original operations have the same mode.  */
      if (mode == VOIDmode)
        mode = GET_MODE (dest);
      else
        gcc_assert (mode == GET_MODE (dest));
      orig_regno = REGNO (dest);

      /* Check that nothing in used_regs intersects with orig_regno.  When
	 we have a hard reg here, still loop over hard_regno_nregs.  */
      if (HARD_REGISTER_NUM_P (orig_regno))
	{
	  int j, n;
	  for (j = 0, n = REG_NREGS (dest); j < n; j++)
	    if (REGNO_REG_SET_P (used_regs, orig_regno + j))
	      break;
	  if (j < n)
	    continue;
	}
      else
	{
	  if (REGNO_REG_SET_P (used_regs, orig_regno))
	    continue;
	}
      if (HARD_REGISTER_NUM_P (orig_regno))
	{
	  gcc_assert (df_regs_ever_live_p (orig_regno));

	  /* For hard registers, we have to check hardware imposed
	     limitations (frame/stack registers, calls crossed).  */
	  if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
				  orig_regno))
	    {
	      /* Don't let register cross a call if it doesn't already
		 cross one.  This condition is written in accordance with
		 that in sched-deps.cc sched_analyze_reg().  */
	      if (!reg_rename_p->crossed_call_abis
		  || REG_N_CALLS_CROSSED (orig_regno) > 0)
		return gen_rtx_REG (mode, orig_regno);
	    }

	  bad_hard_regs = true;
	}
      else
	return dest;
    }

  *is_orig_reg_p_ptr = false;

  /* We had some original hard registers that couldn't be used.
     Those were likely special.  Don't try to create a pseudo.  */
  if (bad_hard_regs)
    return NULL_RTX;

  /* We haven't found a register from original operations.  Get a new one.
     FIXME: control register pressure somehow.  */
  {
    rtx new_reg = gen_reg_rtx (mode);

    gcc_assert (mode != VOIDmode);

    /* Creating a pseudo grew the register file; keep the global reg
       info tables in sync before writing REG_N_CALLS_CROSSED.  */
    max_regno = max_reg_num ();
    maybe_extend_reg_info_p ();
    REG_N_CALLS_CROSSED (REGNO (new_reg))
      = reg_rename_p->crossed_call_abis ? 1 : 0;

    return new_reg;
  }
}
   1514  1.1  mrg 
/* Consistency check: assert that the availability of EXPR's target
   register recorded in EXPR_TARGET_AVAILABLE agrees with the liveness
   information in USED_REGS and the hardware restrictions in
   REG_RENAME_P->UNAVAILABLE_HARD_REGS.  Does nothing when EXPR's LHS
   is not a register or its availability is unknown (< 0).  */
static void
verify_target_availability (expr_t expr, regset used_regs,
			    struct reg_rename *reg_rename_p)
{
  unsigned n, i, regno;
  machine_mode mode;
  bool target_available, live_available, hard_available;

  if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
    return;

  regno = expr_dest_regno (expr);
  mode = GET_MODE (EXPR_LHS (expr));
  target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
  /* Pseudos occupy a single "register"; hard regs may need several.  */
  n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs (regno, mode) : 1;

  live_available = hard_available = true;
  for (i = 0; i < n; i++)
    {
      if (bitmap_bit_p (used_regs, regno + i))
        live_available = false;
      if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
        hard_available = false;
    }

  /* When target is not available, it may be due to hard register
     restrictions, e.g. crosses calls, so we check hard_available too.  */
  if (target_available)
    gcc_assert (live_available);
  else
    /* Check only if we haven't scheduled something on the previous fence,
       cause due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
       and having more than one fence, we may end having targ_un in a block
       in which successors target register is actually available.

       The last condition handles the case when a dependence from a call insn
       was created in sched-deps.cc for insns with destination registers that
       never crossed a call before, but do cross one after our code motion.

       FIXME: in the latter case, we just uselessly called find_used_regs,
       because we can't move this expression with any other register
       as well.  */
    gcc_assert (scheduled_something_on_previous_fence || !live_available
		|| !hard_available
		|| (!reload_completed
		    && reg_rename_p->crossed_call_abis
		    && REG_N_CALLS_CROSSED (regno) == 0));
}
   1565  1.1  mrg 
   1566  1.1  mrg /* Collect unavailable registers due to liveness for EXPR from BNDS
   1567  1.1  mrg    into USED_REGS.  Save additional information about available
   1568  1.1  mrg    registers and unavailable due to hardware restriction registers
   1569  1.1  mrg    into REG_RENAME_P structure.  Save original insns into ORIGINAL_INSNS
   1570  1.1  mrg    list.  */
   1571  1.1  mrg static void
   1572  1.1  mrg collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
   1573  1.1  mrg 				    struct reg_rename *reg_rename_p,
   1574  1.1  mrg 				    def_list_t *original_insns)
   1575  1.1  mrg {
   1576  1.1  mrg   for (; bnds; bnds = BLIST_NEXT (bnds))
   1577  1.1  mrg     {
   1578  1.1  mrg       bool res;
   1579  1.1  mrg       av_set_t orig_ops = NULL;
   1580  1.1  mrg       bnd_t bnd = BLIST_BND (bnds);
   1581  1.1  mrg 
   1582  1.1  mrg       /* If the chosen best expr doesn't belong to current boundary,
   1583  1.1  mrg 	 skip it.  */
   1584  1.1  mrg       if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
   1585  1.1  mrg 	continue;
   1586  1.1  mrg 
   1587  1.1  mrg       /* Put in ORIG_OPS all exprs from this boundary that became
   1588  1.1  mrg 	 RES on top.  */
   1589  1.1  mrg       orig_ops = find_sequential_best_exprs (bnd, expr, false);
   1590  1.1  mrg 
   1591  1.1  mrg       /* Compute used regs and OR it into the USED_REGS.  */
   1592  1.1  mrg       res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
   1593  1.1  mrg 			    reg_rename_p, original_insns);
   1594  1.1  mrg 
   1595  1.1  mrg       /* FIXME: the assert is true until we'd have several boundaries.  */
   1596  1.1  mrg       gcc_assert (res);
   1597  1.1  mrg       av_set_clear (&orig_ops);
   1598  1.1  mrg     }
   1599  1.1  mrg }
   1600  1.1  mrg 
   1601  1.1  mrg /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
   1602  1.1  mrg    If BEST_REG is valid, replace LHS of EXPR with it.  */
   1603  1.1  mrg static bool
   1604  1.1  mrg try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
   1605  1.1  mrg {
   1606  1.1  mrg   /* Try whether we'll be able to generate the insn
   1607  1.1  mrg      'dest := best_reg' at the place of the original operation.  */
   1608  1.1  mrg   for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
   1609  1.1  mrg     {
   1610  1.1  mrg       insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;
   1611  1.1  mrg 
   1612  1.1  mrg       gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));
   1613  1.1  mrg 
   1614  1.1  mrg       if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
   1615  1.1  mrg 	  && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
   1616  1.1  mrg 	      || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
   1617  1.1  mrg 	return false;
   1618  1.1  mrg     }
   1619  1.1  mrg 
   1620  1.1  mrg   /* Make sure that EXPR has the right destination
   1621  1.1  mrg      register.  */
   1622  1.1  mrg   if (expr_dest_regno (expr) != REGNO (best_reg))
   1623  1.1  mrg     replace_dest_with_reg_in_expr (expr, best_reg);
   1624  1.1  mrg   else
   1625  1.1  mrg     EXPR_TARGET_AVAILABLE (expr) = 1;
   1626  1.1  mrg 
   1627  1.1  mrg   return true;
   1628  1.1  mrg }
   1629  1.1  mrg 
/* Select and assign best register to EXPR searching from BNDS.
   Set *IS_ORIG_REG_P to TRUE if original register was selected.
   Return FALSE if no register can be chosen, which could happen when:
   * EXPR_SEPARABLE_P is true but we were unable to find suitable register;
   * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
     that are used on the moving path.  */
static bool
find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
{
  /* Static to avoid re-clearing the large hard-reg sets on every call;
     unavailable_hard_regs is explicitly cleared below.  */
  static struct reg_rename reg_rename_data;

  regset used_regs;
  def_list_t original_insns = NULL;
  bool reg_ok;

  *is_orig_reg_p = false;

  /* Don't bother to do anything if this insn doesn't set any registers.  */
  if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
      && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
    return true;

  used_regs = get_clear_regset_from_pool ();
  CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);

  collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
				      &original_insns);

  /* If after reload, make sure we're working with hard regs here.  */
  if (flag_checking && reload_completed)
    {
      reg_set_iterator rsi;
      unsigned i;

      /* Any pseudo (regno >= FIRST_PSEUDO_REGISTER) in USED_REGS at this
	 point would indicate a bug upstream.  */
      EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
        gcc_unreachable ();
    }

  if (EXPR_SEPARABLE_P (expr))
    {
      rtx best_reg = NULL_RTX;
      /* Check that we have computed availability of a target register
	 correctly.  */
      verify_target_availability (expr, used_regs, &reg_rename_data);

      /* Turn everything in hard regs after reload.  */
      if (reload_completed)
	{
	  HARD_REG_SET hard_regs_used;
	  REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);

	  /* Join hard registers unavailable due to register class
	     restrictions and live range intersection.  */
	  hard_regs_used |= reg_rename_data.unavailable_hard_regs;

	  best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
				      original_insns, is_orig_reg_p);
	}
      else
	best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
					   original_insns, is_orig_reg_p);

      if (!best_reg)
	reg_ok = false;
      else if (*is_orig_reg_p)
	{
	  /* In case of unification BEST_REG may be different from EXPR's LHS
	     when EXPR's LHS is unavailable, and there is another LHS among
	     ORIGINAL_INSNS.  */
	  reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
	}
      else
	{
	  /* Forbid renaming of low-cost insns.  */
	  if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
	    reg_ok = false;
	  else
	    reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
	}
    }
  else
    {
      /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
	 any of the HARD_REGS_USED set.  */
      if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
				      reg_rename_data.unavailable_hard_regs))
	{
	  reg_ok = false;
	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
	}
      else
	{
	  reg_ok = true;
	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
	}
    }

  ilist_clear (&original_insns);
  return_regset_to_pool (used_regs);

  return reg_ok;
}
   1732  1.1  mrg 
   1733  1.1  mrg 
   1735  1.1  mrg /* Return true if dependence described by DS can be overcomed.  */
   1736  1.1  mrg static bool
   1737  1.1  mrg can_speculate_dep_p (ds_t ds)
   1738  1.1  mrg {
   1739  1.1  mrg   if (spec_info == NULL)
   1740  1.1  mrg     return false;
   1741  1.1  mrg 
   1742  1.1  mrg   /* Leave only speculative data.  */
   1743  1.1  mrg   ds &= SPECULATIVE;
   1744  1.1  mrg 
   1745  1.1  mrg   if (ds == 0)
   1746  1.1  mrg     return false;
   1747  1.1  mrg 
   1748  1.1  mrg   {
   1749  1.1  mrg     /* FIXME: make sched-deps.cc produce only those non-hard dependencies,
   1750  1.1  mrg        that we can overcome.  */
   1751  1.1  mrg     ds_t spec_mask = spec_info->mask;
   1752  1.1  mrg 
   1753  1.1  mrg     if ((ds & spec_mask) != ds)
   1754  1.1  mrg       return false;
   1755  1.1  mrg   }
   1756  1.1  mrg 
   1757  1.1  mrg   if (ds_weak (ds) < spec_info->data_weakness_cutoff)
   1758  1.1  mrg     return false;
   1759  1.1  mrg 
   1760  1.1  mrg   return true;
   1761  1.1  mrg }
   1762  1.1  mrg 
/* Get a speculation check instruction.
   C_EXPR is a speculative expression,
   CHECK_DS describes speculations that should be checked,
   ORIG_INSN is the original non-speculative insn in the stream.
   Returns the newly emitted check insn.  */
static insn_t
create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
{
  rtx check_pattern;
  rtx_insn *insn_rtx;
  insn_t insn;
  basic_block recovery_block;
  rtx_insn *label;

  /* Create a recovery block if target is going to emit branchy check, or if
     ORIG_INSN was speculative already.  */
  if (targetm.sched.needs_block_p (check_ds)
      || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
    {
      recovery_block = sel_create_recovery_block (orig_insn);
      label = BB_HEAD (recovery_block);
    }
  else
    {
      recovery_block = NULL;
      label = NULL;
    }

  /* Get pattern of the check.  */
  check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
						check_ds);

  gcc_assert (check_pattern != NULL);

  /* Emit check.  */
  insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);

  insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
				      INSN_SEQNO (orig_insn), orig_insn);

  /* Make check to be non-speculative.  */
  EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
  INSN_SPEC_CHECKED_DS (insn) = check_ds;

  /* Decrease priority of check by difference of load/check instruction
     latencies.  */
  EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
				       - sel_vinsn_cost (INSN_VINSN (insn)));

  /* Emit copy of original insn (though with replaced target register,
     if needed) to the recovery block.  */
  if (recovery_block != NULL)
    {
      rtx twin_rtx;

      twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
      twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
      sel_gen_recovery_insn_from_rtx_after (twin_rtx,
					    INSN_EXPR (orig_insn),
					    INSN_SEQNO (insn),
					    bb_note (recovery_block));
    }

  /* If we've generated a data speculation check, make sure
     that all the bookkeeping instruction we'll create during
     this move_op () will allocate an ALAT entry so that the
     check won't fail.
     In case of control speculation we must convert C_EXPR to control
     speculative mode, because failing to do so will bring us an exception
     thrown by the non-control-speculative load.  */
  check_ds = ds_get_max_dep_weak (check_ds);
  speculate_expr (c_expr, check_ds);

  return insn;
}
   1837  1.1  mrg 
   1838  1.1  mrg /* True when INSN is a "regN = regN" copy.  */
   1839  1.1  mrg static bool
   1840  1.1  mrg identical_copy_p (rtx_insn *insn)
   1841  1.1  mrg {
   1842  1.1  mrg   rtx lhs, rhs, pat;
   1843  1.1  mrg 
   1844  1.1  mrg   pat = PATTERN (insn);
   1845  1.1  mrg 
   1846  1.1  mrg   if (GET_CODE (pat) != SET)
   1847  1.1  mrg     return false;
   1848  1.1  mrg 
   1849  1.1  mrg   lhs = SET_DEST (pat);
   1850  1.1  mrg   if (!REG_P (lhs))
   1851  1.1  mrg     return false;
   1852  1.1  mrg 
   1853  1.1  mrg   rhs = SET_SRC (pat);
   1854  1.1  mrg   if (!REG_P (rhs))
   1855  1.1  mrg     return false;
   1856  1.1  mrg 
   1857  1.1  mrg   return REGNO (lhs) == REGNO (rhs);
   1858  1.1  mrg }
   1859  1.1  mrg 
   1860  1.1  mrg /* Undo all transformations on *AV_PTR that were done when
   1861  1.1  mrg    moving through INSN.  */
   1862  1.1  mrg static void
   1863  1.1  mrg undo_transformations (av_set_t *av_ptr, rtx_insn *insn)
   1864  1.1  mrg {
   1865  1.1  mrg   av_set_iterator av_iter;
   1866  1.1  mrg   expr_t expr;
   1867  1.1  mrg   av_set_t new_set = NULL;
   1868  1.1  mrg 
   1869  1.1  mrg   /* First, kill any EXPR that uses registers set by an insn.  This is
   1870  1.1  mrg      required for correctness.  */
   1871  1.1  mrg   FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
   1872  1.1  mrg     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
   1873  1.1  mrg         && bitmap_intersect_p (INSN_REG_SETS (insn),
   1874  1.1  mrg                                VINSN_REG_USES (EXPR_VINSN (expr)))
   1875  1.1  mrg         /* When an insn looks like 'r1 = r1', we could substitute through
   1876  1.1  mrg            it, but the above condition will still hold.  This happened with
   1877  1.1  mrg            gcc.c-torture/execute/961125-1.c.  */
   1878  1.1  mrg         && !identical_copy_p (insn))
   1879  1.1  mrg       {
   1880  1.1  mrg         if (sched_verbose >= 6)
   1881  1.1  mrg           sel_print ("Expr %d removed due to use/set conflict\n",
   1882  1.1  mrg                      INSN_UID (EXPR_INSN_RTX (expr)));
   1883  1.1  mrg         av_set_iter_remove (&av_iter);
   1884  1.1  mrg       }
   1885  1.1  mrg 
   1886  1.1  mrg   /* Undo transformations looking at the history vector.  */
   1887  1.1  mrg   FOR_EACH_EXPR (expr, av_iter, *av_ptr)
   1888  1.1  mrg     {
   1889  1.1  mrg       int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
   1890  1.1  mrg                                         insn, EXPR_VINSN (expr), true);
   1891  1.1  mrg 
   1892  1.1  mrg       if (index >= 0)
   1893  1.1  mrg         {
   1894  1.1  mrg           expr_history_def *phist;
   1895  1.1  mrg 
   1896  1.1  mrg           phist = &EXPR_HISTORY_OF_CHANGES (expr)[index];
   1897  1.1  mrg 
   1898  1.1  mrg           switch (phist->type)
   1899  1.1  mrg             {
   1900  1.1  mrg             case TRANS_SPECULATION:
   1901  1.1  mrg               {
   1902  1.1  mrg                 ds_t old_ds, new_ds;
   1903  1.1  mrg 
   1904  1.1  mrg                 /* Compute the difference between old and new speculative
   1905  1.1  mrg                    statuses: that's what we need to check.
   1906  1.1  mrg                    Earlier we used to assert that the status will really
   1907  1.1  mrg                    change.  This no longer works because only the probability
   1908  1.1  mrg                    bits in the status may have changed during compute_av_set,
   1909  1.1  mrg                    and in the case of merging different probabilities of the
   1910  1.1  mrg                    same speculative status along different paths we do not
   1911  1.1  mrg                    record this in the history vector.  */
   1912  1.1  mrg                 old_ds = phist->spec_ds;
   1913  1.1  mrg                 new_ds = EXPR_SPEC_DONE_DS (expr);
   1914  1.1  mrg 
   1915  1.1  mrg                 old_ds &= SPECULATIVE;
   1916  1.1  mrg                 new_ds &= SPECULATIVE;
   1917  1.1  mrg                 new_ds &= ~old_ds;
   1918  1.1  mrg 
   1919  1.1  mrg                 EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
   1920  1.1  mrg                 break;
   1921  1.1  mrg               }
   1922  1.1  mrg             case TRANS_SUBSTITUTION:
   1923  1.1  mrg               {
   1924  1.1  mrg                 expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
   1925  1.1  mrg                 vinsn_t new_vi;
   1926  1.1  mrg                 bool add = true;
   1927  1.1  mrg 
   1928  1.1  mrg                 new_vi = phist->old_expr_vinsn;
   1929  1.1  mrg 
   1930  1.1  mrg                 gcc_assert (VINSN_SEPARABLE_P (new_vi)
   1931  1.1  mrg                             == EXPR_SEPARABLE_P (expr));
   1932  1.1  mrg                 copy_expr (tmp_expr, expr);
   1933  1.1  mrg 
   1934  1.1  mrg                 if (vinsn_equal_p (phist->new_expr_vinsn,
   1935  1.1  mrg                                    EXPR_VINSN (tmp_expr)))
   1936  1.1  mrg                   change_vinsn_in_expr (tmp_expr, new_vi);
   1937  1.1  mrg                 else
   1938  1.1  mrg                   /* This happens when we're unsubstituting on a bookkeeping
   1939  1.1  mrg                      copy, which was in turn substituted.  The history is wrong
   1940  1.1  mrg                      in this case.  Do it the hard way.  */
   1941  1.1  mrg                   add = substitute_reg_in_expr (tmp_expr, insn, true);
   1942  1.1  mrg                 if (add)
   1943  1.1  mrg                   av_set_add (&new_set, tmp_expr);
   1944  1.1  mrg                 clear_expr (tmp_expr);
   1945  1.1  mrg                 break;
   1946  1.1  mrg               }
   1947  1.1  mrg             default:
   1948  1.1  mrg               gcc_unreachable ();
   1949  1.1  mrg             }
   1950  1.1  mrg         }
   1951  1.1  mrg 
   1952  1.1  mrg     }
   1953  1.1  mrg 
   1954  1.1  mrg   av_set_union_and_clear (av_ptr, &new_set, NULL);
   1955  1.1  mrg }
   1956  1.1  mrg 
   1957  1.1  mrg 
   1959  1.1  mrg /* Moveup_* helpers for code motion and computing av sets.  */
   1960  1.1  mrg 
   1961  1.1  mrg /* Propagates EXPR inside an insn group through THROUGH_INSN.
   1962  1.1  mrg    The difference from the below function is that only substitution is
   1963  1.1  mrg    performed.  */
   1964  1.1  mrg static enum MOVEUP_EXPR_CODE
   1965  1.1  mrg moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
   1966  1.1  mrg {
   1967  1.1  mrg   vinsn_t vi = EXPR_VINSN (expr);
   1968  1.1  mrg   ds_t *has_dep_p;
   1969  1.1  mrg   ds_t full_ds;
   1970  1.1  mrg 
   1971  1.1  mrg   /* Do this only inside insn group.  */
   1972  1.1  mrg   gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);
   1973  1.1  mrg 
   1974  1.1  mrg   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
   1975  1.1  mrg   if (full_ds == 0)
   1976  1.1  mrg     return MOVEUP_EXPR_SAME;
   1977  1.1  mrg 
   1978  1.1  mrg   /* Substitution is the possible choice in this case.  */
   1979  1.1  mrg   if (has_dep_p[DEPS_IN_RHS])
   1980  1.1  mrg     {
   1981  1.1  mrg       /* Can't substitute UNIQUE VINSNs.  */
   1982  1.1  mrg       gcc_assert (!VINSN_UNIQUE_P (vi));
   1983  1.1  mrg 
   1984  1.1  mrg       if (can_substitute_through_p (through_insn,
   1985  1.1  mrg                                     has_dep_p[DEPS_IN_RHS])
   1986  1.1  mrg           && substitute_reg_in_expr (expr, through_insn, false))
   1987  1.1  mrg         {
   1988  1.1  mrg           EXPR_WAS_SUBSTITUTED (expr) = true;
   1989  1.1  mrg           return MOVEUP_EXPR_CHANGED;
   1990  1.1  mrg         }
   1991  1.1  mrg 
   1992  1.1  mrg       /* Don't care about this, as even true dependencies may be allowed
   1993  1.1  mrg          in an insn group.  */
   1994  1.1  mrg       return MOVEUP_EXPR_SAME;
   1995  1.1  mrg     }
   1996  1.1  mrg 
   1997  1.1  mrg   /* This can catch output dependencies in COND_EXECs.  */
   1998  1.1  mrg   if (has_dep_p[DEPS_IN_INSN])
   1999  1.1  mrg     return MOVEUP_EXPR_NULL;
   2000  1.1  mrg 
   2001  1.1  mrg   /* This is either an output or an anti dependence, which usually have
   2002  1.1  mrg      a zero latency.  Allow this here, if we'd be wrong, tick_check_p
   2003  1.1  mrg      will fix this.  */
   2004  1.1  mrg   gcc_assert (has_dep_p[DEPS_IN_LHS]);
   2005  1.1  mrg   return MOVEUP_EXPR_AS_RHS;
   2006  1.1  mrg }
   2007  1.1  mrg 
   2008  1.1  mrg /* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
   2009  1.1  mrg #define CANT_MOVE_TRAPPING(expr, through_insn)                \
   2010  1.1  mrg   (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
   2011  1.1  mrg    && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
   2012  1.1  mrg    && !sel_insn_is_speculation_check (through_insn))
   2013  1.1  mrg 
   2014  1.1  mrg /* True when a conflict on a target register was found during moveup_expr.  */
   2015  1.1  mrg static bool was_target_conflict = false;
   2016  1.1  mrg 
   2017  1.1  mrg /* Return true when moving a debug INSN across THROUGH_INSN will
   2018  1.1  mrg    create a bookkeeping block.  We don't want to create such blocks,
   2019  1.1  mrg    for they would cause codegen differences between compilations with
   2020  1.1  mrg    and without debug info.  */
   2021  1.1  mrg 
   2022  1.1  mrg static bool
   2023  1.1  mrg moving_insn_creates_bookkeeping_block_p (insn_t insn,
   2024  1.1  mrg 					 insn_t through_insn)
   2025  1.1  mrg {
   2026  1.1  mrg   basic_block bbi, bbt;
   2027  1.1  mrg   edge e1, e2;
   2028  1.1  mrg   edge_iterator ei1, ei2;
   2029  1.1  mrg 
   2030  1.1  mrg   if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
   2031  1.1  mrg     {
   2032  1.1  mrg       if (sched_verbose >= 9)
   2033  1.1  mrg 	sel_print ("no bookkeeping required: ");
   2034  1.1  mrg       return FALSE;
   2035  1.1  mrg     }
   2036  1.1  mrg 
   2037  1.1  mrg   bbi = BLOCK_FOR_INSN (insn);
   2038  1.1  mrg 
   2039  1.1  mrg   if (EDGE_COUNT (bbi->preds) == 1)
   2040  1.1  mrg     {
   2041  1.1  mrg       if (sched_verbose >= 9)
   2042  1.1  mrg 	sel_print ("only one pred edge: ");
   2043  1.1  mrg       return TRUE;
   2044  1.1  mrg     }
   2045  1.1  mrg 
   2046  1.1  mrg   bbt = BLOCK_FOR_INSN (through_insn);
   2047  1.1  mrg 
   2048  1.1  mrg   FOR_EACH_EDGE (e1, ei1, bbt->succs)
   2049  1.1  mrg     {
   2050  1.1  mrg       FOR_EACH_EDGE (e2, ei2, bbi->preds)
   2051  1.1  mrg 	{
   2052  1.1  mrg 	  if (find_block_for_bookkeeping (e1, e2, TRUE))
   2053  1.1  mrg 	    {
   2054  1.1  mrg 	      if (sched_verbose >= 9)
   2055  1.1  mrg 		sel_print ("found existing block: ");
   2056  1.1  mrg 	      return FALSE;
   2057  1.1  mrg 	    }
   2058  1.1  mrg 	}
   2059  1.1  mrg     }
   2060  1.1  mrg 
   2061  1.1  mrg   if (sched_verbose >= 9)
   2062  1.1  mrg     sel_print ("would create bookkeeping block: ");
   2063  1.1  mrg 
   2064  1.1  mrg   return TRUE;
   2065  1.1  mrg }
   2066  1.1  mrg 
   2067  1.1  mrg /* Return true when the conflict with newly created implicit clobbers
   2068  1.1  mrg    between EXPR and THROUGH_INSN is found because of renaming.  */
   2069  1.1  mrg static bool
   2070  1.1  mrg implicit_clobber_conflict_p (insn_t through_insn, expr_t expr)
   2071  1.1  mrg {
   2072  1.1  mrg   HARD_REG_SET temp;
   2073  1.1  mrg   rtx_insn *insn;
   2074  1.1  mrg   rtx reg, rhs, pat;
   2075  1.1  mrg   hard_reg_set_iterator hrsi;
   2076  1.1  mrg   unsigned regno;
   2077  1.1  mrg   bool valid;
   2078  1.1  mrg 
   2079  1.1  mrg   /* Make a new pseudo register.  */
   2080  1.1  mrg   reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr)));
   2081  1.1  mrg   max_regno = max_reg_num ();
   2082  1.1  mrg   maybe_extend_reg_info_p ();
   2083  1.1  mrg 
   2084  1.1  mrg   /* Validate a change and bail out early.  */
   2085  1.1  mrg   insn = EXPR_INSN_RTX (expr);
   2086  1.1  mrg   validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true);
   2087  1.1  mrg   valid = verify_changes (0);
   2088  1.1  mrg   cancel_changes (0);
   2089  1.1  mrg   if (!valid)
   2090  1.1  mrg     {
   2091  1.1  mrg       if (sched_verbose >= 6)
   2092  1.1  mrg 	sel_print ("implicit clobbers failed validation, ");
   2093  1.1  mrg       return true;
   2094  1.1  mrg     }
   2095  1.1  mrg 
   2096  1.1  mrg   /* Make a new insn with it.  */
   2097  1.1  mrg   rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr)));
   2098  1.1  mrg   pat = gen_rtx_SET (reg, rhs);
   2099  1.1  mrg   start_sequence ();
   2100  1.1  mrg   insn = emit_insn (pat);
   2101  1.1  mrg   end_sequence ();
   2102  1.1  mrg 
   2103  1.1  mrg   /* Calculate implicit clobbers.  */
   2104  1.1  mrg   extract_insn (insn);
   2105  1.1  mrg   preprocess_constraints (insn);
   2106  1.1  mrg   alternative_mask prefrred = get_preferred_alternatives (insn);
   2107  1.1  mrg   ira_implicitly_set_insn_hard_regs (&temp, prefrred);
   2108  1.1  mrg   temp &= ~ira_no_alloc_regs;
   2109  1.1  mrg 
   2110  1.1  mrg   /* If any implicit clobber registers intersect with regular ones in
   2111  1.1  mrg      through_insn, we have a dependency and thus bail out.  */
   2112  1.1  mrg   EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
   2113  1.1  mrg     {
   2114  1.1  mrg       vinsn_t vi = INSN_VINSN (through_insn);
   2115  1.1  mrg       if (bitmap_bit_p (VINSN_REG_SETS (vi), regno)
   2116  1.1  mrg 	  || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno)
   2117  1.1  mrg 	  || bitmap_bit_p (VINSN_REG_USES (vi), regno))
   2118  1.1  mrg 	return true;
   2119  1.1  mrg     }
   2120  1.1  mrg 
   2121  1.1  mrg   return false;
   2122  1.1  mrg }
   2123  1.1  mrg 
   2124  1.1  mrg /* Modifies EXPR so it can be moved through the THROUGH_INSN,
   2125  1.1  mrg    performing necessary transformations.  Record the type of transformation
   2126  1.1  mrg    made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
   2127  1.1  mrg    permit all dependencies except true ones, and try to remove those
   2128  1.1  mrg    too via forward substitution.  All cases when a non-eliminable
   2129  1.1  mrg    non-zero cost dependency exists inside an insn group will be fixed
   2130  1.1  mrg    in tick_check_p instead.  */
   2131  1.1  mrg static enum MOVEUP_EXPR_CODE
   2132  1.1  mrg moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
   2133  1.1  mrg             enum local_trans_type *ptrans_type)
   2134  1.1  mrg {
   2135  1.1  mrg   vinsn_t vi = EXPR_VINSN (expr);
   2136  1.1  mrg   insn_t insn = VINSN_INSN_RTX (vi);
   2137  1.1  mrg   bool was_changed = false;
   2138  1.1  mrg   bool as_rhs = false;
   2139  1.1  mrg   ds_t *has_dep_p;
   2140  1.1  mrg   ds_t full_ds;
   2141  1.1  mrg 
   2142  1.1  mrg   /* ??? We use dependencies of non-debug insns on debug insns to
   2143  1.1  mrg      indicate that the debug insns need to be reset if the non-debug
   2144  1.1  mrg      insn is pulled ahead of it.  It's hard to figure out how to
   2145  1.1  mrg      introduce such a notion in sel-sched, but it already fails to
   2146  1.1  mrg      support debug insns in other ways, so we just go ahead and
   2147  1.1  mrg      let the deug insns go corrupt for now.  */
   2148  1.1  mrg   if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
   2149  1.1  mrg     return MOVEUP_EXPR_SAME;
   2150  1.1  mrg 
   2151  1.1  mrg   /* When inside_insn_group, delegate to the helper.  */
   2152  1.1  mrg   if (inside_insn_group)
   2153  1.1  mrg     return moveup_expr_inside_insn_group (expr, through_insn);
   2154  1.1  mrg 
   2155  1.1  mrg   /* Deal with unique insns and control dependencies.  */
   2156  1.1  mrg   if (VINSN_UNIQUE_P (vi))
   2157  1.1  mrg     {
   2158  1.1  mrg       /* We can move jumps without side-effects or jumps that are
   2159  1.1  mrg 	 mutually exclusive with instruction THROUGH_INSN (all in cases
   2160  1.1  mrg 	 dependencies allow to do so and jump is not speculative).  */
   2161  1.1  mrg       if (control_flow_insn_p (insn))
   2162  1.1  mrg         {
   2163  1.1  mrg           basic_block fallthru_bb;
   2164  1.1  mrg 
   2165  1.1  mrg           /* Do not move checks and do not move jumps through other
   2166  1.1  mrg              jumps.  */
   2167  1.1  mrg           if (control_flow_insn_p (through_insn)
   2168  1.1  mrg               || sel_insn_is_speculation_check (insn))
   2169  1.1  mrg             return MOVEUP_EXPR_NULL;
   2170  1.1  mrg 
   2171  1.1  mrg           /* Don't move jumps through CFG joins.  */
   2172  1.1  mrg           if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
   2173  1.1  mrg             return MOVEUP_EXPR_NULL;
   2174  1.1  mrg 
   2175  1.1  mrg           /* The jump should have a clear fallthru block, and
   2176  1.1  mrg              this block should be in the current region.  */
   2177  1.1  mrg           if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
   2178  1.1  mrg               || ! in_current_region_p (fallthru_bb))
   2179  1.1  mrg             return MOVEUP_EXPR_NULL;
   2180  1.1  mrg 
   2181  1.1  mrg           /* And it should be mutually exclusive with through_insn.  */
   2182  1.1  mrg           if (! sched_insns_conditions_mutex_p (insn, through_insn)
   2183  1.1  mrg 	      && ! DEBUG_INSN_P (through_insn))
   2184  1.1  mrg             return MOVEUP_EXPR_NULL;
   2185  1.1  mrg         }
   2186  1.1  mrg 
   2187  1.1  mrg       /* Don't move what we can't move.  */
   2188  1.1  mrg       if (EXPR_CANT_MOVE (expr)
   2189  1.1  mrg 	  && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
   2190  1.1  mrg 	return MOVEUP_EXPR_NULL;
   2191  1.1  mrg 
   2192  1.1  mrg       /* Don't move SCHED_GROUP instruction through anything.
   2193  1.1  mrg          If we don't force this, then it will be possible to start
   2194  1.1  mrg          scheduling a sched_group before all its dependencies are
   2195  1.1  mrg          resolved.
   2196  1.1  mrg          ??? Haifa deals with this issue by delaying the SCHED_GROUP
   2197  1.1  mrg          as late as possible through rank_for_schedule.  */
   2198  1.1  mrg       if (SCHED_GROUP_P (insn))
   2199  1.1  mrg 	return MOVEUP_EXPR_NULL;
   2200  1.1  mrg     }
   2201  1.1  mrg   else
   2202  1.1  mrg     gcc_assert (!control_flow_insn_p (insn));
   2203  1.1  mrg 
   2204  1.1  mrg   /* Don't move debug insns if this would require bookkeeping.  */
   2205  1.1  mrg   if (DEBUG_INSN_P (insn)
   2206  1.1  mrg       && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
   2207  1.1  mrg       && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
   2208  1.1  mrg     return MOVEUP_EXPR_NULL;
   2209  1.1  mrg 
   2210  1.1  mrg   /* Deal with data dependencies.  */
   2211  1.1  mrg   was_target_conflict = false;
   2212  1.1  mrg   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
   2213  1.1  mrg   if (full_ds == 0)
   2214  1.1  mrg     {
   2215  1.1  mrg       if (!CANT_MOVE_TRAPPING (expr, through_insn))
   2216  1.1  mrg 	return MOVEUP_EXPR_SAME;
   2217  1.1  mrg     }
   2218  1.1  mrg   else
   2219  1.1  mrg     {
   2220  1.1  mrg       /* We can move UNIQUE insn up only as a whole and unchanged,
   2221  1.1  mrg          so it shouldn't have any dependencies.  */
   2222  1.1  mrg       if (VINSN_UNIQUE_P (vi))
   2223  1.1  mrg 	return MOVEUP_EXPR_NULL;
   2224  1.1  mrg     }
   2225  1.1  mrg 
   2226  1.1  mrg   if (full_ds != 0 && can_speculate_dep_p (full_ds))
   2227  1.1  mrg     {
   2228  1.1  mrg       int res;
   2229  1.1  mrg 
   2230  1.1  mrg       res = speculate_expr (expr, full_ds);
   2231  1.1  mrg       if (res >= 0)
   2232  1.1  mrg 	{
   2233  1.1  mrg           /* Speculation was successful.  */
   2234  1.1  mrg           full_ds = 0;
   2235  1.1  mrg           was_changed = (res > 0);
   2236  1.1  mrg           if (res == 2)
   2237  1.1  mrg             was_target_conflict = true;
   2238  1.1  mrg           if (ptrans_type)
   2239  1.1  mrg             *ptrans_type = TRANS_SPECULATION;
   2240  1.1  mrg 	  sel_clear_has_dependence ();
   2241  1.1  mrg 	}
   2242  1.1  mrg     }
   2243  1.1  mrg 
   2244  1.1  mrg   if (has_dep_p[DEPS_IN_INSN])
   2245  1.1  mrg     /* We have some dependency that cannot be discarded.  */
   2246  1.1  mrg     return MOVEUP_EXPR_NULL;
   2247  1.1  mrg 
   2248  1.1  mrg   if (has_dep_p[DEPS_IN_LHS])
   2249  1.1  mrg     {
   2250  1.1  mrg       /* Only separable insns can be moved up with the new register.
   2251  1.1  mrg          Anyways, we should mark that the original register is
   2252  1.1  mrg          unavailable.  */
   2253  1.1  mrg       if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
   2254  1.1  mrg         return MOVEUP_EXPR_NULL;
   2255  1.1  mrg 
   2256  1.1  mrg       /* When renaming a hard register to a pseudo before reload, extra
   2257  1.1  mrg 	 dependencies can occur from the implicit clobbers of the insn.
   2258  1.1  mrg 	 Filter out such cases here.  */
   2259  1.1  mrg       if (!reload_completed && REG_P (EXPR_LHS (expr))
   2260  1.1  mrg 	  && HARD_REGISTER_P (EXPR_LHS (expr))
   2261  1.1  mrg 	  && implicit_clobber_conflict_p (through_insn, expr))
   2262  1.1  mrg 	{
   2263  1.1  mrg 	  if (sched_verbose >= 6)
   2264  1.1  mrg 	    sel_print ("implicit clobbers conflict detected, ");
   2265  1.1  mrg 	  return MOVEUP_EXPR_NULL;
   2266  1.1  mrg 	}
   2267  1.1  mrg       EXPR_TARGET_AVAILABLE (expr) = false;
   2268  1.1  mrg       was_target_conflict = true;
   2269  1.1  mrg       as_rhs = true;
   2270  1.1  mrg     }
   2271  1.1  mrg 
   2272  1.1  mrg   /* At this point we have either separable insns, that will be lifted
   2273  1.1  mrg      up only as RHSes, or non-separable insns with no dependency in lhs.
   2274  1.1  mrg      If dependency is in RHS, then try to perform substitution and move up
   2275  1.1  mrg      substituted RHS:
   2276  1.1  mrg 
   2277  1.1  mrg       Ex. 1:				  Ex.2
   2278  1.1  mrg 	y = x;				    y = x;
   2279  1.1  mrg 	z = y*2;			    y = y*2;
   2280  1.1  mrg 
   2281  1.1  mrg     In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
   2282  1.1  mrg     moved above y=x assignment as z=x*2.
   2283  1.1  mrg 
   2284  1.1  mrg     In Ex.2 y*2 also can be substituted for x*2, but only the right hand
   2285  1.1  mrg     side can be moved because of the output dependency.  The operation was
   2286  1.1  mrg     cropped to its rhs above.  */
   2287  1.1  mrg   if (has_dep_p[DEPS_IN_RHS])
   2288  1.1  mrg     {
   2289  1.1  mrg       ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];
   2290  1.1  mrg 
   2291  1.1  mrg       /* Can't substitute UNIQUE VINSNs.  */
   2292  1.1  mrg       gcc_assert (!VINSN_UNIQUE_P (vi));
   2293  1.1  mrg 
   2294  1.1  mrg       if (can_speculate_dep_p (*rhs_dsp))
   2295  1.1  mrg 	{
   2296  1.1  mrg           int res;
   2297  1.1  mrg 
   2298  1.1  mrg           res = speculate_expr (expr, *rhs_dsp);
   2299  1.1  mrg           if (res >= 0)
   2300  1.1  mrg             {
   2301  1.1  mrg               /* Speculation was successful.  */
   2302  1.1  mrg               *rhs_dsp = 0;
   2303  1.1  mrg               was_changed = (res > 0);
   2304  1.1  mrg               if (res == 2)
   2305  1.1  mrg                 was_target_conflict = true;
   2306  1.1  mrg               if (ptrans_type)
   2307  1.1  mrg                 *ptrans_type = TRANS_SPECULATION;
   2308  1.1  mrg             }
   2309  1.1  mrg 	  else
   2310  1.1  mrg 	    return MOVEUP_EXPR_NULL;
   2311  1.1  mrg 	}
   2312  1.1  mrg       else if (can_substitute_through_p (through_insn,
   2313  1.1  mrg                                          *rhs_dsp)
   2314  1.1  mrg                && substitute_reg_in_expr (expr, through_insn, false))
   2315  1.1  mrg 	{
   2316  1.1  mrg           /* ??? We cannot perform substitution AND speculation on the same
   2317  1.1  mrg              insn.  */
   2318  1.1  mrg           gcc_assert (!was_changed);
   2319  1.1  mrg           was_changed = true;
   2320  1.1  mrg           if (ptrans_type)
   2321  1.1  mrg             *ptrans_type = TRANS_SUBSTITUTION;
   2322  1.1  mrg           EXPR_WAS_SUBSTITUTED (expr) = true;
   2323  1.1  mrg 	}
   2324  1.1  mrg       else
   2325  1.1  mrg 	return MOVEUP_EXPR_NULL;
   2326  1.1  mrg     }
   2327  1.1  mrg 
   2328  1.1  mrg   /* Don't move trapping insns through jumps.
   2329  1.1  mrg      This check should be at the end to give a chance to control speculation
   2330  1.1  mrg      to perform its duties.  */
   2331  1.1  mrg   if (CANT_MOVE_TRAPPING (expr, through_insn))
   2332  1.1  mrg     return MOVEUP_EXPR_NULL;
   2333  1.1  mrg 
   2334  1.1  mrg   return (was_changed
   2335  1.1  mrg           ? MOVEUP_EXPR_CHANGED
   2336  1.1  mrg           : (as_rhs
   2337  1.1  mrg              ? MOVEUP_EXPR_AS_RHS
   2338  1.1  mrg              : MOVEUP_EXPR_SAME));
   2339  1.1  mrg }
   2340  1.1  mrg 
   2341  1.1  mrg /* Try to look at bitmap caches for EXPR and INSN pair, return true
   2342  1.1  mrg    if successful.  When INSIDE_INSN_GROUP, also try ignore dependencies
   2343  1.1  mrg    that can exist within a parallel group.  Write to RES the resulting
   2344  1.1  mrg    code for moveup_expr.  */
   2345  1.1  mrg static bool
   2346  1.1  mrg try_bitmap_cache (expr_t expr, insn_t insn,
   2347  1.1  mrg                   bool inside_insn_group,
   2348  1.1  mrg                   enum MOVEUP_EXPR_CODE *res)
   2349  1.1  mrg {
   2350  1.1  mrg   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
   2351  1.1  mrg 
   2352  1.1  mrg   /* First check whether we've analyzed this situation already.  */
   2353  1.1  mrg   if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
   2354  1.1  mrg     {
   2355  1.1  mrg       if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
   2356  1.1  mrg         {
   2357  1.1  mrg           if (sched_verbose >= 6)
   2358  1.1  mrg             sel_print ("removed (cached)\n");
   2359  1.1  mrg           *res = MOVEUP_EXPR_NULL;
   2360  1.1  mrg           return true;
   2361  1.1  mrg         }
   2362  1.1  mrg       else
   2363  1.1  mrg         {
   2364  1.1  mrg           if (sched_verbose >= 6)
   2365  1.1  mrg             sel_print ("unchanged (cached)\n");
   2366  1.1  mrg           *res = MOVEUP_EXPR_SAME;
   2367  1.1  mrg           return true;
   2368  1.1  mrg         }
   2369  1.1  mrg     }
   2370  1.1  mrg   else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
   2371  1.1  mrg     {
   2372  1.1  mrg       if (inside_insn_group)
   2373  1.1  mrg         {
   2374  1.1  mrg           if (sched_verbose >= 6)
   2375  1.1  mrg             sel_print ("unchanged (as RHS, cached, inside insn group)\n");
   2376  1.1  mrg           *res = MOVEUP_EXPR_SAME;
   2377  1.1  mrg           return true;
   2378  1.1  mrg 
   2379  1.1  mrg         }
   2380  1.1  mrg       else
   2381  1.1  mrg         EXPR_TARGET_AVAILABLE (expr) = false;
   2382  1.1  mrg 
   2383  1.1  mrg       /* This is the only case when propagation result can change over time,
   2384  1.1  mrg          as we can dynamically switch off scheduling as RHS.  In this case,
   2385  1.1  mrg          just check the flag to reach the correct decision.  */
   2386  1.1  mrg       if (enable_schedule_as_rhs_p)
   2387  1.1  mrg         {
   2388  1.1  mrg           if (sched_verbose >= 6)
   2389  1.1  mrg             sel_print ("unchanged (as RHS, cached)\n");
   2390  1.1  mrg           *res = MOVEUP_EXPR_AS_RHS;
   2391  1.1  mrg           return true;
   2392  1.1  mrg         }
   2393  1.1  mrg       else
   2394  1.1  mrg         {
   2395  1.1  mrg           if (sched_verbose >= 6)
   2396  1.1  mrg             sel_print ("removed (cached as RHS, but renaming"
   2397  1.1  mrg                        " is now disabled)\n");
   2398  1.1  mrg           *res = MOVEUP_EXPR_NULL;
   2399  1.1  mrg           return true;
   2400  1.1  mrg         }
   2401  1.1  mrg     }
   2402  1.1  mrg 
   2403  1.1  mrg   return false;
   2404  1.1  mrg }
   2405  1.1  mrg 
   2406  1.1  mrg /* Try to look at bitmap caches for EXPR and INSN pair, return true
   2407  1.1  mrg    if successful.  Write to RES the resulting code for moveup_expr.  */
   2408  1.1  mrg static bool
   2409  1.1  mrg try_transformation_cache (expr_t expr, insn_t insn,
   2410  1.1  mrg                           enum MOVEUP_EXPR_CODE *res)
   2411  1.1  mrg {
   2412  1.1  mrg   struct transformed_insns *pti
   2413  1.1  mrg     = (struct transformed_insns *)
   2414  1.1  mrg     htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
   2415  1.1  mrg                          &EXPR_VINSN (expr),
   2416  1.1  mrg                          VINSN_HASH_RTX (EXPR_VINSN (expr)));
   2417  1.1  mrg   if (pti)
   2418  1.1  mrg     {
   2419  1.1  mrg       /* This EXPR was already moved through this insn and was
   2420  1.1  mrg          changed as a result.  Fetch the proper data from
   2421  1.1  mrg          the hashtable.  */
   2422  1.1  mrg       insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
   2423  1.1  mrg                               INSN_UID (insn), pti->type,
   2424  1.1  mrg                               pti->vinsn_old, pti->vinsn_new,
   2425  1.1  mrg                               EXPR_SPEC_DONE_DS (expr));
   2426  1.1  mrg 
   2427  1.1  mrg       if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
   2428  1.1  mrg         pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
   2429  1.1  mrg       change_vinsn_in_expr (expr, pti->vinsn_new);
   2430  1.1  mrg       if (pti->was_target_conflict)
   2431  1.1  mrg         EXPR_TARGET_AVAILABLE (expr) = false;
   2432  1.1  mrg       if (pti->type == TRANS_SPECULATION)
   2433  1.1  mrg         {
   2434  1.1  mrg           EXPR_SPEC_DONE_DS (expr) = pti->ds;
   2435  1.1  mrg           EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
   2436  1.1  mrg         }
   2437  1.1  mrg 
   2438  1.1  mrg       if (sched_verbose >= 6)
   2439  1.1  mrg         {
   2440  1.1  mrg           sel_print ("changed (cached): ");
   2441  1.1  mrg           dump_expr (expr);
   2442  1.1  mrg           sel_print ("\n");
   2443  1.1  mrg         }
   2444  1.1  mrg 
   2445  1.1  mrg       *res = MOVEUP_EXPR_CHANGED;
   2446  1.1  mrg       return true;
   2447  1.1  mrg     }
   2448  1.1  mrg 
   2449  1.1  mrg   return false;
   2450  1.1  mrg }
   2451  1.1  mrg 
   2452  1.1  mrg /* Update bitmap caches on INSN with result RES of propagating EXPR.  */
   2453  1.1  mrg static void
   2454  1.1  mrg update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
   2455  1.1  mrg                      enum MOVEUP_EXPR_CODE res)
   2456  1.1  mrg {
   2457  1.1  mrg   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
   2458  1.1  mrg 
   2459  1.1  mrg   /* Do not cache result of propagating jumps through an insn group,
   2460  1.1  mrg      as it is always true, which is not useful outside the group.  */
   2461  1.1  mrg   if (inside_insn_group)
   2462  1.1  mrg     return;
   2463  1.1  mrg 
   2464  1.1  mrg   if (res == MOVEUP_EXPR_NULL)
   2465  1.1  mrg     {
   2466  1.1  mrg       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
   2467  1.1  mrg       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
   2468  1.1  mrg     }
   2469  1.1  mrg   else if (res == MOVEUP_EXPR_SAME)
   2470  1.1  mrg     {
   2471  1.1  mrg       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
   2472  1.1  mrg       bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
   2473  1.1  mrg     }
   2474  1.1  mrg   else if (res == MOVEUP_EXPR_AS_RHS)
   2475  1.1  mrg     {
   2476  1.1  mrg       bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
   2477  1.1  mrg       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
   2478  1.1  mrg     }
   2479  1.1  mrg   else
   2480  1.1  mrg     gcc_unreachable ();
   2481  1.1  mrg }
   2482  1.1  mrg 
/* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
   and transformation type TRANS_TYPE.  Nothing is cached when moving
   inside an insn group.  */
static void
update_transformation_cache (expr_t expr, insn_t insn,
                             bool inside_insn_group,
                             enum local_trans_type trans_type,
                             vinsn_t expr_old_vinsn)
{
  struct transformed_insns *pti;

  /* Results obtained inside an insn group are not useful outside it;
     skip caching (mirrors update_bitmap_cache).  */
  if (inside_insn_group)
    return;

  pti = XNEW (struct transformed_insns);
  pti->vinsn_old = expr_old_vinsn;
  pti->vinsn_new = EXPR_VINSN (expr);
  pti->type = trans_type;
  /* was_target_conflict is a file-level flag, presumably set by the
     preceding moveup_expr call -- confirm against its definition.  */
  pti->was_target_conflict = was_target_conflict;
  pti->ds = EXPR_SPEC_DONE_DS (expr);
  pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
  /* The cache entry keeps references to both vinsns, so bump their
     reference counts to keep them alive.  */
  vinsn_attach (pti->vinsn_old);
  vinsn_attach (pti->vinsn_new);
  /* Insert (or overwrite) the slot keyed by the old vinsn's RTX hash.  */
  *((struct transformed_insns **)
    htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
                              pti, VINSN_HASH_RTX (expr_old_vinsn),
                              INSERT)) = pti;
}
   2510  1.1  mrg 
/* Same as moveup_expr, but first looks up the result of
   transformation in caches.  On a cache miss, invokes moveup_expr and
   records the result in the bitmap or transformation cache.  */
static enum MOVEUP_EXPR_CODE
moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
{
  enum MOVEUP_EXPR_CODE res;
  bool got_answer = false;

  if (sched_verbose >= 6)
    {
      sel_print ("Moving ");
      dump_expr (expr);
      sel_print (" through %d: ", INSN_UID (insn));
    }

  if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
      && BLOCK_FOR_INSN (EXPR_INSN_RTX (expr))
      && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
	  == EXPR_INSN_RTX (expr)))
    /* Don't use cached information for debug insns that are heads of
       basic blocks.  */;
  else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
    /* When inside insn group, we do not want remove stores conflicting
       with previosly issued loads.  */
    got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
  else if (try_transformation_cache (expr, insn, &res))
    got_answer = true;

  if (! got_answer)
    {
      /* Invoke moveup_expr and record the results.  */
      vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
      ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
      int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
      bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
      enum local_trans_type trans_type = TRANS_SUBSTITUTION;

      /* ??? Invent something better than this.  We can't allow old_vinsn
         to go, we need it for the history vector.  */
      vinsn_attach (expr_old_vinsn);

      res = moveup_expr (expr, insn, inside_insn_group,
                         &trans_type);
      switch (res)
        {
        case MOVEUP_EXPR_NULL:
          /* EXPR is blocked by INSN; remember that in the bitmap cache.  */
          update_bitmap_cache (expr, insn, inside_insn_group, res);
	  if (sched_verbose >= 6)
            sel_print ("removed\n");
	  break;

	case MOVEUP_EXPR_SAME:
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("unchanged\n");
	  break;

        case MOVEUP_EXPR_AS_RHS:
          gcc_assert (!unique_p || inside_insn_group);
          update_bitmap_cache (expr, insn, inside_insn_group, res);
	  if (sched_verbose >= 6)
            sel_print ("unchanged (as RHS)\n");
	  break;

	case MOVEUP_EXPR_CHANGED:
          /* Either the insn rtx or the speculation status must really
             have changed.  Record the transformation in the expression's
             history and in the per-insn transformation cache.  */
          gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
                      || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
          insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
                                  INSN_UID (insn), trans_type,
                                  expr_old_vinsn, EXPR_VINSN (expr),
                                  expr_old_spec_ds);
          update_transformation_cache (expr, insn, inside_insn_group,
                                       trans_type, expr_old_vinsn);
          if (sched_verbose >= 6)
            {
              sel_print ("changed: ");
              dump_expr (expr);
              sel_print ("\n");
            }
	  break;
	default:
	  gcc_unreachable ();
        }

      /* Drop the reference taken above; the history vector attached its
         own reference if it needed one.  */
      vinsn_detach (expr_old_vinsn);
    }

  return res;
}
   2600  1.1  mrg 
   2601  1.1  mrg /* Moves an av set AVP up through INSN, performing necessary
   2602  1.1  mrg    transformations.  */
   2603  1.1  mrg static void
   2604  1.1  mrg moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
   2605  1.1  mrg {
   2606  1.1  mrg   av_set_iterator i;
   2607  1.1  mrg   expr_t expr;
   2608  1.1  mrg 
   2609  1.1  mrg   FOR_EACH_EXPR_1 (expr, i, avp)
   2610  1.1  mrg     {
   2611  1.1  mrg 
   2612  1.1  mrg       switch (moveup_expr_cached (expr, insn, inside_insn_group))
   2613  1.1  mrg 	{
   2614  1.1  mrg 	case MOVEUP_EXPR_SAME:
   2615  1.1  mrg         case MOVEUP_EXPR_AS_RHS:
   2616  1.1  mrg 	  break;
   2617  1.1  mrg 
   2618  1.1  mrg 	case MOVEUP_EXPR_NULL:
   2619  1.1  mrg 	  av_set_iter_remove (&i);
   2620  1.1  mrg 	  break;
   2621  1.1  mrg 
   2622  1.1  mrg 	case MOVEUP_EXPR_CHANGED:
   2623  1.1  mrg           expr = merge_with_other_exprs (avp, &i, expr);
   2624  1.1  mrg 	  break;
   2625  1.1  mrg 
   2626  1.1  mrg 	default:
   2627  1.1  mrg 	  gcc_unreachable ();
   2628  1.1  mrg 	}
   2629  1.1  mrg     }
   2630  1.1  mrg }
   2631  1.1  mrg 
   2632  1.1  mrg /* Moves AVP set along PATH.  */
   2633  1.1  mrg static void
   2634  1.1  mrg moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
   2635  1.1  mrg {
   2636  1.1  mrg   int last_cycle;
   2637  1.1  mrg 
   2638  1.1  mrg   if (sched_verbose >= 6)
   2639  1.1  mrg     sel_print ("Moving expressions up in the insn group...\n");
   2640  1.1  mrg   if (! path)
   2641  1.1  mrg     return;
   2642  1.1  mrg   last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
   2643  1.1  mrg   while (path
   2644  1.1  mrg          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
   2645  1.1  mrg     {
   2646  1.1  mrg       moveup_set_expr (avp, ILIST_INSN (path), true);
   2647  1.1  mrg       path = ILIST_NEXT (path);
   2648  1.1  mrg     }
   2649  1.1  mrg }
   2650  1.1  mrg 
   2651  1.1  mrg /* Returns true if after moving EXPR along PATH it equals to EXPR_VLIW.  */
   2652  1.1  mrg static bool
   2653  1.1  mrg equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
   2654  1.1  mrg {
   2655  1.1  mrg   expr_def _tmp, *tmp = &_tmp;
   2656  1.1  mrg   int last_cycle;
   2657  1.1  mrg   bool res = true;
   2658  1.1  mrg 
   2659  1.1  mrg   copy_expr_onside (tmp, expr);
   2660  1.1  mrg   last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
   2661  1.1  mrg   while (path
   2662  1.1  mrg          && res
   2663  1.1  mrg          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
   2664  1.1  mrg     {
   2665  1.1  mrg       res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
   2666  1.1  mrg              != MOVEUP_EXPR_NULL);
   2667  1.1  mrg       path = ILIST_NEXT (path);
   2668  1.1  mrg     }
   2669  1.1  mrg 
   2670  1.1  mrg   if (res)
   2671  1.1  mrg     {
   2672  1.1  mrg       vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
   2673  1.1  mrg       vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);
   2674  1.1  mrg 
   2675  1.1  mrg       if (tmp_vinsn != expr_vliw_vinsn)
   2676  1.1  mrg 	res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
   2677  1.1  mrg     }
   2678  1.1  mrg 
   2679  1.1  mrg   clear_expr (tmp);
   2680  1.1  mrg   return res;
   2681  1.1  mrg }
   2682  1.1  mrg 
   2683  1.1  mrg 
   2685  1.1  mrg /* Functions that compute av and lv sets.  */
   2686  1.1  mrg 
   2687  1.1  mrg /* Returns true if INSN is not a downward continuation of the given path P in
   2688  1.1  mrg    the current stage.  */
   2689  1.1  mrg static bool
   2690  1.1  mrg is_ineligible_successor (insn_t insn, ilist_t p)
   2691  1.1  mrg {
   2692  1.1  mrg   insn_t prev_insn;
   2693  1.1  mrg 
   2694  1.1  mrg   /* Check if insn is not deleted.  */
   2695  1.1  mrg   if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
   2696  1.1  mrg     gcc_unreachable ();
   2697  1.1  mrg   else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
   2698  1.1  mrg     gcc_unreachable ();
   2699  1.1  mrg 
   2700  1.1  mrg   /* If it's the first insn visited, then the successor is ok.  */
   2701  1.1  mrg   if (!p)
   2702  1.1  mrg     return false;
   2703  1.1  mrg 
   2704  1.1  mrg   prev_insn = ILIST_INSN (p);
   2705  1.1  mrg 
   2706  1.1  mrg   if (/* a backward edge.  */
   2707  1.1  mrg       INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
   2708  1.1  mrg       /* is already visited.  */
   2709  1.1  mrg       || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
   2710  1.1  mrg 	  && (ilist_is_in_p (p, insn)
   2711  1.1  mrg               /* We can reach another fence here and still seqno of insn
   2712  1.1  mrg                  would be equal to seqno of prev_insn.  This is possible
   2713  1.1  mrg                  when prev_insn is a previously created bookkeeping copy.
   2714  1.1  mrg                  In that case it'd get a seqno of insn.  Thus, check here
   2715  1.1  mrg                  whether insn is in current fence too.  */
   2716  1.1  mrg               || IN_CURRENT_FENCE_P (insn)))
   2717  1.1  mrg       /* Was already scheduled on this round.  */
   2718  1.1  mrg       || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
   2719  1.1  mrg 	  && IN_CURRENT_FENCE_P (insn))
   2720  1.1  mrg       /* An insn from another fence could also be
   2721  1.1  mrg 	 scheduled earlier even if this insn is not in
   2722  1.1  mrg 	 a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
   2723  1.1  mrg       || (!pipelining_p
   2724  1.1  mrg           && INSN_SCHED_TIMES (insn) > 0))
   2725  1.1  mrg     return true;
   2726  1.1  mrg   else
   2727  1.1  mrg     return false;
   2728  1.1  mrg }
   2729  1.1  mrg 
/* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
   of handling multiple successors and properly merging its av_sets.  P is
   the current path traversed.  WS is the size of lookahead window.
   Return the av set computed.  */
static av_set_t
compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
{
  struct succs_info *sinfo;
  av_set_t expr_in_all_succ_branches = NULL;
  int is;
  insn_t succ, zero_succ = NULL;
  av_set_t av1 = NULL;

  gcc_assert (sel_bb_end_p (insn));

  /* Find different kind of successors needed for correct computing of
     SPEC and TARGET_AVAILABLE attributes.  */
  sinfo = compute_succs_info (insn, SUCCS_NORMAL);

  /* Debug output.  */
  if (sched_verbose >= 6)
    {
      sel_print ("successors of bb end (%d): ", INSN_UID (insn));
      dump_insn_vector (sinfo->succs_ok);
      sel_print ("\n");
      if (sinfo->succs_ok_n != sinfo->all_succs_n)
        sel_print ("real successors num: %d\n", sinfo->all_succs_n);
    }

  /* Add insn to the tail of current path.  */
  ilist_add (&p, insn);

  /* First pass: compute each successor's av set and union them all
     into AV1.  */
  FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
    {
      av_set_t succ_set;

      /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
      succ_set = compute_av_set_inside_bb (succ, p, ws, true);

      /* Scale expression usefulness by this successor's probability.  */
      av_set_split_usefulness (succ_set,
                               sinfo->probs_ok[is],
                               sinfo->all_prob);

      if (sinfo->all_succs_n > 1)
	{
          /* Find EXPR'es that came from *all* successors and save them
             into expr_in_all_succ_branches.  This set will be used later
             for calculating speculation attributes of EXPR'es.  */
          if (is == 0)
            {
              expr_in_all_succ_branches = av_set_copy (succ_set);

              /* Remember the first successor for later. */
              zero_succ = succ;
            }
          else
            {
              av_set_iterator i;
              expr_t expr;

              /* Intersect: drop expressions not present in this
                 successor's set.  */
              FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
                if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
                  av_set_iter_remove (&i);
            }
	}

      /* Union the av_sets.  Check liveness restrictions on target registers
         in special case of two successors.  */
      if (sinfo->succs_ok_n == 2 && is == 1)
        {
          basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
          basic_block bb1 = BLOCK_FOR_INSN (succ);

          gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
          av_set_union_and_live (&av1, &succ_set,
                                 BB_LV_SET (bb0),
                                 BB_LV_SET (bb1),
                                 insn);
        }
      else
        av_set_union_and_clear (&av1, &succ_set, insn);
    }

  /* Check liveness restrictions via hard way when there are more than
     two successors.  */
  if (sinfo->succs_ok_n > 2)
    FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
      {
        basic_block succ_bb = BLOCK_FOR_INSN (succ);
	av_set_t av_succ = (is_ineligible_successor (succ, p)
			    ? NULL
			    : BB_AV_SET (succ_bb));

        gcc_assert (BB_LV_SET_VALID_P (succ_bb));
	mark_unavailable_targets (av1, av_succ, BB_LV_SET (succ_bb));
      }

  /* Finally, check liveness restrictions on paths leaving the region.  */
  if (sinfo->all_succs_n > sinfo->succs_ok_n)
    FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
      mark_unavailable_targets
        (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));

  if (sinfo->all_succs_n > 1)
    {
      av_set_iterator i;
      expr_t expr;

      /* Increase the spec attribute of all EXPR'es that didn't come
	 from all successors.  */
      FOR_EACH_EXPR (expr, i, av1)
	if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
	  EXPR_SPEC (expr)++;

      av_set_clear (&expr_in_all_succ_branches);

      /* Do not move conditional branches through other
	 conditional branches.  So, remove all conditional
	 branches from av_set if current operator is a conditional
	 branch.  */
      av_set_substract_cond_branches (&av1);
    }

  /* Pop INSN from the path and release successor info.  */
  ilist_remove (&p);
  free_succs_info (sinfo);

  if (sched_verbose >= 6)
    {
      sel_print ("av_succs (%d): ", INSN_UID (insn));
      dump_av_set (av1);
      sel_print ("\n");
    }

  return av1;
}
   2865  1.1  mrg 
/* This function computes av_set for the FIRST_INSN by dragging valid
   av_set through all basic block insns either from the end of basic block
   (computed using compute_av_set_at_bb_end) or from the insn on which
   MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute av_set
   below the basic block and handling conditional branches.
   FIRST_INSN - the basic block head, P - path consisting of the insns
   traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
   and bb ends are added to the path), WS - current window size,
   NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
static av_set_t
compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
			  bool need_copy_p)
{
  insn_t cur_insn;
  int end_ws = ws;
  insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
  insn_t after_bb_end = NEXT_INSN (bb_end);
  insn_t last_insn;
  av_set_t av = NULL;
  basic_block cur_bb = BLOCK_FOR_INSN (first_insn);

  /* Return NULL if insn is not on the legitimate downward path.  */
  if (is_ineligible_successor (first_insn, p))
    {
      if (sched_verbose >= 6)
        sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));

      return NULL;
    }

  /* If insn already has valid av(insn) computed, just return it.  */
  if (AV_SET_VALID_P (first_insn))
    {
      av_set_t av_set;

      /* Only bb heads carry a cached av set; for other insns with a
         valid level the set is empty.  */
      if (sel_bb_head_p (first_insn))
	av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
      else
	av_set = NULL;

      if (sched_verbose >= 6)
        {
          sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
          dump_av_set (av_set);
          sel_print ("\n");
        }

      return need_copy_p ? av_set_copy (av_set) : av_set;
    }

  ilist_add (&p, first_insn);

  /* As the result after this loop have completed, in LAST_INSN we'll
     have the insn which has valid av_set to start backward computation
     from: it either will be NULL because on it the window size was exceeded
     or other valid av_set as returned by compute_av_set for the last insn
     of the basic block.  */
  for (last_insn = first_insn; last_insn != after_bb_end;
       last_insn = NEXT_INSN (last_insn))
    {
      /* We may encounter valid av_set not only on bb_head, but also on
	 those insns on which previously MAX_WS was exceeded.  */
      if (AV_SET_VALID_P (last_insn))
	{
          if (sched_verbose >= 6)
            sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
	  break;
	}

      /* The special case: the last insn of the BB may be an
         ineligible_successor due to its SEQ_NO that was set on
	 it as a bookkeeping.  */
      if (last_insn != first_insn
          && is_ineligible_successor (last_insn, p))
	{
          if (sched_verbose >= 6)
            sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
	  break;
	}

      /* Debug insns do not count against the lookahead window.  */
      if (DEBUG_INSN_P (last_insn))
	continue;

      if (end_ws > max_ws)
	{
	  /* We can reach max lookahead size at bb_header, so clean av_set
	     first.  */
	  INSN_WS_LEVEL (last_insn) = global_level;

	  if (sched_verbose >= 6)
            sel_print ("Insn %d is beyond the software lookahead window size\n",
                       INSN_UID (last_insn));
	  break;
	}

      end_ws++;
    }

  /* Get the valid av_set into AV above the LAST_INSN to start backward
     computation from.  It either will be empty av_set or av_set computed from
     the successors on the last insn of the current bb.  */
  if (last_insn != after_bb_end)
    {
      av = NULL;

      /* This is needed only to obtain av_sets that are identical to
         those computed by the old compute_av_set version.  */
      if (last_insn == first_insn && !INSN_NOP_P (last_insn))
        av_set_add (&av, INSN_EXPR (last_insn));
    }
  else
    /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
    av = compute_av_set_at_bb_end (bb_end, p, end_ws);

  /* Compute av_set in AV starting from below the LAST_INSN up to
     location above the FIRST_INSN.  */
  for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
       cur_insn = PREV_INSN (cur_insn))
    if (!INSN_NOP_P (cur_insn))
      {
        expr_t expr;

        moveup_set_expr (&av, cur_insn, false);

        /* If the expression for CUR_INSN is already in the set,
           replace it by the new one.  */
        expr = av_set_lookup (av, INSN_VINSN (cur_insn));
        if (expr != NULL)
          {
            clear_expr (expr);
            copy_expr (expr, INSN_EXPR (cur_insn));
          }
        else
          av_set_add (&av, INSN_EXPR (cur_insn));
      }

  /* Clear stale bb_av_set.  */
  if (sel_bb_head_p (first_insn))
    {
      av_set_clear (&BB_AV_SET (cur_bb));
      BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
      BB_AV_LEVEL (cur_bb) = global_level;
    }

  if (sched_verbose >= 6)
    {
      sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
      dump_av_set (av);
      sel_print ("\n");
    }

  ilist_remove (&p);
  return av;
}
   3020  1.1  mrg 
/* Compute av set before INSN.
   INSN - the current operation (actual rtx INSN)
   P - the current path, which is list of insns visited so far
   WS - software lookahead window size.
   UNIQUE_P - TRUE, if returned av_set will be changed, hence
   if we want to save computed av_set in s_i_d, we should make a copy of it.

   In the resulting set we will have only expressions that don't have delay
   stalls and nonsubstitutable dependences.  */
static av_set_t
compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
{
  /* Thin wrapper: UNIQUE_P is forwarded as NEED_COPY_P, so a caller
     that will modify the result gets its own copy rather than the
     cached bb-level set.  */
  return compute_av_set_inside_bb (insn, p, ws, unique_p);
}
   3035  1.1  mrg 
   3036  1.1  mrg /* Propagate a liveness set LV through INSN.  */
   3037  1.1  mrg static void
   3038  1.1  mrg propagate_lv_set (regset lv, insn_t insn)
   3039  1.1  mrg {
   3040  1.1  mrg   gcc_assert (INSN_P (insn));
   3041  1.1  mrg 
   3042  1.1  mrg   if (INSN_NOP_P (insn))
   3043  1.1  mrg     return;
   3044  1.1  mrg 
   3045  1.1  mrg   df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
   3046  1.1  mrg }
   3047  1.1  mrg 
   3048  1.1  mrg /* Return livness set at the end of BB.  */
   3049  1.1  mrg static regset
   3050  1.1  mrg compute_live_after_bb (basic_block bb)
   3051  1.1  mrg {
   3052  1.1  mrg   edge e;
   3053  1.1  mrg   edge_iterator ei;
   3054  1.1  mrg   regset lv = get_clear_regset_from_pool ();
   3055  1.1  mrg 
   3056  1.1  mrg   gcc_assert (!ignore_first);
   3057  1.1  mrg 
   3058  1.1  mrg   FOR_EACH_EDGE (e, ei, bb->succs)
   3059  1.1  mrg     if (sel_bb_empty_p (e->dest))
   3060  1.1  mrg       {
   3061  1.1  mrg         if (! BB_LV_SET_VALID_P (e->dest))
   3062  1.1  mrg           {
   3063  1.1  mrg             gcc_unreachable ();
   3064  1.1  mrg             gcc_assert (BB_LV_SET (e->dest) == NULL);
   3065  1.1  mrg             BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
   3066  1.1  mrg             BB_LV_SET_VALID_P (e->dest) = true;
   3067  1.1  mrg           }
   3068  1.1  mrg         IOR_REG_SET (lv, BB_LV_SET (e->dest));
   3069  1.1  mrg       }
   3070  1.1  mrg     else
   3071  1.1  mrg       IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));
   3072  1.1  mrg 
   3073  1.1  mrg   return lv;
   3074  1.1  mrg }
   3075  1.1  mrg 
   3076  1.1  mrg /* Compute the set of all live registers at the point before INSN and save
   3077  1.1  mrg    it at INSN if INSN is bb header.  */
   3078  1.1  mrg regset
   3079  1.1  mrg compute_live (insn_t insn)
   3080  1.1  mrg {
   3081  1.1  mrg   basic_block bb = BLOCK_FOR_INSN (insn);
   3082  1.1  mrg   insn_t final, temp;
   3083  1.1  mrg   regset lv;
   3084  1.1  mrg 
   3085  1.1  mrg   /* Return the valid set if we're already on it.  */
   3086  1.1  mrg   if (!ignore_first)
   3087  1.1  mrg     {
   3088  1.1  mrg       regset src = NULL;
   3089  1.1  mrg 
   3090  1.1  mrg       if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
   3091  1.1  mrg         src = BB_LV_SET (bb);
   3092  1.1  mrg       else
   3093  1.1  mrg         {
   3094  1.1  mrg           gcc_assert (in_current_region_p (bb));
   3095  1.1  mrg           if (INSN_LIVE_VALID_P (insn))
   3096  1.1  mrg             src = INSN_LIVE (insn);
   3097  1.1  mrg         }
   3098  1.1  mrg 
   3099  1.1  mrg       if (src)
   3100  1.1  mrg 	{
   3101  1.1  mrg 	  lv = get_regset_from_pool ();
   3102  1.1  mrg 	  COPY_REG_SET (lv, src);
   3103  1.1  mrg 
   3104  1.1  mrg           if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
   3105  1.1  mrg             {
   3106  1.1  mrg               COPY_REG_SET (BB_LV_SET (bb), lv);
   3107  1.1  mrg               BB_LV_SET_VALID_P (bb) = true;
   3108  1.1  mrg             }
   3109  1.1  mrg 
   3110  1.1  mrg 	  return_regset_to_pool (lv);
   3111  1.1  mrg 	  return lv;
   3112  1.1  mrg 	}
   3113  1.1  mrg     }
   3114  1.1  mrg 
   3115  1.1  mrg   /* We've skipped the wrong lv_set.  Don't skip the right one.  */
   3116  1.1  mrg   ignore_first = false;
   3117  1.1  mrg   gcc_assert (in_current_region_p (bb));
   3118  1.1  mrg 
   3119  1.1  mrg   /* Find a valid LV set in this block or below, if needed.
   3120  1.1  mrg      Start searching from the next insn: either ignore_first is true, or
   3121  1.1  mrg      INSN doesn't have a correct live set.  */
   3122  1.1  mrg   temp = NEXT_INSN (insn);
   3123  1.1  mrg   final = NEXT_INSN (BB_END (bb));
   3124  1.1  mrg   while (temp != final && ! INSN_LIVE_VALID_P (temp))
   3125  1.1  mrg     temp = NEXT_INSN (temp);
   3126  1.1  mrg   if (temp == final)
   3127  1.1  mrg     {
   3128  1.1  mrg       lv = compute_live_after_bb (bb);
   3129  1.1  mrg       temp = PREV_INSN (temp);
   3130  1.1  mrg     }
   3131  1.1  mrg   else
   3132  1.1  mrg     {
   3133  1.1  mrg       lv = get_regset_from_pool ();
   3134  1.1  mrg       COPY_REG_SET (lv, INSN_LIVE (temp));
   3135  1.1  mrg     }
   3136  1.1  mrg 
   3137  1.1  mrg   /* Put correct lv sets on the insns which have bad sets.  */
   3138  1.1  mrg   final = PREV_INSN (insn);
   3139  1.1  mrg   while (temp != final)
   3140  1.1  mrg     {
   3141  1.1  mrg       propagate_lv_set (lv, temp);
   3142  1.1  mrg       COPY_REG_SET (INSN_LIVE (temp), lv);
   3143  1.1  mrg       INSN_LIVE_VALID_P (temp) = true;
   3144  1.1  mrg       temp = PREV_INSN (temp);
   3145  1.1  mrg     }
   3146  1.1  mrg 
   3147  1.1  mrg   /* Also put it in a BB.  */
   3148  1.1  mrg   if (sel_bb_head_p (insn))
   3149  1.1  mrg     {
   3150  1.1  mrg       basic_block bb = BLOCK_FOR_INSN (insn);
   3151  1.1  mrg 
   3152  1.1  mrg       COPY_REG_SET (BB_LV_SET (bb), lv);
   3153  1.1  mrg       BB_LV_SET_VALID_P (bb) = true;
   3154  1.1  mrg     }
   3155  1.1  mrg 
   3156  1.1  mrg   /* We return LV to the pool, but will not clear it there.  Thus we can
   3157  1.1  mrg      legimatelly use LV till the next use of regset_pool_get ().  */
   3158  1.1  mrg   return_regset_to_pool (lv);
   3159  1.1  mrg   return lv;
   3160  1.1  mrg }
   3161  1.1  mrg 
/* Update liveness sets for INSN.  Setting IGNORE_FIRST makes compute_live
   skip INSN's own (possibly stale) cached live set and recompute liveness
   from the insns below, refreshing INSN_LIVE along the way.  */
static inline void
update_liveness_on_insn (rtx_insn *insn)
{
  /* Tell compute_live not to trust the lv set cached on INSN itself.  */
  ignore_first = true;
  compute_live (insn);
}
   3169  1.1  mrg 
/* Compute liveness below INSN and write it into REGS.  REGS is only
   IOR-accumulated into, never cleared here, so the caller is responsible
   for initializing it before the call.  */
static inline void
compute_live_below_insn (rtx_insn *insn, regset regs)
{
  rtx_insn *succ;
  succ_iterator si;

  /* Union the live sets of all of INSN's successors (SUCCS_ALL).  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
    IOR_REG_SET (regs, compute_live (succ));
}
   3180  1.1  mrg 
/* Update the data gathered in av and lv sets starting from INSN.  First the
   liveness (lv) sets are refreshed; then, when INSN heads a scheduling block,
   the block's av set is invalidated and recomputed.  */
static void
update_data_sets (rtx_insn *insn)
{
  update_liveness_on_insn (insn);
  if (sel_bb_head_p (insn))
    {
      gcc_assert (AV_LEVEL (insn) != 0);
      /* Force compute_av_set to rebuild the block's av set instead of
	 reusing the cached one.  */
      BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
      compute_av_set (insn, NULL, 0, 0);
    }
}
   3193  1.1  mrg 
   3194  1.1  mrg 
   3196  1.1  mrg /* Helper for move_op () and find_used_regs ().
   3197  1.1  mrg    Return speculation type for which a check should be created on the place
   3198  1.1  mrg    of INSN.  EXPR is one of the original ops we are searching for.  */
   3199  1.1  mrg static ds_t
   3200  1.1  mrg get_spec_check_type_for_insn (insn_t insn, expr_t expr)
   3201  1.1  mrg {
   3202  1.1  mrg   ds_t to_check_ds;
   3203  1.1  mrg   ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));
   3204  1.1  mrg 
   3205  1.1  mrg   to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);
   3206  1.1  mrg 
   3207  1.1  mrg   if (targetm.sched.get_insn_checked_ds)
   3208  1.1  mrg     already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);
   3209  1.1  mrg 
   3210  1.1  mrg   if (spec_info != NULL
   3211  1.1  mrg       && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
   3212  1.1  mrg     already_checked_ds |= BEGIN_CONTROL;
   3213  1.1  mrg 
   3214  1.1  mrg   already_checked_ds = ds_get_speculation_types (already_checked_ds);
   3215  1.1  mrg 
   3216  1.1  mrg   to_check_ds &= ~already_checked_ds;
   3217  1.1  mrg 
   3218  1.1  mrg   return to_check_ds;
   3219  1.1  mrg }
   3220  1.1  mrg 
/* Find the set of registers that are unavailable for storing expres
   while moving ORIG_OPS up on the path starting from INSN due to
   liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).

   All the original operations found during the traversal are saved in the
   ORIGINAL_INSNS list.

   REG_RENAME_P denotes the set of hardware registers that
   cannot be used with renaming due to the register class restrictions,
   mode restrictions and other (the register we'll choose should be
   compatible class with the original uses, shouldn't be in call_used_regs,
   should be HARD_REGNO_RENAME_OK etc).

   Returns TRUE if we've found all original insns, FALSE otherwise.

   This function utilizes code_motion_path_driver (formerly find_used_regs_1)
   to traverse the code motion paths.  This helper function finds registers
   that are not available for storing expres while moving ORIG_OPS up on the
   path starting from INSN.  A register considered as used on the moving path,
   if one of the following conditions is not satisfied:

      (1) a register not set or read on any path from xi to an instance of
	  the original operation,
      (2) not among the live registers of the point immediately following the
          first original operation on a given downward path, except for the
	  original target register of the operation,
      (3) not live on the other path of any conditional branch that is passed
	  by the operation, in case original operations are not present on
	  both paths of the conditional branch.

   REG_RENAME_P->CROSSED_CALL_ABIS is true, if there is a call insn on the path
   from INSN to original insn. In this case CALL_USED_REG_SET will be added
   to unavailable hard regs at the point original operation is found.  */

static bool
find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
		struct reg_rename  *reg_rename_p, def_list_t *original_insns)
{
  def_list_iterator i;
  def_t def;
  int res;
  bool needs_spec_check_p = false;
  expr_t expr;
  av_set_iterator expr_iter;
  struct fur_static_params sparams;
  struct cmpd_local_params lparams;

  /* We haven't visited any blocks yet.  */
  bitmap_clear (code_motion_visited_blocks);

  /* Init parameters for code_motion_path_driver.  */
  sparams.crossed_call_abis = 0;
  sparams.original_insns = original_insns;
  sparams.used_regs = used_regs;

  /* Set the appropriate hooks and data.  */
  code_motion_path_driver_info = &fur_hooks;

  /* Walk the code motion paths; the fur_hooks callbacks collect used regs
     and the original insns into SPARAMS.  */
  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);

  reg_rename_p->crossed_call_abis |= sparams.crossed_call_abis;

  /* The driver is expected to always succeed here and to have found at
     least one original insn.  */
  gcc_assert (res == 1);
  gcc_assert (original_insns && *original_insns);

  /* ??? We calculate whether an expression needs a check when computing
     av sets.  This information is not as precise as it could be due to
     merging this bit in merge_expr.  We can do better in find_used_regs,
     but we want to avoid multiple traversals of the same code motion
     paths.  */
  FOR_EACH_EXPR (expr, expr_iter, orig_ops)
    needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);

  /* Mark hardware regs in REG_RENAME_P that are not suitable
     for renaming expr in INSN due to hardware restrictions (register class,
     modes compatibility etc).  */
  FOR_EACH_DEF (def, i, *original_insns)
    {
      vinsn_t vinsn = INSN_VINSN (def->orig_insn);

      if (VINSN_SEPARABLE_P (vinsn))
	mark_unavailable_hard_regs (def, reg_rename_p, used_regs);

      /* Do not allow clobbering of ld.[sa] address in case some of the
         original operations need a check.  */
      if (needs_spec_check_p)
	IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
    }

  return true;
}
   3315  1.1  mrg 
   3316  1.1  mrg 
   3318  1.1  mrg /* Functions to choose the best insn from available ones.  */
   3319  1.1  mrg 
   3320  1.1  mrg /* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
   3321  1.1  mrg static int
   3322  1.1  mrg sel_target_adjust_priority (expr_t expr)
   3323  1.1  mrg {
   3324  1.1  mrg   int priority = EXPR_PRIORITY (expr);
   3325  1.1  mrg   int new_priority;
   3326  1.1  mrg 
   3327  1.1  mrg   if (targetm.sched.adjust_priority)
   3328  1.1  mrg     new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
   3329  1.1  mrg   else
   3330  1.1  mrg     new_priority = priority;
   3331  1.1  mrg 
   3332  1.1  mrg   /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
   3333  1.1  mrg   EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
   3334  1.1  mrg 
   3335  1.1  mrg   if (sched_verbose >= 4)
   3336  1.1  mrg     sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
   3337  1.1  mrg 	       INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
   3338  1.1  mrg 	       EXPR_PRIORITY_ADJ (expr), new_priority);
   3339  1.1  mrg 
   3340  1.1  mrg   return new_priority;
   3341  1.1  mrg }
   3342  1.1  mrg 
/* Rank two available exprs for schedule.  Never return 0 here.
   NOTE: X and Y are deliberately read in swapped order below (TMP comes
   from Y, TMP2 from X), so "better" exprs compare greater; the caller in
   fill_vec_av_set walks the sorted vector from the back to visit the best
   expressions first.  */
static int
sel_rank_for_schedule (const void *x, const void *y)
{
  expr_t tmp = *(const expr_t *) y;
  expr_t tmp2 = *(const expr_t *) x;
  insn_t tmp_insn, tmp2_insn;
  vinsn_t tmp_vinsn, tmp2_vinsn;
  int val;

  tmp_vinsn = EXPR_VINSN (tmp);
  tmp2_vinsn = EXPR_VINSN (tmp2);
  tmp_insn = EXPR_INSN_RTX (tmp);
  tmp2_insn = EXPR_INSN_RTX (tmp2);

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
    return -1;
  else if (DEBUG_INSN_P (tmp2_insn))
    return 1;

  /* Prefer SCHED_GROUP_P insns to any others.  */
  if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
    {
      if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
        return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;

      /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
         cannot be cloned.  */
      if (VINSN_UNIQUE_P (tmp2_vinsn))
        return 1;
      return -1;
    }

  /* Discourage scheduling of speculative checks.  */
  val = (sel_insn_is_speculation_check (tmp_insn)
	 - sel_insn_is_speculation_check (tmp2_insn));
  if (val)
    return val;

  /* Prefer not scheduled insn over scheduled one.  */
  if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
    {
      val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
      if (val)
	return val;
    }

  /* Prefer jump over non-jump instruction.  */
  if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
    return -1;
  else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
    return 1;

  /* Prefer an expr with non-zero usefulness.  When both are zero, treat
     them as equally (minimally) useful so the priority comparison below
     still applies.  */
  int u1 = EXPR_USEFULNESS (tmp), u2 = EXPR_USEFULNESS (tmp2);

  if (u1 == 0)
    {
      if (u2 == 0)
        u1 = u2 = 1;
      else
        return 1;
    }
  else if (u2 == 0)
    return -1;

  /* Prefer an expr with greater priority, weighted by usefulness.  */
  val = (u2 * (EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2))
         - u1 * (EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp)));
  if (val)
    return val;

  if (spec_info != NULL && spec_info->mask != 0)
    /* This code was taken from haifa-sched.cc: rank_for_schedule ().  */
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = EXPR_SPEC_DONE_DS (tmp);
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = EXPR_SPEC_DONE_DS (tmp2);
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      /* Only let speculation weakness decide when the difference is
	 significant (more than 1/8 of the scale).  */
      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an old insn to a bookkeeping insn.  */
  if (INSN_UID (tmp_insn) < first_emitted_uid
      && INSN_UID (tmp2_insn) >= first_emitted_uid)
    return -1;
  if (INSN_UID (tmp_insn) >= first_emitted_uid
      && INSN_UID (tmp2_insn) < first_emitted_uid)
    return 1;

  /* Prefer an insn with smaller UID, as a last resort.
     We can't safely use INSN_LUID as it is defined only for those insns
     that are in the stream.  */
  return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
}
   3453  1.1  mrg 
   3454  1.1  mrg /* Filter out expressions from av set pointed to by AV_PTR
   3455  1.1  mrg    that are pipelined too many times.  */
   3456  1.1  mrg static void
   3457  1.1  mrg process_pipelined_exprs (av_set_t *av_ptr)
   3458  1.1  mrg {
   3459  1.1  mrg   expr_t expr;
   3460  1.1  mrg   av_set_iterator si;
   3461  1.1  mrg 
   3462  1.1  mrg   /* Don't pipeline already pipelined code as that would increase
   3463  1.1  mrg      number of unnecessary register moves.  */
   3464  1.1  mrg   FOR_EACH_EXPR_1 (expr, si, av_ptr)
   3465  1.1  mrg     {
   3466  1.1  mrg       if (EXPR_SCHED_TIMES (expr)
   3467  1.1  mrg 	  >= param_selsched_max_sched_times)
   3468  1.1  mrg 	av_set_iter_remove (&si);
   3469  1.1  mrg     }
   3470  1.1  mrg }
   3471  1.1  mrg 
/* Filter speculative insns from AV_PTR if we don't want them.  */
static void
process_spec_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;

  /* Nothing to do when speculation is disabled.  */
  if (spec_info == NULL)
    return;

  /* Scan *AV_PTR to find out if we want to consider speculative
     instructions for scheduling.  */
  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      ds_t ds;

      ds = EXPR_SPEC_DONE_DS (expr);

      /* The probability of a success is too low - don't speculate.
	 NOTE(review): the "pipelining_p && false" clause is dead code,
	 apparently disabled on purpose — confirm against upstream history
	 before re-enabling or removing it.  */
      if ((ds & SPECULATIVE)
          && (ds_weak (ds) < spec_info->data_weakness_cutoff
              || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
	      || (pipelining_p && false
		  && (ds & DATA_SPEC)
		  && (ds & CONTROL_SPEC))))
        {
          av_set_iter_remove (&si);
          continue;
        }
    }
}
   3503  1.1  mrg 
/* Search for any use-like insns in AV_PTR and decide on scheduling
   them.  Return one when found, and NULL otherwise.
   Note that we check here whether a USE could be scheduled to avoid
   an infinite loop later.
   An expr is treated as use-like when recog_memoized fails on it
   (insn code < 0).  */
static expr_t
process_use_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;
  bool uses_present_p = false;
  /* Only schedule USEs when the set contains nothing but USEs.  */
  bool try_uses_p = true;

  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      /* This will also initialize INSN_CODE for later use.  */
      if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
        {
          /* If we have a USE in *AV_PTR that was not scheduled yet,
             do so because it will do good only.  */
          if (EXPR_SCHED_TIMES (expr) <= 0)
            {
              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              /* Target register not available - can't schedule it now,
		 and keeping it would risk an infinite loop.  */
              av_set_iter_remove (&si);
            }
          else
            {
              /* An already-scheduled USE can only reappear when
		 pipelining.  */
              gcc_assert (pipelining_p);

              uses_present_p = true;
            }
        }
      else
        try_uses_p = false;
    }

  if (uses_present_p)
    {
      /* If we don't want to schedule any USEs right now and we have some
           in *AV_PTR, remove them, else just return the first one found.  */
      if (!try_uses_p)
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
              av_set_iter_remove (&si);
        }
      else
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            {
	      /* Only USEs can remain at this point: any non-USE would
		 have cleared try_uses_p above.  */
              gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);

              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              av_set_iter_remove (&si);
            }
        }
    }

  return NULL;
}
   3567  1.1  mrg 
/* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns from
   EXPR's history of changes.  */
static bool
vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
{
  vinsn_t vinsn, expr_vinsn;
  int n;
  unsigned i;

  /* Start with checking expr itself and then proceed with all the old forms
     of expr taken from its history vector.  The update expression of the
     for-loop yields EXPR's current vinsn first, then each history entry,
     then NULL to terminate.  */
  for (i = 0, expr_vinsn = EXPR_VINSN (expr);
       expr_vinsn;
       expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
		     ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
		     : NULL))
    FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
      if (VINSN_SEPARABLE_P (vinsn))
	{
	  /* Separable insns match by pattern equality.  */
	  if (vinsn_equal_p (vinsn, expr_vinsn))
	    return true;
	}
      else
	{
	  /* For non-separable instructions, the blocking insn can have
	     another pattern due to substitution, and we can't choose
	     different register as in the above case.  Check all registers
	     being written instead.  */
	  if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
				  VINSN_REG_SETS (expr_vinsn)))
	    return true;
	}

  return false;
}
   3603  1.1  mrg 
/* Return true if either of expressions from ORIG_OPS can be blocked
   by previously created bookkeeping code.  STATIC_PARAMS points to static
   parameters of move_op (must be moveop_static_params — asserted below).  */
static bool
av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
{
  expr_t expr;
  av_set_iterator iter;
  moveop_static_params_p sparams;

  /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
     created while scheduling on another fence.  */
  FOR_EACH_EXPR (expr, iter, orig_ops)
    if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
      return true;

  /* The cast below is only valid while the move_op hooks drive us.  */
  gcc_assert (code_motion_path_driver_info == &move_op_hooks);
  sparams = (moveop_static_params_p) static_params;

  /* Expressions can be also blocked by bookkeeping created during current
     move_op.  */
  if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
    FOR_EACH_EXPR (expr, iter, orig_ops)
      if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
        return true;

  /* Expressions in ORIG_OPS may have wrong destination register due to
     renaming.  Check with the right register instead.  */
  if (sparams->dest && REG_P (sparams->dest))
    {
      rtx reg = sparams->dest;
      vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);

      /* Blocked if the failed insn sets, uses or clobbers the renamed
	 destination register.  */
      if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
	  || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
	  || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
	return true;
    }

  return false;
}
   3645  1.1  mrg 
   3646  1.1  mrg /* Clear VINSN_VEC and detach vinsns.  */
   3647  1.1  mrg static void
   3648  1.1  mrg vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
   3649  1.1  mrg {
   3650  1.1  mrg   unsigned len = vinsn_vec->length ();
   3651  1.1  mrg   if (len > 0)
   3652  1.1  mrg     {
   3653  1.1  mrg       vinsn_t vinsn;
   3654  1.1  mrg       int n;
   3655  1.1  mrg 
   3656  1.1  mrg       FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
   3657  1.1  mrg         vinsn_detach (vinsn);
   3658  1.1  mrg       vinsn_vec->block_remove (0, len);
   3659  1.1  mrg     }
   3660  1.1  mrg }
   3661  1.1  mrg 
   3662  1.1  mrg /* Add the vinsn of EXPR to the VINSN_VEC.  */
   3663  1.1  mrg static void
   3664  1.1  mrg vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
   3665  1.1  mrg {
   3666  1.1  mrg   vinsn_attach (EXPR_VINSN (expr));
   3667  1.1  mrg   vinsn_vec->safe_push (EXPR_VINSN (expr));
   3668  1.1  mrg }
   3669  1.1  mrg 
/* Free the vector representing blocked expressions.  Presumably the
   elements were already detached via vinsn_vec_clear — confirm at call
   sites, as release () does not detach them.  */
static void
vinsn_vec_free (vinsn_vec_t &vinsn_vec)
{
  vinsn_vec.release ();
}
   3676  1.1  mrg 
/* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  Non-static, so callable
   from outside this file (presumably target backends — confirm).  */

void sel_add_to_insn_priority (rtx insn, int amount)
{
  EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;

  if (sched_verbose >= 2)
    sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
	       INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
	       EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
}
   3688  1.1  mrg 
   3689  1.1  mrg /* Turn AV into a vector, filter inappropriate insns and sort it.  Return
   3690  1.1  mrg    true if there is something to schedule.  BNDS and FENCE are current
   3691  1.1  mrg    boundaries and fence, respectively.  If we need to stall for some cycles
   3692  1.1  mrg    before an expr from AV would become available, write this number to
   3693  1.1  mrg    *PNEED_STALL.  */
   3694  1.1  mrg static bool
   3695  1.1  mrg fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
   3696  1.1  mrg                  int *pneed_stall)
   3697  1.1  mrg {
   3698  1.1  mrg   av_set_iterator si;
   3699  1.1  mrg   expr_t expr;
   3700  1.1  mrg   int sched_next_worked = 0, stalled, n;
   3701  1.1  mrg   static int av_max_prio, est_ticks_till_branch;
   3702  1.1  mrg   int min_need_stall = -1;
   3703  1.1  mrg   deps_t dc = BND_DC (BLIST_BND (bnds));
   3704  1.1  mrg 
   3705  1.1  mrg   /* Bail out early when the ready list contained only USEs/CLOBBERs that are
   3706  1.1  mrg      already scheduled.  */
   3707  1.1  mrg   if (av == NULL)
   3708  1.1  mrg     return false;
   3709  1.1  mrg 
   3710  1.1  mrg   /* Empty vector from the previous stuff.  */
   3711  1.1  mrg   if (vec_av_set.length () > 0)
   3712  1.1  mrg     vec_av_set.block_remove (0, vec_av_set.length ());
   3713  1.1  mrg 
   3714  1.1  mrg   /* Turn the set into a vector for sorting and call sel_target_adjust_priority
   3715  1.1  mrg      for each insn.  */
   3716  1.1  mrg   gcc_assert (vec_av_set.is_empty ());
   3717  1.1  mrg   FOR_EACH_EXPR (expr, si, av)
   3718  1.1  mrg     {
   3719  1.1  mrg       vec_av_set.safe_push (expr);
   3720  1.1  mrg 
   3721  1.1  mrg       gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
   3722  1.1  mrg 
   3723  1.1  mrg       /* Adjust priority using target backend hook.  */
   3724  1.1  mrg       sel_target_adjust_priority (expr);
   3725  1.1  mrg     }
   3726  1.1  mrg 
   3727  1.1  mrg   /* Sort the vector.  */
   3728  1.1  mrg   vec_av_set.qsort (sel_rank_for_schedule);
   3729  1.1  mrg 
   3730  1.1  mrg   /* We record maximal priority of insns in av set for current instruction
   3731  1.1  mrg      group.  */
   3732  1.1  mrg   if (FENCE_STARTS_CYCLE_P (fence))
   3733  1.1  mrg     av_max_prio = est_ticks_till_branch = INT_MIN;
   3734  1.1  mrg 
   3735  1.1  mrg   /* Filter out inappropriate expressions.  Loop's direction is reversed to
   3736  1.1  mrg      visit "best" instructions first.  We assume that vec::unordered_remove
   3737  1.1  mrg      moves last element in place of one being deleted.  */
   3738  1.1  mrg   for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
   3739  1.1  mrg     {
   3740  1.1  mrg       expr_t expr = vec_av_set[n];
   3741  1.1  mrg       insn_t insn = EXPR_INSN_RTX (expr);
   3742  1.1  mrg       signed char target_available;
   3743  1.1  mrg       bool is_orig_reg_p = true;
   3744  1.1  mrg       int need_cycles, new_prio;
   3745  1.1  mrg       bool fence_insn_p = INSN_UID (insn) == INSN_UID (FENCE_INSN (fence));
   3746  1.1  mrg 
   3747  1.1  mrg       /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
   3748  1.1  mrg       if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
   3749  1.1  mrg         {
   3750  1.1  mrg           vec_av_set.unordered_remove (n);
   3751  1.1  mrg           continue;
   3752  1.1  mrg         }
   3753  1.1  mrg 
   3754  1.1  mrg       /* Set number of sched_next insns (just in case there
   3755  1.1  mrg          could be several).  */
   3756  1.1  mrg       if (FENCE_SCHED_NEXT (fence))
   3757  1.1  mrg         sched_next_worked++;
   3758  1.1  mrg 
   3759  1.1  mrg       /* Check all liveness requirements and try renaming.
   3760  1.1  mrg          FIXME: try to minimize calls to this.  */
   3761  1.1  mrg       target_available = EXPR_TARGET_AVAILABLE (expr);
   3762  1.1  mrg 
   3763  1.1  mrg       /* If insn was already scheduled on the current fence,
   3764  1.1  mrg 	 set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
   3765  1.1  mrg       if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr)
   3766  1.1  mrg 	  && !fence_insn_p)
   3767  1.1  mrg 	target_available = -1;
   3768  1.1  mrg 
   3769  1.1  mrg       /* If the availability of the EXPR is invalidated by the insertion of
   3770  1.1  mrg 	 bookkeeping earlier, make sure that we won't choose this expr for
   3771  1.1  mrg 	 scheduling if it's not separable, and if it is separable, then
   3772  1.1  mrg 	 we have to recompute the set of available registers for it.  */
   3773  1.1  mrg       if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
   3774  1.1  mrg 	{
   3775  1.1  mrg           vec_av_set.unordered_remove (n);
   3776  1.1  mrg           if (sched_verbose >= 4)
   3777  1.1  mrg             sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
   3778  1.1  mrg                        INSN_UID (insn));
   3779  1.1  mrg           continue;
   3780  1.1  mrg         }
   3781  1.1  mrg 
   3782  1.1  mrg       if (target_available == true)
   3783  1.1  mrg 	{
   3784  1.1  mrg           /* Do nothing -- we can use an existing register.  */
   3785  1.1  mrg 	  is_orig_reg_p = EXPR_SEPARABLE_P (expr);
   3786  1.1  mrg         }
   3787  1.1  mrg       else if (/* Non-separable instruction will never
   3788  1.1  mrg                   get another register. */
   3789  1.1  mrg                (target_available == false
   3790  1.1  mrg                 && !EXPR_SEPARABLE_P (expr))
   3791  1.1  mrg                /* Don't try to find a register for low-priority expression.  */
   3792  1.1  mrg                || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
   3793  1.1  mrg                /* ??? FIXME: Don't try to rename data speculation.  */
   3794  1.1  mrg                || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
   3795  1.1  mrg                || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
   3796  1.1  mrg         {
   3797  1.1  mrg           vec_av_set.unordered_remove (n);
   3798  1.1  mrg           if (sched_verbose >= 4)
   3799  1.1  mrg             sel_print ("Expr %d has no suitable target register\n",
   3800  1.1  mrg                        INSN_UID (insn));
   3801  1.1  mrg 
   3802  1.1  mrg 	  /* A fence insn should not get here.  */
   3803  1.1  mrg 	  gcc_assert (!fence_insn_p);
   3804  1.1  mrg 	  continue;
   3805  1.1  mrg         }
   3806  1.1  mrg 
   3807  1.1  mrg       /* At this point a fence insn should always be available.  */
   3808  1.1  mrg       gcc_assert (!fence_insn_p
   3809  1.1  mrg 		  || INSN_UID (FENCE_INSN (fence)) == INSN_UID (EXPR_INSN_RTX (expr)));
   3810  1.1  mrg 
   3811  1.1  mrg       /* Filter expressions that need to be renamed or speculated when
   3812  1.1  mrg 	 pipelining, because compensating register copies or speculation
   3813  1.1  mrg 	 checks are likely to be placed near the beginning of the loop,
   3814  1.1  mrg 	 causing a stall.  */
   3815  1.1  mrg       if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
   3816  1.1  mrg 	  && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
   3817  1.1  mrg 	{
   3818  1.1  mrg 	  /* Estimation of number of cycles until loop branch for
   3819  1.1  mrg 	     renaming/speculation to be successful.  */
   3820  1.1  mrg 	  int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));
   3821  1.1  mrg 
   3822  1.1  mrg 	  if ((int) current_loop_nest->ninsns < 9)
   3823  1.1  mrg 	    {
   3824  1.1  mrg 	      vec_av_set.unordered_remove (n);
   3825  1.1  mrg 	      if (sched_verbose >= 4)
   3826  1.1  mrg 		sel_print ("Pipelining expr %d will likely cause stall\n",
   3827  1.1  mrg 			   INSN_UID (insn));
   3828  1.1  mrg 	      continue;
   3829  1.1  mrg 	    }
   3830  1.1  mrg 
   3831  1.1  mrg 	  if ((int) current_loop_nest->ninsns - num_insns_scheduled
   3832  1.1  mrg 	      < need_n_ticks_till_branch * issue_rate / 2
   3833  1.1  mrg 	      && est_ticks_till_branch < need_n_ticks_till_branch)
   3834  1.1  mrg 	     {
   3835  1.1  mrg 	       vec_av_set.unordered_remove (n);
   3836  1.1  mrg 	       if (sched_verbose >= 4)
   3837  1.1  mrg 		 sel_print ("Pipelining expr %d will likely cause stall\n",
   3838  1.1  mrg 			    INSN_UID (insn));
   3839  1.1  mrg 	       continue;
   3840  1.1  mrg 	     }
   3841  1.1  mrg 	}
   3842  1.1  mrg 
   3843  1.1  mrg       /* We want to schedule speculation checks as late as possible.  Discard
   3844  1.1  mrg 	 them from av set if there are instructions with higher priority.  */
   3845  1.1  mrg       if (sel_insn_is_speculation_check (insn)
   3846  1.1  mrg 	  && EXPR_PRIORITY (expr) < av_max_prio)
   3847  1.1  mrg 	{
   3848  1.1  mrg           stalled++;
   3849  1.1  mrg           min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
   3850  1.1  mrg           vec_av_set.unordered_remove (n);
   3851  1.1  mrg 	  if (sched_verbose >= 4)
   3852  1.1  mrg 	    sel_print ("Delaying speculation check %d until its first use\n",
   3853  1.1  mrg 		       INSN_UID (insn));
   3854  1.1  mrg 	  continue;
   3855  1.1  mrg 	}
   3856  1.1  mrg 
   3857  1.1  mrg       /* Ignore EXPRs available from pipelining to update AV_MAX_PRIO.  */
   3858  1.1  mrg       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
   3859  1.1  mrg 	av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));
   3860  1.1  mrg 
   3861  1.1  mrg       /* Don't allow any insns whose data is not yet ready.
   3862  1.1  mrg          Check first whether we've already tried them and failed.  */
   3863  1.1  mrg       if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
   3864  1.1  mrg 	{
   3865  1.1  mrg           need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
   3866  1.1  mrg 			 - FENCE_CYCLE (fence));
   3867  1.1  mrg 	  if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
   3868  1.1  mrg 	    est_ticks_till_branch = MAX (est_ticks_till_branch,
   3869  1.1  mrg 					 EXPR_PRIORITY (expr) + need_cycles);
   3870  1.1  mrg 
   3871  1.1  mrg 	  if (need_cycles > 0)
   3872  1.1  mrg 	    {
   3873  1.1  mrg 	      stalled++;
   3874  1.1  mrg 	      min_need_stall = (min_need_stall < 0
   3875  1.1  mrg 				? need_cycles
   3876  1.1  mrg 				: MIN (min_need_stall, need_cycles));
   3877  1.1  mrg 	      vec_av_set.unordered_remove (n);
   3878  1.1  mrg 
   3879  1.1  mrg 	      if (sched_verbose >= 4)
   3880  1.1  mrg 		sel_print ("Expr %d is not ready until cycle %d (cached)\n",
   3881  1.1  mrg 			   INSN_UID (insn),
   3882  1.1  mrg 			   FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
   3883  1.1  mrg 	      continue;
   3884  1.1  mrg 	    }
   3885  1.1  mrg 	}
   3886  1.1  mrg 
   3887  1.1  mrg       /* Now resort to dependence analysis to find whether EXPR might be
   3888  1.1  mrg          stalled due to dependencies from FENCE's context.  */
   3889  1.1  mrg       need_cycles = tick_check_p (expr, dc, fence);
   3890  1.1  mrg       new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
   3891  1.1  mrg 
   3892  1.1  mrg       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
   3893  1.1  mrg 	est_ticks_till_branch = MAX (est_ticks_till_branch,
   3894  1.1  mrg 				     new_prio);
   3895  1.1  mrg 
   3896  1.1  mrg       if (need_cycles > 0)
   3897  1.1  mrg         {
   3898  1.1  mrg           if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
   3899  1.1  mrg             {
   3900  1.1  mrg               int new_size = INSN_UID (insn) * 3 / 2;
   3901  1.1  mrg 
   3902  1.1  mrg               FENCE_READY_TICKS (fence)
   3903  1.1  mrg                 = (int *) xrecalloc (FENCE_READY_TICKS (fence),
   3904  1.1  mrg                                      new_size, FENCE_READY_TICKS_SIZE (fence),
   3905  1.1  mrg                                      sizeof (int));
   3906  1.1  mrg             }
   3907  1.1  mrg           FENCE_READY_TICKS (fence)[INSN_UID (insn)]
   3908  1.1  mrg             = FENCE_CYCLE (fence) + need_cycles;
   3909  1.1  mrg 
   3910  1.1  mrg           stalled++;
   3911  1.1  mrg           min_need_stall = (min_need_stall < 0
   3912  1.1  mrg                             ? need_cycles
   3913  1.1  mrg                             : MIN (min_need_stall, need_cycles));
   3914  1.1  mrg 
   3915  1.1  mrg           vec_av_set.unordered_remove (n);
   3916  1.1  mrg 
   3917  1.1  mrg           if (sched_verbose >= 4)
   3918  1.1  mrg             sel_print ("Expr %d is not ready yet until cycle %d\n",
   3919  1.1  mrg                        INSN_UID (insn),
   3920  1.1  mrg                        FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
   3921  1.1  mrg           continue;
   3922  1.1  mrg         }
   3923  1.1  mrg 
   3924  1.1  mrg       if (sched_verbose >= 4)
   3925  1.1  mrg         sel_print ("Expr %d is ok\n", INSN_UID (insn));
   3926  1.1  mrg       min_need_stall = 0;
   3927  1.1  mrg     }
   3928  1.1  mrg 
   3929  1.1  mrg   /* Clear SCHED_NEXT.  */
   3930  1.1  mrg   if (FENCE_SCHED_NEXT (fence))
   3931  1.1  mrg     {
   3932  1.1  mrg       gcc_assert (sched_next_worked == 1);
   3933  1.1  mrg       FENCE_SCHED_NEXT (fence) = NULL;
   3934  1.1  mrg     }
   3935  1.1  mrg 
   3936  1.1  mrg   /* No need to stall if this variable was not initialized.  */
   3937  1.1  mrg   if (min_need_stall < 0)
   3938  1.1  mrg     min_need_stall = 0;
   3939  1.1  mrg 
   3940  1.1  mrg   if (vec_av_set.is_empty ())
   3941  1.1  mrg     {
   3942  1.1  mrg       /* We need to set *pneed_stall here, because later we skip this code
   3943  1.1  mrg          when ready list is empty.  */
   3944  1.1  mrg       *pneed_stall = min_need_stall;
   3945  1.1  mrg       return false;
   3946  1.1  mrg     }
   3947  1.1  mrg   else
   3948  1.1  mrg     gcc_assert (min_need_stall == 0);
   3949  1.1  mrg 
   3950  1.1  mrg   /* Sort the vector.  */
   3951  1.1  mrg   vec_av_set.qsort (sel_rank_for_schedule);
   3952  1.1  mrg 
   3953  1.1  mrg   if (sched_verbose >= 4)
   3954  1.1  mrg     {
   3955  1.1  mrg       sel_print ("Total ready exprs: %d, stalled: %d\n",
   3956  1.1  mrg                  vec_av_set.length (), stalled);
   3957  1.1  mrg       sel_print ("Sorted av set (%d): ", vec_av_set.length ());
   3958  1.1  mrg       FOR_EACH_VEC_ELT (vec_av_set, n, expr)
   3959  1.1  mrg         dump_expr (expr);
   3960  1.1  mrg       sel_print ("\n");
   3961  1.1  mrg     }
   3962  1.1  mrg 
   3963  1.1  mrg   *pneed_stall = 0;
   3964  1.1  mrg   return true;
   3965  1.1  mrg }
   3966  1.1  mrg 
   3967  1.1  mrg /* Convert a vectored and sorted av set to the ready list that
   3968  1.1  mrg    the rest of the backend wants to see.  */
   3969  1.1  mrg static void
   3970  1.1  mrg convert_vec_av_set_to_ready (void)
   3971  1.1  mrg {
   3972  1.1  mrg   int n;
   3973  1.1  mrg   expr_t expr;
   3974  1.1  mrg 
   3975  1.1  mrg   /* Allocate and fill the ready list from the sorted vector.  */
   3976  1.1  mrg   ready.n_ready = vec_av_set.length ();
   3977  1.1  mrg   ready.first = ready.n_ready - 1;
   3978  1.1  mrg 
   3979  1.1  mrg   gcc_assert (ready.n_ready > 0);
   3980  1.1  mrg 
   3981  1.1  mrg   if (ready.n_ready > max_issue_size)
   3982  1.1  mrg     {
   3983  1.1  mrg       max_issue_size = ready.n_ready;
   3984  1.1  mrg       sched_extend_ready_list (ready.n_ready);
   3985  1.1  mrg     }
   3986  1.1  mrg 
   3987  1.1  mrg   FOR_EACH_VEC_ELT (vec_av_set, n, expr)
   3988  1.1  mrg     {
   3989  1.1  mrg       vinsn_t vi = EXPR_VINSN (expr);
   3990  1.1  mrg       insn_t insn = VINSN_INSN_RTX (vi);
   3991  1.1  mrg 
   3992  1.1  mrg       ready_try[n] = 0;
   3993  1.1  mrg       ready.vec[n] = insn;
   3994  1.1  mrg     }
   3995  1.1  mrg }
   3996  1.1  mrg 
   3997  1.1  mrg /* Initialize ready list from *AV_PTR for the max_issue () call.
   3998  1.1  mrg    If any unrecognizable insn found in *AV_PTR, return it (and skip
   3999  1.1  mrg    max_issue).  BND and FENCE are current boundary and fence,
   4000  1.1  mrg    respectively.  If we need to stall for some cycles before an expr
   4001  1.1  mrg    from *AV_PTR would become available, write this number to *PNEED_STALL.  */
   4002  1.1  mrg static expr_t
   4003  1.1  mrg fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
   4004  1.1  mrg                  int *pneed_stall)
   4005  1.1  mrg {
   4006  1.1  mrg   expr_t expr;
   4007  1.1  mrg 
   4008  1.1  mrg   /* We do not support multiple boundaries per fence.  */
   4009  1.1  mrg   gcc_assert (BLIST_NEXT (bnds) == NULL);
   4010  1.1  mrg 
   4011  1.1  mrg   /* Process expressions required special handling, i.e.  pipelined,
   4012  1.1  mrg      speculative and recog() < 0 expressions first.  */
   4013  1.1  mrg   process_pipelined_exprs (av_ptr);
   4014  1.1  mrg   process_spec_exprs (av_ptr);
   4015  1.1  mrg 
   4016  1.1  mrg   /* A USE could be scheduled immediately.  */
   4017  1.1  mrg   expr = process_use_exprs (av_ptr);
   4018  1.1  mrg   if (expr)
   4019  1.1  mrg     {
   4020  1.1  mrg       *pneed_stall = 0;
   4021  1.1  mrg       return expr;
   4022  1.1  mrg     }
   4023  1.1  mrg 
   4024  1.1  mrg   /* Turn the av set to a vector for sorting.  */
   4025  1.1  mrg   if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
   4026  1.1  mrg     {
   4027  1.1  mrg       ready.n_ready = 0;
   4028  1.1  mrg       return NULL;
   4029  1.1  mrg     }
   4030  1.1  mrg 
   4031  1.1  mrg   /* Build the final ready list.  */
   4032  1.1  mrg   convert_vec_av_set_to_ready ();
   4033  1.1  mrg   return NULL;
   4034  1.1  mrg }
   4035  1.1  mrg 
/* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.
   INSN is the candidate insn about to be issued on FENCE; the target's
   dfa_new_cycle hook may demand that one or more cycles pass first.  */
static bool
sel_dfa_new_cycle (insn_t insn, fence_t fence)
{
  /* Cycle on which the previous insn was issued; if nothing was
     scheduled on this fence yet, pretend it was the previous cycle.  */
  int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
                             ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
                             : FENCE_CYCLE (fence) - 1;
  bool res = false;
  int sort_p = 0;

  if (!targetm.sched.dfa_new_cycle)
    return false;

  /* Work on a scratch copy of the fence's DFA state, as the hook
     inspects curr_state.  */
  memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);

  /* Keep advancing cycles while the hook asks for it; the hook stops
     the loop either by returning zero or by setting SORT_P.  */
  while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                                 insn, last_scheduled_cycle,
                                                 FENCE_CYCLE (fence), &sort_p))
    {
      /* Commit the scratch state, advance one cycle on the fence, then
	 refresh the scratch copy for the next hook invocation.  */
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
      advance_one_cycle (fence);
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      res = true;
    }

  return res;
}
   4063  1.1  mrg 
/* Invoke reorder* target hooks on the ready list.  Return the number of insns
   we can issue.  FENCE is the current fence.  The hooks may permute the
   ready list, so vec_av_set is re-synchronized with it afterwards.  */
static int
invoke_reorder_hooks (fence_t fence)
{
  int issue_more;
  bool ran_hook = false;

  /* Call the reorder hook at the beginning of the cycle, and call
     the reorder2 hook in the middle of the cycle.  */
  if (FENCE_ISSUED_INSNS (fence) == 0)
    {
      /* Do not reorder when the head of the list starts a scheduling
	 group, or when there is nothing to reorder.  */
      if (targetm.sched.reorder
          && !SCHED_GROUP_P (ready_element (&ready, 0))
          && ready.n_ready > 1)
        {
          /* Don't give reorder the most prioritized insn as it can break
             pipelining.  */
          if (pipelining_p)
            --ready.n_ready;

          issue_more
            = targetm.sched.reorder (sched_dump, sched_verbose,
                                     ready_lastpos (&ready),
                                     &ready.n_ready, FENCE_CYCLE (fence));

          /* Restore the element temporarily hidden from the hook.  */
          if (pipelining_p)
            ++ready.n_ready;

          ran_hook = true;
        }
      else
        /* Initialize can_issue_more for variable_issue.  */
        issue_more = issue_rate;
    }
  else if (targetm.sched.reorder2
           && !SCHED_GROUP_P (ready_element (&ready, 0)))
    {
      /* Mid-cycle: consult reorder2.  With a single-element list there
	 is no priority-protection concern, so pass it as-is.  */
      if (ready.n_ready == 1)
        issue_more =
          targetm.sched.reorder2 (sched_dump, sched_verbose,
                                  ready_lastpos (&ready),
                                  &ready.n_ready, FENCE_CYCLE (fence));
      else
        {
          /* As above, shield the most prioritized insn when pipelining.  */
          if (pipelining_p)
            --ready.n_ready;

          issue_more =
            targetm.sched.reorder2 (sched_dump, sched_verbose,
                                    ready.n_ready
                                    ? ready_lastpos (&ready) : NULL,
                                    &ready.n_ready, FENCE_CYCLE (fence));

          if (pipelining_p)
            ++ready.n_ready;
        }

      ran_hook = true;
    }
  else
    /* No applicable hook: carry over the issue budget stored on the
       fence.  */
    issue_more = FENCE_ISSUE_MORE (fence);

  /* Ensure that ready list and vec_av_set are in line with each other,
     i.e. vec_av_set[i] == ready_element (&ready, i).  */
  if (issue_more && ran_hook)
    {
      int i, j, n;
      rtx_insn **arr = ready.vec;
      expr_t *vec = vec_av_set.address ();

      /* Selection-sort-style resync: for each mismatch, find the expr
	 matching the insn the hook placed at position I and swap it in.  */
      for (i = 0, n = ready.n_ready; i < n; i++)
        if (EXPR_INSN_RTX (vec[i]) != arr[i])
          {
            for (j = i; j < n; j++)
              if (EXPR_INSN_RTX (vec[j]) == arr[i])
                break;
	    /* Every ready insn must have a corresponding expr.  */
            gcc_assert (j < n);

	    std::swap (vec[i], vec[j]);
          }
    }

  return issue_more;
}
   4149  1.1  mrg 
   4150  1.1  mrg /* Return an EXPR corresponding to INDEX element of ready list, if
   4151  1.1  mrg    FOLLOW_READY_ELEMENT is true (i.e., an expr of
   4152  1.1  mrg    ready_element (&ready, INDEX) will be returned), and to INDEX element of
   4153  1.1  mrg    ready.vec otherwise.  */
   4154  1.1  mrg static inline expr_t
   4155  1.1  mrg find_expr_for_ready (int index, bool follow_ready_element)
   4156  1.1  mrg {
   4157  1.1  mrg   expr_t expr;
   4158  1.1  mrg   int real_index;
   4159  1.1  mrg 
   4160  1.1  mrg   real_index = follow_ready_element ? ready.first - index : index;
   4161  1.1  mrg 
   4162  1.1  mrg   expr = vec_av_set[real_index];
   4163  1.1  mrg   gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
   4164  1.1  mrg 
   4165  1.1  mrg   return expr;
   4166  1.1  mrg }
   4167  1.1  mrg 
   4168  1.1  mrg /* Calculate insns worth trying via lookahead_guard hook.  Return a number
   4169  1.1  mrg    of such insns found.  */
   4170  1.1  mrg static int
   4171  1.1  mrg invoke_dfa_lookahead_guard (void)
   4172  1.1  mrg {
   4173  1.1  mrg   int i, n;
   4174  1.1  mrg   bool have_hook
   4175  1.1  mrg     = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
   4176  1.1  mrg 
   4177  1.1  mrg   if (sched_verbose >= 2)
   4178  1.1  mrg     sel_print ("ready after reorder: ");
   4179  1.1  mrg 
   4180  1.1  mrg   for (i = 0, n = 0; i < ready.n_ready; i++)
   4181  1.1  mrg     {
   4182  1.1  mrg       expr_t expr;
   4183  1.1  mrg       insn_t insn;
   4184  1.1  mrg       int r;
   4185  1.1  mrg 
   4186  1.1  mrg       /* In this loop insn is Ith element of the ready list given by
   4187  1.1  mrg          ready_element, not Ith element of ready.vec.  */
   4188  1.1  mrg       insn = ready_element (&ready, i);
   4189  1.1  mrg 
   4190  1.1  mrg       if (! have_hook || i == 0)
   4191  1.1  mrg         r = 0;
   4192  1.1  mrg       else
   4193  1.1  mrg         r = targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn, i);
   4194  1.1  mrg 
   4195  1.1  mrg       gcc_assert (INSN_CODE (insn) >= 0);
   4196  1.1  mrg 
   4197  1.1  mrg       /* Only insns with ready_try = 0 can get here
   4198  1.1  mrg          from fill_ready_list.  */
   4199  1.1  mrg       gcc_assert (ready_try [i] == 0);
   4200  1.1  mrg       ready_try[i] = r;
   4201  1.1  mrg       if (!r)
   4202  1.1  mrg         n++;
   4203  1.1  mrg 
   4204  1.1  mrg       expr = find_expr_for_ready (i, true);
   4205  1.1  mrg 
   4206  1.1  mrg       if (sched_verbose >= 2)
   4207  1.1  mrg         {
   4208  1.1  mrg           dump_vinsn (EXPR_VINSN (expr));
   4209  1.1  mrg           sel_print (":%d; ", ready_try[i]);
   4210  1.1  mrg         }
   4211  1.1  mrg     }
   4212  1.1  mrg 
   4213  1.1  mrg   if (sched_verbose >= 2)
   4214  1.1  mrg     sel_print ("\n");
   4215  1.1  mrg   return n;
   4216  1.1  mrg }
   4217  1.1  mrg 
   4218  1.1  mrg /* Calculate the number of privileged insns and return it.  */
   4219  1.1  mrg static int
   4220  1.1  mrg calculate_privileged_insns (void)
   4221  1.1  mrg {
   4222  1.1  mrg   expr_t cur_expr, min_spec_expr = NULL;
   4223  1.1  mrg   int privileged_n = 0, i;
   4224  1.1  mrg 
   4225  1.1  mrg   for (i = 0; i < ready.n_ready; i++)
   4226  1.1  mrg     {
   4227  1.1  mrg       if (ready_try[i])
   4228  1.1  mrg         continue;
   4229  1.1  mrg 
   4230  1.1  mrg       if (! min_spec_expr)
   4231  1.1  mrg 	min_spec_expr = find_expr_for_ready (i, true);
   4232  1.1  mrg 
   4233  1.1  mrg       cur_expr = find_expr_for_ready (i, true);
   4234  1.1  mrg 
   4235  1.1  mrg       if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
   4236  1.1  mrg         break;
   4237  1.1  mrg 
   4238  1.1  mrg       ++privileged_n;
   4239  1.1  mrg     }
   4240  1.1  mrg 
   4241  1.1  mrg   if (i == ready.n_ready)
   4242  1.1  mrg     privileged_n = 0;
   4243  1.1  mrg 
   4244  1.1  mrg   if (sched_verbose >= 2)
   4245  1.1  mrg     sel_print ("privileged_n: %d insns with SPEC %d\n",
   4246  1.1  mrg                privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
   4247  1.1  mrg   return privileged_n;
   4248  1.1  mrg }
   4249  1.1  mrg 
/* Call the rest of the hooks after the choice was made.  Return
   the number of insns that still can be issued given that the current
   number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
   and the insn chosen for scheduling, respectively.  */
static int
invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more)
{
  gcc_assert (INSN_P (best_insn));

  /* First, call dfa_new_cycle, and then variable_issue, if available.  */
  sel_dfa_new_cycle (best_insn, fence);

  if (targetm.sched.variable_issue)
    {
      /* The hook works on curr_state; shuttle the fence's DFA state
	 through it and back.  */
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      issue_more =
        targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
                                      issue_more);
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
    }
  else if (!DEBUG_INSN_P (best_insn)
	   && GET_CODE (PATTERN (best_insn)) != USE
	   && GET_CODE (PATTERN (best_insn)) != CLOBBER)
    /* Without the hook, every real insn consumes one issue slot;
       debug insns, USEs and CLOBBERs issue for free.  */
    issue_more--;

  return issue_more;
}
   4277  1.1  mrg 
   4278  1.1  mrg /* Estimate the cost of issuing INSN on DFA state STATE.  */
   4279  1.1  mrg static int
   4280  1.1  mrg estimate_insn_cost (rtx_insn *insn, state_t state)
   4281  1.1  mrg {
   4282  1.1  mrg   static state_t temp = NULL;
   4283  1.1  mrg   int cost;
   4284  1.1  mrg 
   4285  1.1  mrg   if (!temp)
   4286  1.1  mrg     temp = xmalloc (dfa_state_size);
   4287  1.1  mrg 
   4288  1.1  mrg   memcpy (temp, state, dfa_state_size);
   4289  1.1  mrg   cost = state_transition (temp, insn);
   4290  1.1  mrg 
   4291  1.1  mrg   if (cost < 0)
   4292  1.1  mrg     return 0;
   4293  1.1  mrg   else if (cost == 0)
   4294  1.1  mrg     return 1;
   4295  1.1  mrg   return cost;
   4296  1.1  mrg }
   4297  1.1  mrg 
   4298  1.1  mrg /* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
   4299  1.1  mrg    This function properly handles ASMs, USEs etc.  */
   4300  1.1  mrg static int
   4301  1.1  mrg get_expr_cost (expr_t expr, fence_t fence)
   4302  1.1  mrg {
   4303  1.1  mrg   rtx_insn *insn = EXPR_INSN_RTX (expr);
   4304  1.1  mrg 
   4305  1.1  mrg   if (recog_memoized (insn) < 0)
   4306  1.1  mrg     {
   4307  1.1  mrg       if (!FENCE_STARTS_CYCLE_P (fence)
   4308  1.1  mrg 	  && INSN_ASM_P (insn))
   4309  1.1  mrg 	/* This is asm insn which is tryed to be issued on the
   4310  1.1  mrg 	   cycle not first.  Issue it on the next cycle.  */
   4311  1.1  mrg 	return 1;
   4312  1.1  mrg       else
   4313  1.1  mrg 	/* A USE insn, or something else we don't need to
   4314  1.1  mrg 	   understand.  We can't pass these directly to
   4315  1.1  mrg 	   state_transition because it will trigger a
   4316  1.1  mrg 	   fatal error for unrecognizable insns.  */
   4317  1.1  mrg 	return 0;
   4318  1.1  mrg     }
   4319  1.1  mrg   else
   4320  1.1  mrg     return estimate_insn_cost (insn, FENCE_STATE (fence));
   4321  1.1  mrg }
   4322  1.1  mrg 
   4323  1.1  mrg /* Find the best insn for scheduling, either via max_issue or just take
   4324  1.1  mrg    the most prioritized available.  */
   4325  1.1  mrg static int
   4326  1.1  mrg choose_best_insn (fence_t fence, int privileged_n, int *index)
   4327  1.1  mrg {
   4328  1.1  mrg   int can_issue = 0;
   4329  1.1  mrg 
   4330  1.1  mrg   if (dfa_lookahead > 0)
   4331  1.1  mrg     {
   4332  1.1  mrg       cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
   4333  1.1  mrg       /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
   4334  1.1  mrg       can_issue = max_issue (&ready, privileged_n,
   4335  1.1  mrg                              FENCE_STATE (fence), true, index);
   4336  1.1  mrg       if (sched_verbose >= 2)
   4337  1.1  mrg         sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
   4338  1.1  mrg                    can_issue, FENCE_ISSUED_INSNS (fence));
   4339  1.1  mrg     }
   4340  1.1  mrg   else
   4341  1.1  mrg     {
   4342  1.1  mrg       /* We can't use max_issue; just return the first available element.  */
   4343  1.1  mrg       int i;
   4344  1.1  mrg 
   4345  1.1  mrg       for (i = 0; i < ready.n_ready; i++)
   4346  1.1  mrg 	{
   4347  1.1  mrg 	  expr_t expr = find_expr_for_ready (i, true);
   4348  1.1  mrg 
   4349  1.1  mrg 	  if (get_expr_cost (expr, fence) < 1)
   4350  1.1  mrg 	    {
   4351  1.1  mrg 	      can_issue = can_issue_more;
   4352  1.1  mrg 	      *index = i;
   4353  1.1  mrg 
   4354  1.1  mrg 	      if (sched_verbose >= 2)
   4355  1.1  mrg 		sel_print ("using %dth insn from the ready list\n", i + 1);
   4356  1.1  mrg 
   4357  1.1  mrg 	      break;
   4358  1.1  mrg 	    }
   4359  1.1  mrg 	}
   4360  1.1  mrg 
   4361  1.1  mrg       if (i == ready.n_ready)
   4362  1.1  mrg 	{
   4363  1.1  mrg 	  can_issue = 0;
   4364  1.1  mrg 	  *index = -1;
   4365  1.1  mrg 	}
   4366  1.1  mrg     }
   4367  1.1  mrg 
   4368  1.1  mrg   return can_issue;
   4369  1.1  mrg }
   4370  1.1  mrg 
/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
   BNDS and FENCE are current boundaries and scheduling fence respectively.
   Return the expr found and NULL if nothing can be issued atm.
   Write to PNEED_STALL the number of cycles to stall if no expr was found.
   As a side effect, updates the global can_issue_more budget.  */
static expr_t
find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
                int *pneed_stall)
{
  expr_t best;

  /* Choose the best insn for scheduling via:
     1) sorting the ready list based on priority;
     2) calling the reorder hook;
     3) calling max_issue.  */
  best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
  /* A non-NULL BEST here is an unrecognizable (e.g. USE) insn that is
     scheduled immediately, bypassing the selection machinery below.  */
  if (best == NULL && ready.n_ready > 0)
    {
      int privileged_n, index;

      can_issue_more = invoke_reorder_hooks (fence);
      if (can_issue_more > 0)
        {
          /* Try choosing the best insn until we find one that is could be
             scheduled due to liveness restrictions on its destination register.
             In the future, we'd like to choose once and then just probe insns
             in the order of their priority.  */
          invoke_dfa_lookahead_guard ();
          privileged_n = calculate_privileged_insns ();
          can_issue_more = choose_best_insn (fence, privileged_n, &index);
          if (can_issue_more)
            best = find_expr_for_ready (index, true);
        }
      /* We had some available insns, so if we can't issue them,
         we have a stall.  */
      if (can_issue_more == 0)
        {
          best = NULL;
          *pneed_stall = 1;
        }
    }

  if (best != NULL)
    {
      /* Run post-selection hooks (dfa_new_cycle / variable_issue) and
	 update the remaining issue budget.  */
      can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
                                               can_issue_more);
      /* With a variable_issue hook, an exhausted budget after issuing
	 BEST means the next insn must wait a cycle.  */
      if (targetm.sched.variable_issue
	  && can_issue_more == 0)
        *pneed_stall = 1;
    }

  if (sched_verbose >= 2)
    {
      if (best != NULL)
        {
          sel_print ("Best expression (vliw form): ");
          dump_expr (best);
          sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
        }
      else
        sel_print ("No best expr found!\n");
    }

  return best;
}
   4435  1.1  mrg 
   4436  1.1  mrg 
   4438  1.1  mrg /* Functions that implement the core of the scheduler.  */
   4439  1.1  mrg 
   4440  1.1  mrg 
   4441  1.1  mrg /* Emit an instruction from EXPR with SEQNO and VINSN after
   4442  1.1  mrg    PLACE_TO_INSERT.  */
   4443  1.1  mrg static insn_t
   4444  1.1  mrg emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
   4445  1.1  mrg                            insn_t place_to_insert)
   4446  1.1  mrg {
   4447  1.1  mrg   /* This assert fails when we have identical instructions
   4448  1.1  mrg      one of which dominates the other.  In this case move_op ()
   4449  1.1  mrg      finds the first instruction and doesn't search for second one.
   4450  1.1  mrg      The solution would be to compute av_set after the first found
   4451  1.1  mrg      insn and, if insn present in that set, continue searching.
   4452  1.1  mrg      For now we workaround this issue in move_op.  */
   4453  1.1  mrg   gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
   4454  1.1  mrg 
   4455  1.1  mrg   if (EXPR_WAS_RENAMED (expr))
   4456  1.1  mrg     {
   4457  1.1  mrg       unsigned regno = expr_dest_regno (expr);
   4458  1.1  mrg 
   4459  1.1  mrg       if (HARD_REGISTER_NUM_P (regno))
   4460  1.1  mrg 	{
   4461  1.1  mrg 	  df_set_regs_ever_live (regno, true);
   4462  1.1  mrg 	  reg_rename_tick[regno] = ++reg_rename_this_tick;
   4463  1.1  mrg 	}
   4464  1.1  mrg     }
   4465  1.1  mrg 
   4466  1.1  mrg   return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
   4467  1.1  mrg                                        place_to_insert);
   4468  1.1  mrg }
   4469  1.1  mrg 
   4470  1.1  mrg /* Return TRUE if BB can hold bookkeeping code.  */
   4471  1.1  mrg static bool
   4472  1.1  mrg block_valid_for_bookkeeping_p (basic_block bb)
   4473  1.1  mrg {
   4474  1.1  mrg   insn_t bb_end = BB_END (bb);
   4475  1.1  mrg 
   4476  1.1  mrg   if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
   4477  1.1  mrg     return false;
   4478  1.1  mrg 
   4479  1.1  mrg   if (INSN_P (bb_end))
   4480  1.1  mrg     {
   4481  1.1  mrg       if (INSN_SCHED_TIMES (bb_end) > 0)
   4482  1.1  mrg 	return false;
   4483  1.1  mrg     }
   4484  1.1  mrg   else
   4485  1.1  mrg     gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
   4486  1.1  mrg 
   4487  1.1  mrg   return true;
   4488  1.1  mrg }
   4489  1.1  mrg 
   4490  1.1  mrg /* Attempt to find a block that can hold bookkeeping code for path(s) incoming
   4491  1.1  mrg    into E2->dest, except from E1->src (there may be a sequence of empty basic
   4492  1.1  mrg    blocks between E1->src and E2->dest).  Return found block, or NULL if new
   4493  1.1  mrg    one must be created.  If LAX holds, don't assume there is a simple path
   4494  1.1  mrg    from E1->src to E2->dest.  */
   4495  1.1  mrg static basic_block
   4496  1.1  mrg find_block_for_bookkeeping (edge e1, edge e2, bool lax)
   4497  1.1  mrg {
   4498  1.1  mrg   basic_block candidate_block = NULL;
   4499  1.1  mrg   edge e;
   4500  1.1  mrg 
   4501  1.1  mrg   /* Loop over edges from E1 to E2, inclusive.  */
   4502  1.1  mrg   for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e =
   4503  1.1  mrg        EDGE_SUCC (e->dest, 0))
   4504  1.1  mrg     {
   4505  1.1  mrg       if (EDGE_COUNT (e->dest->preds) == 2)
   4506  1.1  mrg 	{
   4507  1.1  mrg 	  if (candidate_block == NULL)
   4508  1.1  mrg 	    candidate_block = (EDGE_PRED (e->dest, 0) == e
   4509  1.1  mrg 			       ? EDGE_PRED (e->dest, 1)->src
   4510  1.1  mrg 			       : EDGE_PRED (e->dest, 0)->src);
   4511  1.1  mrg 	  else
   4512  1.1  mrg 	    /* Found additional edge leading to path from e1 to e2
   4513  1.1  mrg 	       from aside.  */
   4514  1.1  mrg 	    return NULL;
   4515  1.1  mrg 	}
   4516  1.1  mrg       else if (EDGE_COUNT (e->dest->preds) > 2)
   4517  1.1  mrg 	/* Several edges leading to path from e1 to e2 from aside.  */
   4518  1.1  mrg 	return NULL;
   4519  1.1  mrg 
   4520  1.1  mrg       if (e == e2)
   4521  1.1  mrg 	return ((!lax || candidate_block)
   4522  1.1  mrg 		&& block_valid_for_bookkeeping_p (candidate_block)
   4523  1.1  mrg 		? candidate_block
   4524  1.1  mrg 		: NULL);
   4525  1.1  mrg 
   4526  1.1  mrg       if (lax && EDGE_COUNT (e->dest->succs) != 1)
   4527  1.1  mrg 	return NULL;
   4528  1.1  mrg     }
   4529  1.1  mrg 
   4530  1.1  mrg   if (lax)
   4531  1.1  mrg     return NULL;
   4532  1.1  mrg 
   4533  1.1  mrg   gcc_unreachable ();
   4534  1.1  mrg }
   4535  1.1  mrg 
   4536  1.1  mrg /* Create new basic block for bookkeeping code for path(s) incoming into
   4537  1.1  mrg    E2->dest, except from E1->src.  Return created block.  */
   4538  1.1  mrg static basic_block
   4539  1.1  mrg create_block_for_bookkeeping (edge e1, edge e2)
   4540  1.1  mrg {
   4541  1.1  mrg   basic_block new_bb, bb = e2->dest;
   4542  1.1  mrg 
   4543  1.1  mrg   /* Check that we don't spoil the loop structure.  */
   4544  1.1  mrg   if (current_loop_nest)
   4545  1.1  mrg     {
   4546  1.1  mrg       basic_block latch = current_loop_nest->latch;
   4547  1.1  mrg 
   4548  1.1  mrg       /* We do not split header.  */
   4549  1.1  mrg       gcc_assert (e2->dest != current_loop_nest->header);
   4550  1.1  mrg 
   4551  1.1  mrg       /* We do not redirect the only edge to the latch block.  */
   4552  1.1  mrg       gcc_assert (e1->dest != latch
   4553  1.1  mrg 		  || !single_pred_p (latch)
   4554  1.1  mrg 		  || e1 != single_pred_edge (latch));
   4555  1.1  mrg     }
   4556  1.1  mrg 
   4557  1.1  mrg   /* Split BB to insert BOOK_INSN there.  */
   4558  1.1  mrg   new_bb = sched_split_block (bb, NULL);
   4559  1.1  mrg 
   4560  1.1  mrg   /* Move note_list from the upper bb.  */
   4561  1.1  mrg   gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
   4562  1.1  mrg   BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
   4563  1.1  mrg   BB_NOTE_LIST (bb) = NULL;
   4564  1.1  mrg 
   4565  1.1  mrg   gcc_assert (e2->dest == bb);
   4566  1.1  mrg 
   4567  1.1  mrg   /* Skip block for bookkeeping copy when leaving E1->src.  */
   4568  1.1  mrg   if (e1->flags & EDGE_FALLTHRU)
   4569  1.1  mrg     sel_redirect_edge_and_branch_force (e1, new_bb);
   4570  1.1  mrg   else
   4571  1.1  mrg     sel_redirect_edge_and_branch (e1, new_bb);
   4572  1.1  mrg 
   4573  1.1  mrg   gcc_assert (e1->dest == new_bb);
   4574  1.1  mrg   gcc_assert (sel_bb_empty_p (bb));
   4575  1.1  mrg 
   4576  1.1  mrg   /* To keep basic block numbers in sync between debug and non-debug
   4577  1.1  mrg      compilations, we have to rotate blocks here.  Consider that we
   4578  1.1  mrg      started from (a,b)->d, (c,d)->e, and d contained only debug
   4579  1.1  mrg      insns.  It would have been removed before if the debug insns
   4580  1.1  mrg      weren't there, so we'd have split e rather than d.  So what we do
   4581  1.1  mrg      now is to swap the block numbers of new_bb and
   4582  1.1  mrg      single_succ(new_bb) == e, so that the insns that were in e before
   4583  1.1  mrg      get the new block number.  */
   4584  1.1  mrg 
   4585  1.1  mrg   if (MAY_HAVE_DEBUG_INSNS)
   4586  1.1  mrg     {
   4587  1.1  mrg       basic_block succ;
   4588  1.1  mrg       insn_t insn = sel_bb_head (new_bb);
   4589  1.1  mrg       insn_t last;
   4590  1.1  mrg 
   4591  1.1  mrg       if (DEBUG_INSN_P (insn)
   4592  1.1  mrg 	  && single_succ_p (new_bb)
   4593  1.1  mrg 	  && (succ = single_succ (new_bb))
   4594  1.1  mrg 	  && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
   4595  1.1  mrg 	  && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
   4596  1.1  mrg 	{
   4597  1.1  mrg 	  while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
   4598  1.1  mrg 	    insn = NEXT_INSN (insn);
   4599  1.1  mrg 
   4600  1.1  mrg 	  if (insn == last)
   4601  1.1  mrg 	    {
   4602  1.1  mrg 	      sel_global_bb_info_def gbi;
   4603  1.1  mrg 	      sel_region_bb_info_def rbi;
   4604  1.1  mrg 
   4605  1.1  mrg 	      if (sched_verbose >= 2)
   4606  1.1  mrg 		sel_print ("Swapping block ids %i and %i\n",
   4607  1.1  mrg 			   new_bb->index, succ->index);
   4608  1.1  mrg 
   4609  1.1  mrg 	      std::swap (new_bb->index, succ->index);
   4610  1.1  mrg 
   4611  1.1  mrg 	      SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb);
   4612  1.1  mrg 	      SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ);
   4613  1.1  mrg 
   4614  1.1  mrg 	      memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
   4615  1.1  mrg 	      memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
   4616  1.1  mrg 		      sizeof (gbi));
   4617  1.1  mrg 	      memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));
   4618  1.1  mrg 
   4619  1.1  mrg 	      memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
   4620  1.1  mrg 	      memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
   4621  1.1  mrg 		      sizeof (rbi));
   4622  1.1  mrg 	      memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));
   4623  1.1  mrg 
   4624  1.1  mrg 	      std::swap (BLOCK_TO_BB (new_bb->index),
   4625  1.1  mrg 			 BLOCK_TO_BB (succ->index));
   4626  1.1  mrg 
   4627  1.1  mrg 	      std::swap (CONTAINING_RGN (new_bb->index),
   4628  1.1  mrg 			 CONTAINING_RGN (succ->index));
   4629  1.1  mrg 
   4630  1.1  mrg 	      for (int i = 0; i < current_nr_blocks; i++)
   4631  1.1  mrg 		if (BB_TO_BLOCK (i) == succ->index)
   4632  1.1  mrg 		  BB_TO_BLOCK (i) = new_bb->index;
   4633  1.1  mrg 		else if (BB_TO_BLOCK (i) == new_bb->index)
   4634  1.1  mrg 		  BB_TO_BLOCK (i) = succ->index;
   4635  1.1  mrg 
   4636  1.1  mrg 	      FOR_BB_INSNS (new_bb, insn)
   4637  1.1  mrg 		if (INSN_P (insn))
   4638  1.1  mrg 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
   4639  1.1  mrg 
   4640  1.1  mrg 	      FOR_BB_INSNS (succ, insn)
   4641  1.1  mrg 		if (INSN_P (insn))
   4642  1.1  mrg 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;
   4643  1.1  mrg 
   4644  1.1  mrg 	      if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
   4645  1.1  mrg 		bitmap_set_bit (code_motion_visited_blocks, succ->index);
   4646  1.1  mrg 
   4647  1.1  mrg 	      gcc_assert (LABEL_P (BB_HEAD (new_bb))
   4648  1.1  mrg 			  && LABEL_P (BB_HEAD (succ)));
   4649  1.1  mrg 
   4650  1.1  mrg 	      if (sched_verbose >= 4)
   4651  1.1  mrg 		sel_print ("Swapping code labels %i and %i\n",
   4652  1.1  mrg 			   CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
   4653  1.1  mrg 			   CODE_LABEL_NUMBER (BB_HEAD (succ)));
   4654  1.1  mrg 
   4655  1.1  mrg 	      std::swap (CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
   4656  1.1  mrg 			 CODE_LABEL_NUMBER (BB_HEAD (succ)));
   4657  1.1  mrg 	    }
   4658  1.1  mrg 	}
   4659  1.1  mrg     }
   4660  1.1  mrg 
   4661  1.1  mrg   return bb;
   4662  1.1  mrg }
   4663  1.1  mrg 
   4664  1.1  mrg /* Return insn after which we must insert bookkeeping code for path(s) incoming
   4665  1.1  mrg    into E2->dest, except from E1->src.  If the returned insn immediately
   4666  1.1  mrg    precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
   4667  1.1  mrg static insn_t
   4668  1.1  mrg find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
   4669  1.1  mrg {
   4670  1.1  mrg   insn_t place_to_insert;
   4671  1.1  mrg   /* Find a basic block that can hold bookkeeping.  If it can be found, do not
   4672  1.1  mrg      create new basic block, but insert bookkeeping there.  */
   4673  1.1  mrg   basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);
   4674  1.1  mrg 
   4675  1.1  mrg   if (book_block)
   4676  1.1  mrg     {
   4677  1.1  mrg       place_to_insert = BB_END (book_block);
   4678  1.1  mrg 
   4679  1.1  mrg       /* Don't use a block containing only debug insns for
   4680  1.1  mrg 	 bookkeeping, this causes scheduling differences between debug
   4681  1.1  mrg 	 and non-debug compilations, for the block would have been
   4682  1.1  mrg 	 removed already.  */
   4683  1.1  mrg       if (DEBUG_INSN_P (place_to_insert))
   4684  1.1  mrg 	{
   4685  1.1  mrg 	  rtx_insn *insn = sel_bb_head (book_block);
   4686  1.1  mrg 
   4687  1.1  mrg 	  while (insn != place_to_insert &&
   4688  1.1  mrg 		 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
   4689  1.1  mrg 	    insn = NEXT_INSN (insn);
   4690  1.1  mrg 
   4691  1.1  mrg 	  if (insn == place_to_insert)
   4692  1.1  mrg 	    book_block = NULL;
   4693  1.1  mrg 	}
   4694  1.1  mrg     }
   4695  1.1  mrg 
   4696  1.1  mrg   if (!book_block)
   4697  1.1  mrg     {
   4698  1.1  mrg       book_block = create_block_for_bookkeeping (e1, e2);
   4699  1.1  mrg       place_to_insert = BB_END (book_block);
   4700  1.1  mrg       if (sched_verbose >= 9)
   4701  1.1  mrg 	sel_print ("New block is %i, split from bookkeeping block %i\n",
   4702  1.1  mrg 		   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
   4703  1.1  mrg     }
   4704  1.1  mrg   else
   4705  1.1  mrg     {
   4706  1.1  mrg       if (sched_verbose >= 9)
   4707  1.1  mrg 	sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
   4708  1.1  mrg     }
   4709  1.1  mrg 
   4710  1.1  mrg   *fence_to_rewind = NULL;
   4711  1.1  mrg   /* If basic block ends with a jump, insert bookkeeping code right before it.
   4712  1.1  mrg      Notice if we are crossing a fence when taking PREV_INSN.  */
   4713  1.1  mrg   if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
   4714  1.1  mrg     {
   4715  1.1  mrg       *fence_to_rewind = flist_lookup (fences, place_to_insert);
   4716  1.1  mrg       place_to_insert = PREV_INSN (place_to_insert);
   4717  1.1  mrg     }
   4718  1.1  mrg 
   4719  1.1  mrg   return place_to_insert;
   4720  1.1  mrg }
   4721  1.1  mrg 
   4722  1.1  mrg /* Find a proper seqno for bookkeeing insn inserted at PLACE_TO_INSERT
   4723  1.1  mrg    for JOIN_POINT.   */
   4724  1.1  mrg static int
   4725  1.1  mrg find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
   4726  1.1  mrg {
   4727  1.1  mrg   int seqno;
   4728  1.1  mrg 
   4729  1.1  mrg   /* Check if we are about to insert bookkeeping copy before a jump, and use
   4730  1.1  mrg      jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
   4731  1.1  mrg   rtx_insn *next = NEXT_INSN (place_to_insert);
   4732  1.1  mrg   if (INSN_P (next)
   4733  1.1  mrg       && JUMP_P (next)
   4734  1.1  mrg       && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
   4735  1.1  mrg     {
   4736  1.1  mrg       gcc_assert (INSN_SCHED_TIMES (next) == 0);
   4737  1.1  mrg       seqno = INSN_SEQNO (next);
   4738  1.1  mrg     }
   4739  1.1  mrg   else if (INSN_SEQNO (join_point) > 0)
   4740  1.1  mrg     seqno = INSN_SEQNO (join_point);
   4741  1.1  mrg   else
   4742  1.1  mrg     {
   4743  1.1  mrg       seqno = get_seqno_by_preds (place_to_insert);
   4744  1.1  mrg 
   4745  1.1  mrg       /* Sometimes the fences can move in such a way that there will be
   4746  1.1  mrg          no instructions with positive seqno around this bookkeeping.
   4747  1.1  mrg          This means that there will be no way to get to it by a regular
   4748  1.1  mrg          fence movement.  Never mind because we pick up such pieces for
   4749  1.1  mrg          rescheduling anyways, so any positive value will do for now.  */
   4750  1.1  mrg       if (seqno < 0)
   4751  1.1  mrg         {
   4752  1.1  mrg           gcc_assert (pipelining_p);
   4753  1.1  mrg           seqno = 1;
   4754  1.1  mrg         }
   4755  1.1  mrg     }
   4756  1.1  mrg 
   4757  1.1  mrg   gcc_assert (seqno > 0);
   4758  1.1  mrg   return seqno;
   4759  1.1  mrg }
   4760  1.1  mrg 
   4761  1.1  mrg /* Insert bookkeeping copy of C_EXPS's insn after PLACE_TO_INSERT, assigning
   4762  1.1  mrg    NEW_SEQNO to it.  Return created insn.  */
   4763  1.1  mrg static insn_t
   4764  1.1  mrg emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
   4765  1.1  mrg {
   4766  1.1  mrg   rtx_insn *new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));
   4767  1.1  mrg 
   4768  1.1  mrg   vinsn_t new_vinsn
   4769  1.1  mrg     = create_vinsn_from_insn_rtx (new_insn_rtx,
   4770  1.1  mrg 				  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));
   4771  1.1  mrg 
   4772  1.1  mrg   insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
   4773  1.1  mrg 					       place_to_insert);
   4774  1.1  mrg 
   4775  1.1  mrg   INSN_SCHED_TIMES (new_insn) = 0;
   4776  1.1  mrg   bitmap_set_bit (current_copies, INSN_UID (new_insn));
   4777  1.1  mrg 
   4778  1.1  mrg   return new_insn;
   4779  1.1  mrg }
   4780  1.1  mrg 
   4781  1.1  mrg /* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into to
   4782  1.1  mrg    E2->dest, except from E1->src (there may be a sequence of empty blocks
   4783  1.1  mrg    between E1->src and E2->dest).  Return block containing the copy.
   4784  1.1  mrg    All scheduler data is initialized for the newly created insn.  */
   4785  1.1  mrg static basic_block
   4786  1.1  mrg generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
   4787  1.1  mrg {
   4788  1.1  mrg   insn_t join_point, place_to_insert, new_insn;
   4789  1.1  mrg   int new_seqno;
   4790  1.1  mrg   bool need_to_exchange_data_sets;
   4791  1.1  mrg   fence_t fence_to_rewind;
   4792  1.1  mrg 
   4793  1.1  mrg   if (sched_verbose >= 4)
   4794  1.1  mrg     sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
   4795  1.1  mrg 	       e2->dest->index);
   4796  1.1  mrg 
   4797  1.1  mrg   join_point = sel_bb_head (e2->dest);
   4798  1.1  mrg   place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
   4799  1.1  mrg   new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
   4800  1.1  mrg   need_to_exchange_data_sets
   4801  1.1  mrg     = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));
   4802  1.1  mrg 
   4803  1.1  mrg   new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);
   4804  1.1  mrg 
   4805  1.1  mrg   if (fence_to_rewind)
   4806  1.1  mrg     FENCE_INSN (fence_to_rewind) = new_insn;
   4807  1.1  mrg 
   4808  1.1  mrg   /* When inserting bookkeeping insn in new block, av sets should be
   4809  1.1  mrg      following: old basic block (that now holds bookkeeping) data sets are
   4810  1.1  mrg      the same as was before generation of bookkeeping, and new basic block
   4811  1.1  mrg      (that now hold all other insns of old basic block) data sets are
   4812  1.1  mrg      invalid.  So exchange data sets for these basic blocks as sel_split_block
   4813  1.1  mrg      mistakenly exchanges them in this case.  Cannot do it earlier because
   4814  1.1  mrg      when single instruction is added to new basic block it should hold NULL
   4815  1.1  mrg      lv_set.  */
   4816  1.1  mrg   if (need_to_exchange_data_sets)
   4817  1.1  mrg     exchange_data_sets (BLOCK_FOR_INSN (new_insn),
   4818  1.1  mrg 			BLOCK_FOR_INSN (join_point));
   4819  1.1  mrg 
   4820  1.1  mrg   stat_bookkeeping_copies++;
   4821  1.1  mrg   return BLOCK_FOR_INSN (new_insn);
   4822  1.1  mrg }
   4823  1.1  mrg 
   4824  1.1  mrg /* Remove from AV_PTR all insns that may need bookkeeping when scheduling
   4825  1.1  mrg    on FENCE, but we are unable to copy them.  */
   4826  1.1  mrg static void
   4827  1.1  mrg remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
   4828  1.1  mrg {
   4829  1.1  mrg   expr_t expr;
   4830  1.1  mrg   av_set_iterator i;
   4831  1.1  mrg 
   4832  1.1  mrg   /*  An expression does not need bookkeeping if it is available on all paths
   4833  1.1  mrg       from current block to original block and current block dominates
   4834  1.1  mrg       original block.  We check availability on all paths by examining
   4835  1.1  mrg       EXPR_SPEC; this is not equivalent, because it may be positive even
   4836  1.1  mrg       if expr is available on all paths (but if expr is not available on
   4837  1.1  mrg       any path, EXPR_SPEC will be positive).  */
   4838  1.1  mrg 
   4839  1.1  mrg   FOR_EACH_EXPR_1 (expr, i, av_ptr)
   4840  1.1  mrg     {
   4841  1.1  mrg       if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
   4842  1.1  mrg 	  && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
   4843  1.1  mrg 	  && (EXPR_SPEC (expr)
   4844  1.1  mrg 	      || !EXPR_ORIG_BB_INDEX (expr)
   4845  1.1  mrg 	      || !dominated_by_p (CDI_DOMINATORS,
   4846  1.1  mrg 				  BASIC_BLOCK_FOR_FN (cfun,
   4847  1.1  mrg 						      EXPR_ORIG_BB_INDEX (expr)),
   4848  1.1  mrg 				  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
   4849  1.1  mrg 	{
   4850  1.1  mrg           if (sched_verbose >= 4)
   4851  1.1  mrg             sel_print ("Expr %d removed because it would need bookkeeping, which "
   4852  1.1  mrg                        "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
   4853  1.1  mrg 	  av_set_iter_remove (&i);
   4854  1.1  mrg 	}
   4855  1.1  mrg     }
   4856  1.1  mrg }
   4857  1.1  mrg 
   4858  1.1  mrg /* Moving conditional jump through some instructions.
   4859  1.1  mrg 
   4860  1.1  mrg    Consider example:
   4861  1.1  mrg 
   4862  1.1  mrg        ...                     <- current scheduling point
   4863  1.1  mrg        NOTE BASIC BLOCK:       <- bb header
   4864  1.1  mrg        (p8)  add r14=r14+0x9;;
   4865  1.1  mrg        (p8)  mov [r14]=r23
   4866  1.1  mrg        (!p8) jump L1;;
   4867  1.1  mrg        NOTE BASIC BLOCK:
   4868  1.1  mrg        ...
   4869  1.1  mrg 
   4870  1.1  mrg    We can schedule jump one cycle earlier, than mov, because they cannot be
   4871  1.1  mrg    executed together as their predicates are mutually exclusive.
   4872  1.1  mrg 
   4873  1.1  mrg    This is done in this way: first, new fallthrough basic block is created
   4874  1.1  mrg    after jump (it is always can be done, because there already should be a
   4875  1.1  mrg    fallthrough block, where control flow goes in case of predicate being true -
   4876  1.1  mrg    in our example; otherwise there should be a dependence between those
   4877  1.1  mrg    instructions and jump and we cannot schedule jump right now);
   4878  1.1  mrg    next, all instructions between jump and current scheduling point are moved
   4879  1.1  mrg    to this new block.  And the result is this:
   4880  1.1  mrg 
   4881  1.1  mrg       NOTE BASIC BLOCK:
   4882  1.1  mrg       (!p8) jump L1           <- current scheduling point
   4883  1.1  mrg       NOTE BASIC BLOCK:       <- bb header
   4884  1.1  mrg       (p8)  add r14=r14+0x9;;
   4885  1.1  mrg       (p8)  mov [r14]=r23
   4886  1.1  mrg       NOTE BASIC BLOCK:
   4887  1.1  mrg       ...
   4888  1.1  mrg */
   4889  1.1  mrg static void
   4890  1.1  mrg move_cond_jump (rtx_insn *insn, bnd_t bnd)
   4891  1.1  mrg {
   4892  1.1  mrg   edge ft_edge;
   4893  1.1  mrg   basic_block block_from, block_next, block_new, block_bnd, bb;
   4894  1.1  mrg   rtx_insn *next, *prev, *link, *head;
   4895  1.1  mrg 
   4896  1.1  mrg   block_from = BLOCK_FOR_INSN (insn);
   4897  1.1  mrg   block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
   4898  1.1  mrg   prev = BND_TO (bnd);
   4899  1.1  mrg 
   4900  1.1  mrg   /* Moving of jump should not cross any other jumps or beginnings of new
   4901  1.1  mrg      basic blocks.  The only exception is when we move a jump through
   4902  1.1  mrg      mutually exclusive insns along fallthru edges.  */
   4903  1.1  mrg   if (flag_checking && block_from != block_bnd)
   4904  1.1  mrg     {
   4905  1.1  mrg       bb = block_from;
   4906  1.1  mrg       for (link = PREV_INSN (insn); link != PREV_INSN (prev);
   4907  1.1  mrg            link = PREV_INSN (link))
   4908  1.1  mrg         {
   4909  1.1  mrg           if (INSN_P (link))
   4910  1.1  mrg             gcc_assert (sched_insns_conditions_mutex_p (insn, link));
   4911  1.1  mrg           if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
   4912  1.1  mrg             {
   4913  1.1  mrg               gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
   4914  1.1  mrg               bb = BLOCK_FOR_INSN (link);
   4915  1.1  mrg             }
   4916  1.1  mrg         }
   4917  1.1  mrg     }
   4918  1.1  mrg 
   4919  1.1  mrg   /* Jump is moved to the boundary.  */
   4920  1.1  mrg   next = PREV_INSN (insn);
   4921  1.1  mrg   BND_TO (bnd) = insn;
   4922  1.1  mrg 
   4923  1.1  mrg   ft_edge = find_fallthru_edge_from (block_from);
   4924  1.1  mrg   block_next = ft_edge->dest;
   4925  1.1  mrg   /* There must be a fallthrough block (or where should go
   4926  1.1  mrg   control flow in case of false jump predicate otherwise?).  */
   4927  1.1  mrg   gcc_assert (block_next);
   4928  1.1  mrg 
   4929  1.1  mrg   /* Create new empty basic block after source block.  */
   4930  1.1  mrg   block_new = sel_split_edge (ft_edge);
   4931  1.1  mrg   gcc_assert (block_new->next_bb == block_next
   4932  1.1  mrg               && block_from->next_bb == block_new);
   4933  1.1  mrg 
   4934  1.1  mrg   /* Move all instructions except INSN to BLOCK_NEW.  */
   4935  1.1  mrg   bb = block_bnd;
   4936  1.1  mrg   head = BB_HEAD (block_new);
   4937  1.1  mrg   while (bb != block_from->next_bb)
   4938  1.1  mrg     {
   4939  1.1  mrg       rtx_insn *from, *to;
   4940  1.1  mrg       from = bb == block_bnd ? prev : sel_bb_head (bb);
   4941  1.1  mrg       to = bb == block_from ? next : sel_bb_end (bb);
   4942  1.1  mrg 
   4943  1.1  mrg       /* The jump being moved can be the first insn in the block.
   4944  1.1  mrg          In this case we don't have to move anything in this block.  */
   4945  1.1  mrg       if (NEXT_INSN (to) != from)
   4946  1.1  mrg         {
   4947  1.1  mrg           reorder_insns (from, to, head);
   4948  1.1  mrg 
   4949  1.1  mrg           for (link = to; link != head; link = PREV_INSN (link))
   4950  1.1  mrg             EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
   4951  1.1  mrg           head = to;
   4952  1.1  mrg         }
   4953  1.1  mrg 
   4954  1.1  mrg       /* Cleanup possibly empty blocks left.  */
   4955  1.1  mrg       block_next = bb->next_bb;
   4956  1.1  mrg       if (bb != block_from)
   4957  1.1  mrg 	tidy_control_flow (bb, false);
   4958  1.1  mrg       bb = block_next;
   4959  1.1  mrg     }
   4960  1.1  mrg 
   4961  1.1  mrg   /* Assert there is no jump to BLOCK_NEW, only fallthrough edge.  */
   4962  1.1  mrg   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
   4963  1.1  mrg 
   4964  1.1  mrg   gcc_assert (!sel_bb_empty_p (block_from)
   4965  1.1  mrg               && !sel_bb_empty_p (block_new));
   4966  1.1  mrg 
   4967  1.1  mrg   /* Update data sets for BLOCK_NEW to represent that INSN and
   4968  1.1  mrg      instructions from the other branch of INSN is no longer
   4969  1.1  mrg      available at BLOCK_NEW.  */
   4970  1.1  mrg   BB_AV_LEVEL (block_new) = global_level;
   4971  1.1  mrg   gcc_assert (BB_LV_SET (block_new) == NULL);
   4972  1.1  mrg   BB_LV_SET (block_new) = get_clear_regset_from_pool ();
   4973  1.1  mrg   update_data_sets (sel_bb_head (block_new));
   4974  1.1  mrg 
   4975  1.1  mrg   /* INSN is a new basic block header - so prepare its data
   4976  1.1  mrg      structures and update availability and liveness sets.  */
   4977  1.1  mrg   update_data_sets (insn);
   4978  1.1  mrg 
   4979  1.1  mrg   if (sched_verbose >= 4)
   4980  1.1  mrg     sel_print ("Moving jump %d\n", INSN_UID (insn));
   4981  1.1  mrg }
   4982  1.1  mrg 
   4983  1.1  mrg /* Remove nops generated during move_op for preventing removal of empty
   4984  1.1  mrg    basic blocks.  */
   4985  1.1  mrg static void
   4986  1.1  mrg remove_temp_moveop_nops (bool full_tidying)
   4987  1.1  mrg {
   4988  1.1  mrg   int i;
   4989  1.1  mrg   insn_t insn;
   4990  1.1  mrg 
   4991  1.1  mrg   FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
   4992  1.1  mrg     {
   4993  1.1  mrg       gcc_assert (INSN_NOP_P (insn));
   4994  1.1  mrg       return_nop_to_pool (insn, full_tidying);
   4995  1.1  mrg     }
   4996  1.1  mrg 
   4997  1.1  mrg   /* Empty the vector.  */
   4998  1.1  mrg   if (vec_temp_moveop_nops.length () > 0)
   4999  1.1  mrg     vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ());
   5000  1.1  mrg }
   5001  1.1  mrg 
   5002  1.1  mrg /* Records the maximal UID before moving up an instruction.  Used for
   5003  1.1  mrg    distinguishing between bookkeeping copies and original insns.  */
   5004  1.1  mrg static int max_uid_before_move_op = 0;
   5005  1.1  mrg 
   5006  1.1  mrg /* When true, we're always scheduling next insn on the already scheduled code
   5007  1.1  mrg    to get the right insn data for the following bundling or other passes.  */
   5008  1.1  mrg static int force_next_insn = 0;
   5009  1.1  mrg 
   5010  1.1  mrg /* Remove from AV_VLIW_P all instructions but next when debug counter
   5011  1.1  mrg    tells us so.  Next instruction is fetched from BNDS.  */
   5012  1.1  mrg static void
   5013  1.1  mrg remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
   5014  1.1  mrg {
   5015  1.1  mrg   if (! dbg_cnt (sel_sched_insn_cnt) || force_next_insn)
   5016  1.1  mrg     /* Leave only the next insn in av_vliw.  */
   5017  1.1  mrg     {
   5018  1.1  mrg       av_set_iterator av_it;
   5019  1.1  mrg       expr_t expr;
   5020  1.1  mrg       bnd_t bnd = BLIST_BND (bnds);
   5021  1.1  mrg       insn_t next = BND_TO (bnd);
   5022  1.1  mrg 
   5023  1.1  mrg       gcc_assert (BLIST_NEXT (bnds) == NULL);
   5024  1.1  mrg 
   5025  1.1  mrg       FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
   5026  1.1  mrg         if (EXPR_INSN_RTX (expr) != next)
   5027  1.1  mrg           av_set_iter_remove (&av_it);
   5028  1.1  mrg     }
   5029  1.1  mrg }
   5030  1.1  mrg 
   5031  1.1  mrg /* Compute available instructions on BNDS.  FENCE is the current fence.  Write
   5032  1.1  mrg    the computed set to *AV_VLIW_P.  */
   5033  1.1  mrg static void
   5034  1.1  mrg compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
   5035  1.1  mrg {
   5036  1.1  mrg   if (sched_verbose >= 2)
   5037  1.1  mrg     {
   5038  1.1  mrg       sel_print ("Boundaries: ");
   5039  1.1  mrg       dump_blist (bnds);
   5040  1.1  mrg       sel_print ("\n");
   5041  1.1  mrg     }
   5042  1.1  mrg 
   5043  1.1  mrg   for (; bnds; bnds = BLIST_NEXT (bnds))
   5044  1.1  mrg     {
   5045  1.1  mrg       bnd_t bnd = BLIST_BND (bnds);
   5046  1.1  mrg       av_set_t av1_copy;
   5047  1.1  mrg       insn_t bnd_to = BND_TO (bnd);
   5048  1.1  mrg 
   5049  1.1  mrg       /* Rewind BND->TO to the basic block header in case some bookkeeping
   5050  1.1  mrg          instructions were inserted before BND->TO and it needs to be
   5051  1.1  mrg          adjusted.  */
   5052  1.1  mrg       if (sel_bb_head_p (bnd_to))
   5053  1.1  mrg         gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
   5054  1.1  mrg       else
   5055  1.1  mrg         while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
   5056  1.1  mrg           {
   5057  1.1  mrg             bnd_to = PREV_INSN (bnd_to);
   5058  1.1  mrg             if (sel_bb_head_p (bnd_to))
   5059  1.1  mrg               break;
   5060  1.1  mrg           }
   5061  1.1  mrg 
   5062  1.1  mrg       if (BND_TO (bnd) != bnd_to)
   5063  1.1  mrg 	{
   5064  1.1  mrg   	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
   5065  1.1  mrg 	  FENCE_INSN (fence) = bnd_to;
   5066  1.1  mrg 	  BND_TO (bnd) = bnd_to;
   5067  1.1  mrg 	}
   5068  1.1  mrg 
   5069  1.1  mrg       av_set_clear (&BND_AV (bnd));
   5070  1.1  mrg       BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
   5071  1.1  mrg 
   5072  1.1  mrg       av_set_clear (&BND_AV1 (bnd));
   5073  1.1  mrg       BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
   5074  1.1  mrg 
   5075  1.1  mrg       moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
   5076  1.1  mrg 
   5077  1.1  mrg       av1_copy = av_set_copy (BND_AV1 (bnd));
   5078  1.1  mrg       av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
   5079  1.1  mrg     }
   5080  1.1  mrg 
   5081  1.1  mrg   if (sched_verbose >= 2)
   5082  1.1  mrg     {
   5083  1.1  mrg       sel_print ("Available exprs (vliw form): ");
   5084  1.1  mrg       dump_av_set (*av_vliw_p);
   5085  1.1  mrg       sel_print ("\n");
   5086  1.1  mrg     }
   5087  1.1  mrg }
   5088  1.1  mrg 
    5089  1.1  mrg /* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
    5090  1.1  mrg    expression.  When FOR_MOVEOP is true, also replace the register of
    5091  1.1  mrg    expressions found with the register from EXPR_VLIW.  */
                    /* The returned av set contains at most one expression -- see the
                       early break in the loop below.  The caller owns the set and must
                       free it with av_set_clear.  */
    5092  1.1  mrg static av_set_t
    5093  1.1  mrg find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
    5094  1.1  mrg {
    5095  1.1  mrg   av_set_t expr_seq = NULL;
    5096  1.1  mrg   expr_t expr;
    5097  1.1  mrg   av_set_iterator i;
    5098  1.1  mrg 
                    /* Scan the boundary's sequential av set for an expression that is
                       equivalent to EXPR_VLIW after moving up along the current path.  */
    5099  1.1  mrg   FOR_EACH_EXPR (expr, i, BND_AV (bnd))
    5100  1.1  mrg     {
    5101  1.1  mrg       if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
    5102  1.1  mrg         {
                            /* When preparing the expr for move_op, fix up renaming
                               artifacts and update scheduling statistics.  */
    5103  1.1  mrg           if (for_moveop)
    5104  1.1  mrg             {
    5105  1.1  mrg               /* The sequential expression has the right form to pass
    5106  1.1  mrg                  to move_op except when renaming happened.  Put the
    5107  1.1  mrg                  correct register in EXPR then.  */
    5108  1.1  mrg               if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
    5109  1.1  mrg 		{
    5110  1.1  mrg                   if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
    5111  1.1  mrg 		    {
    5112  1.1  mrg 		      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
    5113  1.1  mrg 		      stat_renamed_scheduled++;
    5114  1.1  mrg 		    }
    5115  1.1  mrg 		  /* Also put the correct TARGET_AVAILABLE bit on the expr.
    5116  1.1  mrg                      This is needed when renaming came up with original
    5117  1.1  mrg                      register.  */
    5118  1.1  mrg                   else if (EXPR_TARGET_AVAILABLE (expr)
    5119  1.1  mrg                            != EXPR_TARGET_AVAILABLE (expr_vliw))
    5120  1.1  mrg 		    {
    5121  1.1  mrg 		      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
    5122  1.1  mrg 		      EXPR_TARGET_AVAILABLE (expr) = 1;
    5123  1.1  mrg 		    }
    5124  1.1  mrg 		}
    5125  1.1  mrg               if (EXPR_WAS_SUBSTITUTED (expr))
    5126  1.1  mrg                 stat_substitutions_total++;
    5127  1.1  mrg             }
    5128  1.1  mrg 
    5129  1.1  mrg           av_set_add (&expr_seq, expr);
    5130  1.1  mrg 
    5131  1.1  mrg           /* With substitution inside insn group, it is possible
    5132  1.1  mrg              that more than one expression in expr_seq will correspond
    5133  1.1  mrg              to expr_vliw.  In this case, choose one as the attempt to
    5134  1.1  mrg              move both leads to miscompiles.  */
    5135  1.1  mrg           break;
    5136  1.1  mrg         }
    5137  1.1  mrg     }
    5138  1.1  mrg 
    5139  1.1  mrg   if (for_moveop && sched_verbose >= 2)
    5140  1.1  mrg     {
    5141  1.1  mrg       sel_print ("Best expression(s) (sequential form): ");
    5142  1.1  mrg       dump_av_set (expr_seq);
    5143  1.1  mrg       sel_print ("\n");
    5144  1.1  mrg     }
    5145  1.1  mrg 
    5146  1.1  mrg   return expr_seq;
    5147  1.1  mrg }
   5148  1.1  mrg 
   5149  1.1  mrg 
    5150  1.1  mrg /* Move nop to previous block.  */
    5151  1.1  mrg static void ATTRIBUTE_UNUSED
    5152  1.1  mrg move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
    5153  1.1  mrg {
    5154  1.1  mrg   insn_t prev_insn, next_insn;
    5155  1.1  mrg 
                    /* NOP must currently head its basic block, and PREV_BB must be the
                       block immediately preceding it in the block chain.  */
    5156  1.1  mrg   gcc_assert (sel_bb_head_p (nop)
    5157  1.1  mrg               && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
    5158  1.1  mrg   rtx_note *note = bb_note (BLOCK_FOR_INSN (nop));
    5159  1.1  mrg   prev_insn = sel_bb_end (prev_bb);
    5160  1.1  mrg   next_insn = NEXT_INSN (nop);
                    /* The bb note of NOP's block currently sits between PREV_BB's last
                       insn and NOP (asserted below), i.e. the chain reads
                       prev_insn -> note -> nop -> next_insn.  */
    5161  1.1  mrg   gcc_assert (prev_insn != NULL_RTX
    5162  1.1  mrg               && PREV_INSN (note) == prev_insn);
    5163  1.1  mrg 
                    /* Swap NOP with the note, so the chain becomes
                       prev_insn -> nop -> note -> next_insn.  The order of the
                       pointer updates below is significant.  */
    5164  1.1  mrg   SET_NEXT_INSN (prev_insn) = nop;
    5165  1.1  mrg   SET_PREV_INSN (nop) = prev_insn;
    5166  1.1  mrg 
    5167  1.1  mrg   SET_PREV_INSN (note) = nop;
    5168  1.1  mrg   SET_NEXT_INSN (note) = next_insn;
    5169  1.1  mrg 
    5170  1.1  mrg   SET_NEXT_INSN (nop) = note;
    5171  1.1  mrg   SET_PREV_INSN (next_insn) = note;
    5172  1.1  mrg 
                    /* NOP is now the last insn of PREV_BB; update the block data.  */
    5173  1.1  mrg   BB_END (prev_bb) = nop;
    5174  1.1  mrg   BLOCK_FOR_INSN (nop) = prev_bb;
    5175  1.1  mrg }
   5176  1.1  mrg 
    5177  1.1  mrg /* Prepare a place to insert the chosen expression on BND.  */
    5178  1.1  mrg static insn_t
    5179  1.1  mrg prepare_place_to_insert (bnd_t bnd)
    5180  1.1  mrg {
    5181  1.1  mrg   insn_t place_to_insert;
    5182  1.1  mrg 
    5183  1.1  mrg   /* Init place_to_insert before calling move_op, as the later
    5184  1.1  mrg      can possibly remove BND_TO (bnd).  */
    5185  1.1  mrg   if (/* If this is not the first insn scheduled.  */
    5186  1.1  mrg       BND_PTR (bnd))
    5187  1.1  mrg     {
    5188  1.1  mrg       /* Add it after last scheduled.  */
    5189  1.1  mrg       place_to_insert = ILIST_INSN (BND_PTR (bnd));
    5190  1.1  mrg       if (DEBUG_INSN_P (place_to_insert))
    5191  1.1  mrg 	{
                          /* The last scheduled insn is a debug insn.  Emitting after it
                             is fine only if some nondebug insn was already scheduled on
                             this boundary; scan the list for one.  If every scheduled
                             insn is a debug insn, fall through to the nop path below.  */
    5192  1.1  mrg 	  ilist_t l = BND_PTR (bnd);
    5193  1.1  mrg 	  while ((l = ILIST_NEXT (l)) &&
    5194  1.1  mrg 		 DEBUG_INSN_P (ILIST_INSN (l)))
    5195  1.1  mrg 	    ;
    5196  1.1  mrg 	  if (!l)
    5197  1.1  mrg 	    place_to_insert = NULL;
    5198  1.1  mrg 	}
    5199  1.1  mrg     }
    5200  1.1  mrg   else
    5201  1.1  mrg     place_to_insert = NULL;
    5202  1.1  mrg 
    5203  1.1  mrg   if (!place_to_insert)
    5204  1.1  mrg     {
    5205  1.1  mrg       /* Add it before BND_TO.  The difference is in the
    5206  1.1  mrg          basic block, where INSN will be added.  */
                        /* get_nop_from_pool places a nop in BND_TO's block; the new insn
                           will be emitted after this nop, hence before BND_TO.  */
    5207  1.1  mrg       place_to_insert = get_nop_from_pool (BND_TO (bnd));
    5208  1.1  mrg       gcc_assert (BLOCK_FOR_INSN (place_to_insert)
    5209  1.1  mrg                   == BLOCK_FOR_INSN (BND_TO (bnd)));
    5210  1.1  mrg     }
    5211  1.1  mrg 
    5212  1.1  mrg   return place_to_insert;
    5213  1.1  mrg }
   5214  1.1  mrg 
    5215  1.1  mrg /* Find original instructions for EXPR_SEQ and move it to BND boundary.
    5216  1.1  mrg    Return the expression to emit in C_EXPR.  */
                    /* The boolean return value is SHOULD_MOVE from move_op: when true the
                       caller moves the original insn into place instead of emitting a
                       fresh copy (see schedule_expr_on_boundary).  */
    5217  1.1  mrg static bool
    5218  1.1  mrg move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
    5219  1.1  mrg                         av_set_t expr_seq, expr_t c_expr)
    5220  1.1  mrg {
    5221  1.1  mrg   bool b, should_move;
    5222  1.1  mrg   unsigned book_uid;
    5223  1.1  mrg   bitmap_iterator bi;
    5224  1.1  mrg   int n_bookkeeping_copies_before_moveop;
    5225  1.1  mrg 
    5226  1.1  mrg   /* Make a move.  This call will remove the original operation,
    5227  1.1  mrg      insert all necessary bookkeeping instructions and update the
    5228  1.1  mrg      data sets.  After that all we have to do is add the operation
    5229  1.1  mrg      at before BND_TO (BND).  */
    5230  1.1  mrg   n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
    5231  1.1  mrg   max_uid_before_move_op = get_max_uid ();
                    /* CURRENT_COPIES and CURRENT_ORIGINATORS are filled by move_op and
                       consumed in the loop below.  */
    5232  1.1  mrg   bitmap_clear (current_copies);
    5233  1.1  mrg   bitmap_clear (current_originators);
    5234  1.1  mrg 
    5235  1.1  mrg   b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
    5236  1.1  mrg                get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
    5237  1.1  mrg 
    5238  1.1  mrg   /* We should be able to find the expression we've chosen for
    5239  1.1  mrg      scheduling.  */
    5240  1.1  mrg   gcc_assert (b);
    5241  1.1  mrg 
    5242  1.1  mrg   if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
    5243  1.1  mrg     stat_insns_needed_bookkeeping++;
    5244  1.1  mrg 
                    /* For each bookkeeping copy created by move_op, record the set of
                       insns it originates from, closed transitively.  */
    5245  1.1  mrg   EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
    5246  1.1  mrg     {
    5247  1.1  mrg       unsigned uid;
    5248  1.1  mrg       bitmap_iterator bi;
                          /* (The inner BI shadows the outer one; it walks
                             CURRENT_ORIGINATORS.)  */
    5249  1.1  mrg 
    5250  1.1  mrg       /* We allocate these bitmaps lazily.  */
    5251  1.1  mrg       if (! INSN_ORIGINATORS_BY_UID (book_uid))
    5252  1.1  mrg         INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
    5253  1.1  mrg 
    5254  1.1  mrg       bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
    5255  1.1  mrg                    current_originators);
    5256  1.1  mrg 
    5257  1.1  mrg       /* Transitively add all originators' originators.  */
    5258  1.1  mrg       EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
    5259  1.1  mrg        if (INSN_ORIGINATORS_BY_UID (uid))
    5260  1.1  mrg 	 bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
    5261  1.1  mrg 			  INSN_ORIGINATORS_BY_UID (uid));
    5262  1.1  mrg     }
    5263  1.1  mrg 
    5264  1.1  mrg   return should_move;
    5265  1.1  mrg }
   5266  1.1  mrg 
   5267  1.1  mrg 
   5268  1.1  mrg /* Debug a DFA state as an array of bytes.  */
   5269  1.1  mrg static void
   5270  1.1  mrg debug_state (state_t state)
   5271  1.1  mrg {
   5272  1.1  mrg   unsigned char *p;
   5273  1.1  mrg   unsigned int i, size = dfa_state_size;
   5274  1.1  mrg 
   5275  1.1  mrg   sel_print ("state (%u):", size);
   5276  1.1  mrg   for (i = 0, p = (unsigned char *) state; i < size; i++)
   5277  1.1  mrg     sel_print (" %d", p[i]);
   5278  1.1  mrg   sel_print ("\n");
   5279  1.1  mrg }
   5280  1.1  mrg 
    5281  1.1  mrg /* Advance state on FENCE with INSN.  Return true if INSN is
    5282  1.1  mrg    an ASM, and we should advance state once more.  */
    5283  1.1  mrg static bool
    5284  1.1  mrg advance_state_on_fence (fence_t fence, insn_t insn)
    5285  1.1  mrg {
    5286  1.1  mrg   bool asm_p;
    5287  1.1  mrg 
                    /* A recognized insn: feed it to the DFA on the fence.  */
    5288  1.1  mrg   if (recog_memoized (insn) >= 0)
    5289  1.1  mrg     {
    5290  1.1  mrg       int res;
    5291  1.1  mrg       state_t temp_state = alloca (dfa_state_size);
    5292  1.1  mrg 
    5293  1.1  mrg       gcc_assert (!INSN_ASM_P (insn));
    5294  1.1  mrg       asm_p = false;
    5295  1.1  mrg 
                          /* Snapshot the DFA state first so we can tell below whether
                             the transition actually changed it (i.e. consumed an issue
                             slot).  The transition is expected to succeed -- a negative
                             result -- since the insn was chosen as ready.  */
    5296  1.1  mrg       memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
    5297  1.1  mrg       res = state_transition (FENCE_STATE (fence), insn);
    5298  1.1  mrg       gcc_assert (res < 0);
    5299  1.1  mrg 
    5300  1.1  mrg       if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
    5301  1.1  mrg         {
    5302  1.1  mrg           FENCE_ISSUED_INSNS (fence)++;
    5303  1.1  mrg 
    5304  1.1  mrg           /* We should never issue more than issue_rate insns.  */
    5305  1.1  mrg           if (FENCE_ISSUED_INSNS (fence) > issue_rate)
    5306  1.1  mrg             gcc_unreachable ();
    5307  1.1  mrg         }
    5308  1.1  mrg     }
    5309  1.1  mrg   else
    5310  1.1  mrg     {
    5311  1.1  mrg       /* This could be an ASM insn which we'd like to schedule
    5312  1.1  mrg          on the next cycle.  */
    5313  1.1  mrg       asm_p = INSN_ASM_P (insn);
    5314  1.1  mrg       if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
    5315  1.1  mrg         advance_one_cycle (fence);
    5316  1.1  mrg     }
    5317  1.1  mrg 
    5318  1.1  mrg   if (sched_verbose >= 2)
    5319  1.1  mrg     debug_state (FENCE_STATE (fence));
                    /* Debug insns do not start or consume cycles.  */
    5320  1.1  mrg   if (!DEBUG_INSN_P (insn))
    5321  1.1  mrg     FENCE_STARTS_CYCLE_P (fence) = 0;
    5322  1.1  mrg   FENCE_ISSUE_MORE (fence) = can_issue_more;
    5323  1.1  mrg   return asm_p;
    5324  1.1  mrg }
   5325  1.1  mrg 
    5326  1.1  mrg /* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
    5327  1.1  mrg    is nonzero if we need to stall after issuing INSN.  */
    5328  1.1  mrg static void
    5329  1.1  mrg update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
    5330  1.1  mrg {
    5331  1.1  mrg   bool asm_p;
    5332  1.1  mrg 
    5333  1.1  mrg   /* First, reflect that something is scheduled on this fence.  */
    5334  1.1  mrg   asm_p = advance_state_on_fence (fence, insn);
    5335  1.1  mrg   FENCE_LAST_SCHEDULED_INSN (fence) = insn;
    5336  1.1  mrg   vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
                    /* If INSN starts a sched group, force the group's next member to be
                       scheduled right after it; clear the flag so the group marker is
                       not seen again for this insn.  */
    5337  1.1  mrg   if (SCHED_GROUP_P (insn))
    5338  1.1  mrg     {
    5339  1.1  mrg       FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
    5340  1.1  mrg       SCHED_GROUP_P (insn) = 0;
    5341  1.1  mrg     }
    5342  1.1  mrg   else
    5343  1.1  mrg     FENCE_SCHED_NEXT (fence) = NULL;
                    /* INSN is issued now, so its ready tick is no longer relevant.  */
    5344  1.1  mrg   if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
    5345  1.1  mrg     FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
    5346  1.1  mrg 
    5347  1.1  mrg   /* Set instruction scheduling info.  This will be used in bundling,
    5348  1.1  mrg      pipelining, tick computations etc.  */
    5349  1.1  mrg   ++INSN_SCHED_TIMES (insn);
    5350  1.1  mrg   EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
    5351  1.1  mrg   EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
    5352  1.1  mrg   INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
    5353  1.1  mrg   INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
    5354  1.1  mrg 
    5355  1.1  mrg   /* This does not account for adjust_cost hooks, just add the biggest
    5356  1.1  mrg      constant the hook may add to the latency.  TODO: make this
    5357  1.1  mrg      a target dependent constant.  */
                    /* Unrecognized insns (INSN_CODE < 0) have no latency data; assume
                       one cycle for them.  */
    5358  1.1  mrg   INSN_READY_CYCLE (insn)
    5359  1.1  mrg     = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
    5360  1.1  mrg                                  ? 1
    5361  1.1  mrg                                  : maximal_insn_latency (insn) + 1);
    5362  1.1  mrg 
    5363  1.1  mrg   /* Change these fields last, as they're used above.  */
    5364  1.1  mrg   FENCE_AFTER_STALL_P (fence) = 0;
    5365  1.1  mrg   if (asm_p || need_stall)
    5366  1.1  mrg     advance_one_cycle (fence);
    5367  1.1  mrg 
    5368  1.1  mrg   /* Indicate that we've scheduled something on this fence.  */
    5369  1.1  mrg   FENCE_SCHEDULED_P (fence) = true;
    5370  1.1  mrg   scheduled_something_on_previous_fence = true;
    5371  1.1  mrg 
    5372  1.1  mrg   /* Print debug information when insn's fields are updated.  */
    5373  1.1  mrg   if (sched_verbose >= 2)
    5374  1.1  mrg     {
    5375  1.1  mrg       sel_print ("Scheduling insn: ");
    5376  1.1  mrg       dump_insn_1 (insn, 1);
    5377  1.1  mrg       sel_print ("\n");
    5378  1.1  mrg     }
    5379  1.1  mrg }
   5380  1.1  mrg 
    5381  1.1  mrg /* Update boundary BND (and, if needed, FENCE) with INSN, remove the
    5382  1.1  mrg    old boundary from BNDSP, add new boundaries to BNDS_TAIL_P and
    5383  1.1  mrg    return it.  */
    5384  1.1  mrg static blist_t *
    5385  1.1  mrg update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
    5386  1.1  mrg                    blist_t *bnds_tailp)
    5387  1.1  mrg {
    5388  1.1  mrg   succ_iterator si;
    5389  1.1  mrg   insn_t succ;
    5390  1.1  mrg 
    5391  1.1  mrg   advance_deps_context (BND_DC (bnd), insn);
                    /* Create a fresh boundary at each normal successor of INSN,
                       extending the scheduled-insn path with INSN itself.  */
    5392  1.1  mrg   FOR_EACH_SUCC_1 (succ, si, insn,
    5393  1.1  mrg                    SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    5394  1.1  mrg     {
    5395  1.1  mrg       ilist_t ptr = ilist_copy (BND_PTR (bnd));
    5396  1.1  mrg 
    5397  1.1  mrg       ilist_add (&ptr, insn);
    5398  1.1  mrg 
                          /* Don't create a boundary through an ineligible successor when
                             a debug insn ends its block; drop the copied path instead.  */
    5399  1.1  mrg       if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
    5400  1.1  mrg 	  && is_ineligible_successor (succ, ptr))
    5401  1.1  mrg 	{
    5402  1.1  mrg 	  ilist_clear (&ptr);
    5403  1.1  mrg 	  continue;
    5404  1.1  mrg 	}
    5405  1.1  mrg 
                          /* If INSN was the fence insn and we stay inside the block,
                             the fence moves forward to the successor.  */
    5406  1.1  mrg       if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
    5407  1.1  mrg 	{
    5408  1.1  mrg 	  if (sched_verbose >= 9)
    5409  1.1  mrg 	    sel_print ("Updating fence insn from %i to %i\n",
    5410  1.1  mrg 		       INSN_UID (insn), INSN_UID (succ));
    5411  1.1  mrg 	  FENCE_INSN (fence) = succ;
    5412  1.1  mrg 	}
    5413  1.1  mrg       blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
    5414  1.1  mrg       bnds_tailp = &BLIST_NEXT (*bnds_tailp);
    5415  1.1  mrg     }
    5416  1.1  mrg 
                    /* The old boundary is fully replaced by the new ones.  */
    5417  1.1  mrg   blist_remove (bndsp);
    5418  1.1  mrg   return bnds_tailp;
    5419  1.1  mrg }
   5420  1.1  mrg 
    5421  1.1  mrg /* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
    5422  1.1  mrg static insn_t
    5423  1.1  mrg schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
    5424  1.1  mrg {
    5425  1.1  mrg   av_set_t expr_seq;
    5426  1.1  mrg   expr_t c_expr = XALLOCA (expr_def);
    5427  1.1  mrg   insn_t place_to_insert;
    5428  1.1  mrg   insn_t insn;
    5429  1.1  mrg   bool should_move;
    5430  1.1  mrg 
    5431  1.1  mrg   expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
    5432  1.1  mrg 
    5433  1.1  mrg   /* In case of scheduling a jump skipping some other instructions,
    5434  1.1  mrg      prepare CFG.  After this, jump is at the boundary and can be
    5435  1.1  mrg      scheduled as usual insn by MOVE_OP.  */
    5436  1.1  mrg   if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
    5437  1.1  mrg     {
    5438  1.1  mrg       insn = EXPR_INSN_RTX (expr_vliw);
    5439  1.1  mrg 
    5440  1.1  mrg       /* Speculative jumps are not handled.  */
    5441  1.1  mrg       if (insn != BND_TO (bnd)
    5442  1.1  mrg           && !sel_insn_is_speculation_check (insn))
    5443  1.1  mrg         move_cond_jump (insn, bnd);
    5444  1.1  mrg     }
    5445  1.1  mrg 
    5446  1.1  mrg   /* Find a place for C_EXPR to schedule.  */
                    /* Note the ordering: the insertion point must be computed before
                       move_op runs, since move_op may remove BND_TO (bnd) -- see
                       prepare_place_to_insert.  */
    5447  1.1  mrg   place_to_insert = prepare_place_to_insert (bnd);
    5448  1.1  mrg   should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
    5449  1.1  mrg   clear_expr (c_expr);
    5450  1.1  mrg 
    5451  1.1  mrg   /* Add the instruction.  The corner case to care about is when
    5452  1.1  mrg      the expr_seq set has more than one expr, and we chose the one that
    5453  1.1  mrg      is not equal to expr_vliw.  Then expr_vliw may be insn in stream, and
    5454  1.1  mrg      we can't use it.  Generate the new vinsn.  */
    5455  1.1  mrg   if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
    5456  1.1  mrg     {
    5457  1.1  mrg       vinsn_t vinsn_new;
    5458  1.1  mrg 
    5459  1.1  mrg       vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
    5460  1.1  mrg       change_vinsn_in_expr (expr_vliw, vinsn_new);
    5461  1.1  mrg       should_move = false;
    5462  1.1  mrg     }
                    /* Either move the found original insn into place, or emit a copy
                       from the expression.  */
    5463  1.1  mrg   if (should_move)
    5464  1.1  mrg     insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
    5465  1.1  mrg   else
    5466  1.1  mrg     insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
    5467  1.1  mrg                                       place_to_insert);
    5468  1.1  mrg 
    5469  1.1  mrg   /* Return the nops generated for preserving of data sets back
    5470  1.1  mrg      into pool.  */
    5471  1.1  mrg   if (INSN_NOP_P (place_to_insert))
    5472  1.1  mrg     return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
    5473  1.1  mrg   remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
    5474  1.1  mrg 
    5475  1.1  mrg   av_set_clear (&expr_seq);
    5476  1.1  mrg 
    5477  1.1  mrg   /* Save the expression scheduled so to reset target availability if we'll
    5478  1.1  mrg      meet it later on the same fence.  */
    5479  1.1  mrg   if (EXPR_WAS_RENAMED (expr_vliw))
    5480  1.1  mrg     vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
    5481  1.1  mrg 
    5482  1.1  mrg   /* Check that the recent movement didn't destroyed loop
    5483  1.1  mrg      structure.  */
    5484  1.1  mrg   gcc_assert (!pipelining_p
    5485  1.1  mrg               || current_loop_nest == NULL
    5486  1.1  mrg               || loop_latch_edge (current_loop_nest));
    5487  1.1  mrg   return insn;
    5488  1.1  mrg }
   5489  1.1  mrg 
   5490  1.1  mrg /* Stall for N cycles on FENCE.  */
   5491  1.1  mrg static void
   5492  1.1  mrg stall_for_cycles (fence_t fence, int n)
   5493  1.1  mrg {
   5494  1.1  mrg   int could_more;
   5495  1.1  mrg 
   5496  1.1  mrg   could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
   5497  1.1  mrg   while (n--)
   5498  1.1  mrg     advance_one_cycle (fence);
   5499  1.1  mrg   if (could_more)
   5500  1.1  mrg     FENCE_AFTER_STALL_P (fence) = 1;
   5501  1.1  mrg }
   5502  1.1  mrg 
    5503  1.1  mrg /* Gather a parallel group of insns at FENCE and assign their seqno
    5504  1.1  mrg    to SEQNO.  All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
    5505  1.1  mrg    list for later recalculation of seqnos.  */
    5506  1.1  mrg static void
    5507  1.1  mrg fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
    5508  1.1  mrg {
    5509  1.1  mrg   blist_t bnds = NULL, *bnds_tailp;
    5510  1.1  mrg   av_set_t av_vliw = NULL;
    5511  1.1  mrg   insn_t insn = FENCE_INSN (fence);
    5512  1.1  mrg 
    5513  1.1  mrg   if (sched_verbose >= 2)
    5514  1.1  mrg     sel_print ("Starting fill_insns for insn %d, cycle %d\n",
    5515  1.1  mrg                INSN_UID (insn), FENCE_CYCLE (fence));
    5516  1.1  mrg 
                    /* Start with a single boundary at the fence insn and install the
                       fence's target context and issue count.  */
    5517  1.1  mrg   blist_add (&bnds, insn, NULL, FENCE_DC (fence));
    5518  1.1  mrg   bnds_tailp = &BLIST_NEXT (bnds);
    5519  1.1  mrg   set_target_context (FENCE_TC (fence));
    5520  1.1  mrg   can_issue_more = FENCE_ISSUE_MORE (fence);
    5521  1.1  mrg   target_bb = INSN_BB (insn);
    5522  1.1  mrg 
    5523  1.1  mrg   /* Do while we can add any operation to the current group.  */
    5524  1.1  mrg   do
    5525  1.1  mrg     {
    5526  1.1  mrg       blist_t *bnds_tailp1, *bndsp;
    5527  1.1  mrg       expr_t expr_vliw;
    5528  1.1  mrg       int need_stall = false;
    5529  1.1  mrg       int was_stall = 0, scheduled_insns = 0;
    5530  1.1  mrg       int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
    5531  1.1  mrg       int max_stall = pipelining_p ? 1 : 3;
    5532  1.1  mrg       bool last_insn_was_debug = false;
    5533  1.1  mrg       bool was_debug_bb_end_p = false;
    5534  1.1  mrg 
                          /* Phase 1: compute the set of candidate expressions for this
                             group and prune the ones we won't schedule.  */
    5535  1.1  mrg       compute_av_set_on_boundaries (fence, bnds, &av_vliw);
    5536  1.1  mrg       remove_insns_that_need_bookkeeping (fence, &av_vliw);
    5537  1.1  mrg       remove_insns_for_debug (bnds, &av_vliw);
    5538  1.1  mrg 
    5539  1.1  mrg       /* Return early if we have nothing to schedule.  */
    5540  1.1  mrg       if (av_vliw == NULL)
    5541  1.1  mrg         break;
    5542  1.1  mrg 
    5543  1.1  mrg       /* Choose the best expression and, if needed, destination register
    5544  1.1  mrg 	 for it.  */
    5545  1.1  mrg       do
    5546  1.1  mrg         {
    5547  1.1  mrg           expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
    5548  1.1  mrg           if (! expr_vliw && need_stall)
    5549  1.1  mrg             {
    5550  1.1  mrg               /* All expressions required a stall.  Do not recompute av sets
    5551  1.1  mrg                  as we'll get the same answer (modulo the insns between
    5552  1.1  mrg                  the fence and its boundary, which will not be available for
    5553  1.1  mrg                  pipelining).
    5554  1.1  mrg 		 If we are going to stall for too long, break to recompute av
    5555  1.1  mrg 		 sets and bring more insns for pipelining.  */
    5556  1.1  mrg               was_stall++;
    5557  1.1  mrg 	      if (need_stall <= 3)
    5558  1.1  mrg 		stall_for_cycles (fence, need_stall);
    5559  1.1  mrg 	      else
    5560  1.1  mrg 		{
    5561  1.1  mrg 		  stall_for_cycles (fence, 1);
    5562  1.1  mrg 		  break;
    5563  1.1  mrg 		}
    5564  1.1  mrg             }
    5565  1.1  mrg         }
    5566  1.1  mrg       while (! expr_vliw && need_stall);
    5567  1.1  mrg 
    5568  1.1  mrg       /* Now either we've selected expr_vliw or we have nothing to schedule.  */
    5569  1.1  mrg       if (!expr_vliw)
    5570  1.1  mrg         {
    5571  1.1  mrg 	  av_set_clear (&av_vliw);
    5572  1.1  mrg           break;
    5573  1.1  mrg         }
    5574  1.1  mrg 
    5575  1.1  mrg       bndsp = &bnds;
    5576  1.1  mrg       bnds_tailp1 = bnds_tailp;
    5577  1.1  mrg 
                          /* Phase 2: place the chosen expression on the boundary whose
                             av set contains it, and replace that boundary with its
                             successors.  */
    5578  1.1  mrg       do
    5579  1.1  mrg 	/* This code will be executed only once until we'd have several
    5580  1.1  mrg            boundaries per fence.  */
    5581  1.1  mrg         {
    5582  1.1  mrg 	  bnd_t bnd = BLIST_BND (*bndsp);
    5583  1.1  mrg 
    5584  1.1  mrg 	  if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
    5585  1.1  mrg 	    {
    5586  1.1  mrg 	      bndsp = &BLIST_NEXT (*bndsp);
    5587  1.1  mrg 	      continue;
    5588  1.1  mrg 	    }
    5589  1.1  mrg 
    5590  1.1  mrg           insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
    5591  1.1  mrg 	  last_insn_was_debug = DEBUG_INSN_P (insn);
    5592  1.1  mrg 	  if (last_insn_was_debug)
    5593  1.1  mrg 	    was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
    5594  1.1  mrg           update_fence_and_insn (fence, insn, need_stall);
    5595  1.1  mrg           bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);
    5596  1.1  mrg 
    5597  1.1  mrg 	  /* Add insn to the list of scheduled on this cycle instructions.  */
    5598  1.1  mrg 	  ilist_add (*scheduled_insns_tailpp, insn);
    5599  1.1  mrg 	  *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
    5600  1.1  mrg         }
    5601  1.1  mrg       while (*bndsp != *bnds_tailp1);
    5602  1.1  mrg 
    5603  1.1  mrg       av_set_clear (&av_vliw);
    5604  1.1  mrg       if (!last_insn_was_debug)
    5605  1.1  mrg 	scheduled_insns++;
    5606  1.1  mrg 
    5607  1.1  mrg       /* We currently support information about candidate blocks only for
    5608  1.1  mrg 	 one 'target_bb' block.  Hence we can't schedule after jump insn,
    5609  1.1  mrg 	 as this will bring two boundaries and, hence, necessity to handle
    5610  1.1  mrg 	 information for two or more blocks concurrently.  */
    5611  1.1  mrg       if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
    5612  1.1  mrg           || (was_stall
    5613  1.1  mrg               && (was_stall >= max_stall
    5614  1.1  mrg                   || scheduled_insns >= max_insns)))
    5615  1.1  mrg         break;
    5616  1.1  mrg     }
    5617  1.1  mrg   while (bnds);
    5618  1.1  mrg 
    5619  1.1  mrg   gcc_assert (!FENCE_BNDS (fence));
    5620  1.1  mrg 
    5621  1.1  mrg   /* Update boundaries of the FENCE.  */
                    /* Record on the fence the last scheduled insn of each remaining
                       boundary path, then free the boundary list.  */
    5622  1.1  mrg   while (bnds)
    5623  1.1  mrg     {
    5624  1.1  mrg       ilist_t ptr = BND_PTR (BLIST_BND (bnds));
    5625  1.1  mrg 
    5626  1.1  mrg       if (ptr)
    5627  1.1  mrg 	{
    5628  1.1  mrg 	  insn = ILIST_INSN (ptr);
    5629  1.1  mrg 
    5630  1.1  mrg 	  if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
    5631  1.1  mrg 	    ilist_add (&FENCE_BNDS (fence), insn);
    5632  1.1  mrg 	}
    5633  1.1  mrg 
    5634  1.1  mrg       blist_remove (&bnds);
    5635  1.1  mrg     }
    5636  1.1  mrg 
    5637  1.1  mrg   /* Update target context on the fence.  */
    5638  1.1  mrg   reset_target_context (FENCE_TC (fence), false);
    5639  1.1  mrg }
   5640  1.1  mrg 
   5641  1.1  mrg /* All exprs in ORIG_OPS must have the same destination register or memory.
   5642  1.1  mrg    Return that destination.  */
   5643  1.1  mrg static rtx
   5644  1.1  mrg get_dest_from_orig_ops (av_set_t orig_ops)
   5645  1.1  mrg {
   5646  1.1  mrg   rtx dest = NULL_RTX;
   5647  1.1  mrg   av_set_iterator av_it;
   5648  1.1  mrg   expr_t expr;
   5649  1.1  mrg   bool first_p = true;
   5650  1.1  mrg 
   5651  1.1  mrg   FOR_EACH_EXPR (expr, av_it, orig_ops)
   5652  1.1  mrg     {
   5653  1.1  mrg       rtx x = EXPR_LHS (expr);
   5654  1.1  mrg 
   5655  1.1  mrg       if (first_p)
   5656  1.1  mrg 	{
   5657  1.1  mrg 	  first_p = false;
   5658  1.1  mrg 	  dest = x;
   5659  1.1  mrg 	}
   5660  1.1  mrg       else
   5661  1.1  mrg 	gcc_assert (dest == x
   5662  1.1  mrg 		    || (dest != NULL_RTX && x != NULL_RTX
   5663  1.1  mrg 			&& rtx_equal_p (dest, x)));
   5664  1.1  mrg     }
   5665  1.1  mrg 
   5666  1.1  mrg   return dest;
   5667  1.1  mrg }
   5668  1.1  mrg 
    5669  1.1  mrg /* Update data sets for the bookkeeping block and record those expressions
    5670  1.1  mrg    which become no longer available after inserting this bookkeeping.  */
    5671  1.1  mrg static void
    5672  1.1  mrg update_and_record_unavailable_insns (basic_block book_block)
    5673  1.1  mrg {
    5674  1.1  mrg   av_set_iterator i;
    5675  1.1  mrg   av_set_t old_av_set = NULL;
    5676  1.1  mrg   expr_t cur_expr;
    5677  1.1  mrg   rtx_insn *bb_end = sel_bb_end (book_block);
    5678  1.1  mrg 
    5679  1.1  mrg   /* First, get correct liveness in the bookkeeping block.  The problem is
    5680  1.1  mrg      the range between the bookeeping insn and the end of block.  */
    5681  1.1  mrg   update_liveness_on_insn (bb_end);
                    /* A control-flow insn at the block end needs liveness fixed on the
                       insn before it as well.  */
    5682  1.1  mrg   if (control_flow_insn_p (bb_end))
    5683  1.1  mrg     update_liveness_on_insn (PREV_INSN (bb_end));
    5684  1.1  mrg 
    5685  1.1  mrg   /* If there's valid av_set on BOOK_BLOCK, then there might exist another
    5686  1.1  mrg      fence above, where we may choose to schedule an insn which is
    5687  1.1  mrg      actually blocked from moving up with the bookkeeping we create here.  */
    5688  1.1  mrg   if (AV_SET_VALID_P (sel_bb_head (book_block)))
    5689  1.1  mrg     {
                          /* Snapshot the old av set, recompute data sets, then diff the
                             two to find expressions the bookkeeping made unavailable.  */
    5690  1.1  mrg       old_av_set = av_set_copy (BB_AV_SET (book_block));
    5691  1.1  mrg       update_data_sets (sel_bb_head (book_block));
    5692  1.1  mrg 
    5693  1.1  mrg       /* Traverse all the expressions in the old av_set and check whether
    5694  1.1  mrg 	 CUR_EXPR is in new AV_SET.  */
    5695  1.1  mrg       FOR_EACH_EXPR (cur_expr, i, old_av_set)
    5696  1.1  mrg         {
    5697  1.1  mrg           expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
    5698  1.1  mrg 					   EXPR_VINSN (cur_expr));
    5699  1.1  mrg 
    5700  1.1  mrg           if (! new_expr
    5701  1.1  mrg               /* In this case, we can just turn off the E_T_A bit, but we can't
    5702  1.1  mrg                  represent this information with the current vector.  */
    5703  1.1  mrg               || EXPR_TARGET_AVAILABLE (new_expr)
    5704  1.1  mrg 		 != EXPR_TARGET_AVAILABLE (cur_expr))
    5705  1.1  mrg 	    /* Unfortunately, the below code could be also fired up on
    5706  1.1  mrg 	       separable insns, e.g. when moving insns through the new
    5707  1.1  mrg 	       speculation check as in PR 53701.  */
    5708  1.1  mrg             vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
    5709  1.1  mrg         }
    5710  1.1  mrg 
    5711  1.1  mrg       av_set_clear (&old_av_set);
    5712  1.1  mrg     }
    5713  1.1  mrg }
   5714  1.1  mrg 
    5715  1.1  mrg /* The main effect of this function is that sparams->c_expr is merged
    5716  1.1  mrg    with (or copied to) lparams->c_expr_merged.  If there's only one successor,
    5717  1.1  mrg    we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
    5718  1.1  mrg    lparams->c_expr_merged is copied back to sparams->c_expr after all
    5719  1.1  mrg    successors has been traversed.  lparams->c_expr_local is an expr allocated
    5720  1.1  mrg    on stack in the caller function, and is used if there is more than one
    5721  1.1  mrg    successor.
    5722  1.1  mrg 
    5723  1.1  mrg    SUCC is one of the SUCCS_NORMAL successors of INSN,
    5724  1.1  mrg    MOVEOP_DRV_CALL_RES is the result of call code_motion_path_driver on succ,
    5725  1.1  mrg    LPARAMS and STATIC_PARAMS contain the parameters described above.  */
    5726  1.1  mrg static void
    5727  1.1  mrg move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
    5728  1.1  mrg                      insn_t succ ATTRIBUTE_UNUSED,
    5729  1.1  mrg 		     int moveop_drv_call_res,
    5730  1.1  mrg 		     cmpd_local_params_p lparams, void *static_params)
    5731  1.1  mrg {
    5732  1.1  mrg   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
    5733  1.1  mrg 
    5734  1.1  mrg   /* Nothing to do, if original expr wasn't found below.  */
    5735  1.1  mrg   if (moveop_drv_call_res != 1)
    5736  1.1  mrg     return;
    5737  1.1  mrg 
    5738  1.1  mrg   /* If this is a first successor.  */
    5739  1.1  mrg   if (!lparams->c_expr_merged)
    5740  1.1  mrg     {
                          /* Take the first successor's expr as the merge base and hand
                             the caller-allocated scratch expr to subsequent calls.  */
    5741  1.1  mrg       lparams->c_expr_merged = sparams->c_expr;
    5742  1.1  mrg       sparams->c_expr = lparams->c_expr_local;
    5743  1.1  mrg     }
    5744  1.1  mrg   else
    5745  1.1  mrg     {
    5746  1.1  mrg       /* We must merge all found expressions to get reasonable
    5747  1.1  mrg 	 EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
    5748  1.1  mrg 	 do so then we can first find the expr with epsilon
    5749  1.1  mrg 	 speculation success probability and only then with the
    5750  1.1  mrg 	 good probability.  As a result the insn will get epsilon
    5751  1.1  mrg 	 probability and will never be scheduled because of
    5752  1.1  mrg 	 weakness_cutoff in find_best_expr.
    5753  1.1  mrg 
    5754  1.1  mrg 	 We call merge_expr_data here instead of merge_expr
    5755  1.1  mrg 	 because due to speculation C_EXPR and X may have the
    5756  1.1  mrg 	 same insns with different speculation types.  And as of
    5757  1.1  mrg 	 now such insns are considered non-equal.
    5758  1.1  mrg 
    5759  1.1  mrg 	 However, EXPR_SCHED_TIMES is different -- we must get
    5760  1.1  mrg 	 SCHED_TIMES from a real insn, not a bookkeeping copy.
    5761  1.1  mrg 	 We force this here.  Instead, we may consider merging
    5762  1.1  mrg 	 SCHED_TIMES to the maximum instead of minimum in the
    5763  1.1  mrg 	 below function.  */
    5764  1.1  mrg       int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
    5765  1.1  mrg 
    5766  1.1  mrg       merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
    5767  1.1  mrg       if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
    5768  1.1  mrg 	EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;
    5769  1.1  mrg 
    5770  1.1  mrg       clear_expr (sparams->c_expr);
    5771  1.1  mrg     }
    5772  1.1  mrg }
   5773  1.1  mrg 
   5774  1.1  mrg /*  Add used regs for the successor SUCC into SPARAMS->USED_REGS.
   5775  1.1  mrg 
   5776  1.1  mrg    SUCC is one of the SUCCS_NORMAL successors of INSN,
   5777  1.1  mrg    MOVEOP_DRV_CALL_RES is the result of call code_motion_path_driver on succ or 0,
   5778  1.1  mrg      if SUCC is one of SUCCS_BACK or SUCCS_OUT.
   5779  1.1  mrg    STATIC_PARAMS contain USED_REGS set.  */
   5780  1.1  mrg static void
   5781  1.1  mrg fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
   5782  1.1  mrg 		 int moveop_drv_call_res,
   5783  1.1  mrg 		 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
   5784  1.1  mrg 		 void *static_params)
   5785  1.1  mrg {
   5786  1.1  mrg   regset succ_live;
   5787  1.1  mrg   fur_static_params_p sparams = (fur_static_params_p) static_params;
   5788  1.1  mrg 
   5789  1.1  mrg   /* Here we compute live regsets only for branches that do not lie
   5790  1.1  mrg      on the code motion paths.  These branches correspond to value
   5791  1.1  mrg      MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
   5792  1.1  mrg      for such branches code_motion_path_driver is not called.  */
   5793  1.1  mrg   if (moveop_drv_call_res != 0)
   5794  1.1  mrg     return;
   5795  1.1  mrg 
   5796  1.1  mrg   /* Mark all registers that do not meet the following condition:
   5797  1.1  mrg      (3) not live on the other path of any conditional branch
   5798  1.1  mrg      that is passed by the operation, in case original
   5799  1.1  mrg      operations are not present on both paths of the
   5800  1.1  mrg      conditional branch.  */
   5801  1.1  mrg   succ_live = compute_live (succ);
   5802  1.1  mrg   IOR_REG_SET (sparams->used_regs, succ_live);
   5803  1.1  mrg }
   5804  1.1  mrg 
   5805  1.1  mrg /* This function is called after the last successor.  Copies LP->C_EXPR_MERGED
   5806  1.1  mrg    into SP->CEXPR.  */
   5807  1.1  mrg static void
   5808  1.1  mrg move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
   5809  1.1  mrg {
   5810  1.1  mrg   moveop_static_params_p sp = (moveop_static_params_p) sparams;
   5811  1.1  mrg 
   5812  1.1  mrg   sp->c_expr = lp->c_expr_merged;
   5813  1.1  mrg }
   5814  1.1  mrg 
   5815  1.1  mrg /* Track bookkeeping copies created, insns scheduled, and blocks for
   5816  1.1  mrg    rescheduling when INSN is found by move_op.  */
   5817  1.1  mrg static void
   5818  1.1  mrg track_scheduled_insns_and_blocks (rtx_insn *insn)
   5819  1.1  mrg {
   5820  1.1  mrg   /* Even if this insn can be a copy that will be removed during current move_op,
   5821  1.1  mrg      we still need to count it as an originator.  */
   5822  1.1  mrg   bitmap_set_bit (current_originators, INSN_UID (insn));
   5823  1.1  mrg 
   5824  1.1  mrg   if (!bitmap_clear_bit (current_copies, INSN_UID (insn)))
   5825  1.1  mrg     {
   5826  1.1  mrg       /* Note that original block needs to be rescheduled, as we pulled an
   5827  1.1  mrg 	 instruction out of it.  */
   5828  1.1  mrg       if (INSN_SCHED_TIMES (insn) > 0)
   5829  1.1  mrg 	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
   5830  1.1  mrg       else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
   5831  1.1  mrg 	num_insns_scheduled++;
   5832  1.1  mrg     }
   5833  1.1  mrg 
   5834  1.1  mrg   /* For instructions we must immediately remove insn from the
   5835  1.1  mrg      stream, so subsequent update_data_sets () won't include this
   5836  1.1  mrg      insn into av_set.
   5837  1.1  mrg      For expr we must make insn look like "INSN_REG (insn) := c_expr".  */
   5838  1.1  mrg   if (INSN_UID (insn) > max_uid_before_move_op)
   5839  1.1  mrg     stat_bookkeeping_copies--;
   5840  1.1  mrg }
   5841  1.1  mrg 
/* Emit a register-register copy for INSN if needed.  Return true if
   emitted one.  PARAMS is the move_op static parameters.  */
static bool
maybe_emit_renaming_copy (rtx_insn *insn,
                          moveop_static_params_p params)
{
  bool insn_emitted  = false;
  rtx cur_reg;

  /* Bail out early when expression cannot be renamed at all.  */
  if (!EXPR_SEPARABLE_P (params->c_expr))
    return false;

  cur_reg = expr_dest_reg (params->c_expr);
  gcc_assert (cur_reg && params->dest && REG_P (params->dest));

  /* If original operation has expr and the register chosen for
     that expr is not original operation's dest reg, substitute
     operation's right hand side with the register chosen.  */
  if (REGNO (params->dest) != REGNO (cur_reg))
    {
      insn_t reg_move_insn, reg_move_insn_rtx;

      /* Build an insn keeping INSN's lhs but with PARAMS->DEST as the
	 rhs, and emit it right after INSN, inheriting INSN's expression
	 data and seqno.  */
      reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
                                                    params->dest);
      reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
                                                   INSN_EXPR (insn),
                                                   INSN_SEQNO (insn),
                                                   insn);
      /* The plain reg-reg copy carries no speculation status.  */
      EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
      replace_dest_with_reg_in_expr (params->c_expr, params->dest);

      insn_emitted = true;
      params->was_renamed = true;
    }

  return insn_emitted;
}
   5880  1.1  mrg 
   5881  1.1  mrg /* Emit a speculative check for INSN speculated as EXPR if needed.
   5882  1.1  mrg    Return true if we've  emitted one.  PARAMS is the move_op static
   5883  1.1  mrg    parameters.  */
   5884  1.1  mrg static bool
   5885  1.1  mrg maybe_emit_speculative_check (rtx_insn *insn, expr_t expr,
   5886  1.1  mrg                               moveop_static_params_p params)
   5887  1.1  mrg {
   5888  1.1  mrg   bool insn_emitted = false;
   5889  1.1  mrg   insn_t x;
   5890  1.1  mrg   ds_t check_ds;
   5891  1.1  mrg 
   5892  1.1  mrg   check_ds = get_spec_check_type_for_insn (insn, expr);
   5893  1.1  mrg   if (check_ds != 0)
   5894  1.1  mrg     {
   5895  1.1  mrg       /* A speculation check should be inserted.  */
   5896  1.1  mrg       x = create_speculation_check (params->c_expr, check_ds, insn);
   5897  1.1  mrg       insn_emitted = true;
   5898  1.1  mrg     }
   5899  1.1  mrg   else
   5900  1.1  mrg     {
   5901  1.1  mrg       EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
   5902  1.1  mrg       x = insn;
   5903  1.1  mrg     }
   5904  1.1  mrg 
   5905  1.1  mrg   gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
   5906  1.1  mrg               && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
   5907  1.1  mrg   return insn_emitted;
   5908  1.1  mrg }
   5909  1.1  mrg 
   5910  1.1  mrg /* Handle transformations that leave an insn in place of original
   5911  1.1  mrg    insn such as renaming/speculation.  Return true if one of such
   5912  1.1  mrg    transformations actually happened, and we have emitted this insn.  */
   5913  1.1  mrg static bool
   5914  1.1  mrg handle_emitting_transformations (rtx_insn *insn, expr_t expr,
   5915  1.1  mrg                                  moveop_static_params_p params)
   5916  1.1  mrg {
   5917  1.1  mrg   bool insn_emitted = false;
   5918  1.1  mrg 
   5919  1.1  mrg   insn_emitted = maybe_emit_renaming_copy (insn, params);
   5920  1.1  mrg   insn_emitted |= maybe_emit_speculative_check (insn, expr, params);
   5921  1.1  mrg 
   5922  1.1  mrg   return insn_emitted;
   5923  1.1  mrg }
   5924  1.1  mrg 
/* If INSN is the only insn in the basic block (not counting JUMP,
   which may be a jump to next insn, and DEBUG_INSNs), we want to
   leave a NOP there till the return to fill_insns.  */

static bool
need_nop_to_preserve_insn_bb (rtx_insn *insn)
{
  insn_t bb_head, bb_end, bb_next, in_next;
  basic_block bb = BLOCK_FOR_INSN (insn);

  bb_head = sel_bb_head (bb);
  bb_end = sel_bb_end (bb);

  /* A single-insn block: that insn is INSN, so a nop is needed.  */
  if (bb_head == bb_end)
    return true;

  /* Skip leading debug insns.  */
  while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
    bb_head = NEXT_INSN (bb_head);

  /* Only debug insns besides the head remain.  */
  if (bb_head == bb_end)
    return true;

  /* Skip trailing debug insns.  */
  while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
    bb_end = PREV_INSN (bb_end);

  if (bb_head == bb_end)
    return true;

  /* Find the next non-debug insn after the (adjusted) head.  */
  bb_next = NEXT_INSN (bb_head);
  while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
    bb_next = NEXT_INSN (bb_next);

  /* Exactly two real insns and the second is a jump: per the header
     comment, the jump does not count, so INSN is effectively alone.  */
  if (bb_next == bb_end && JUMP_P (bb_end))
    return true;

  /* Find the next non-debug insn after INSN itself.  */
  in_next = NEXT_INSN (insn);
  while (DEBUG_INSN_P (in_next))
    in_next = NEXT_INSN (in_next);

  /* The following insn lies on the current fence -- keep the block
     alive until the return to fill_insns.  */
  if (IN_CURRENT_FENCE_P (in_next))
    return true;

  return false;
}
   5969  1.1  mrg 
   5970  1.1  mrg /* Remove INSN from stream.  When ONLY_DISCONNECT is true, its data
   5971  1.1  mrg    is not removed but reused when INSN is re-emitted.  */
   5972  1.1  mrg static void
   5973  1.1  mrg remove_insn_from_stream (rtx_insn *insn, bool only_disconnect)
   5974  1.1  mrg {
   5975  1.1  mrg   /* If there's only one insn in the BB, make sure that a nop is
   5976  1.1  mrg      inserted into it, so the basic block won't disappear when we'll
   5977  1.1  mrg      delete INSN below with sel_remove_insn. It should also survive
   5978  1.1  mrg      till the return to fill_insns.  */
   5979  1.1  mrg   if (need_nop_to_preserve_insn_bb (insn))
   5980  1.1  mrg     {
   5981  1.1  mrg       insn_t nop = get_nop_from_pool (insn);
   5982  1.1  mrg       gcc_assert (INSN_NOP_P (nop));
   5983  1.1  mrg       vec_temp_moveop_nops.safe_push (nop);
   5984  1.1  mrg     }
   5985  1.1  mrg 
   5986  1.1  mrg   sel_remove_insn (insn, only_disconnect, false);
   5987  1.1  mrg }
   5988  1.1  mrg 
/* This function is called when original expr is found.
   INSN - current insn traversed, EXPR - the corresponding expr found.
   LPARAMS is the local parameters of the code motion driver, STATIC_PARAMS
   is static parameters of move_op.  */
static void
move_op_orig_expr_found (insn_t insn, expr_t expr,
                         cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
                         void *static_params)
{
  bool only_disconnect;
  moveop_static_params_p params = (moveop_static_params_p) static_params;

  /* Capture the found occurrence into C_EXPR before INSN is removed.  */
  copy_expr_onside (params->c_expr, INSN_EXPR (insn));
  track_scheduled_insns_and_blocks (insn);
  /* Possibly emit a renaming copy and/or a speculation check in place
     of INSN.  */
  handle_emitting_transformations (insn, expr, params);
  /* Only disconnect (keep INSN's data for re-emission) when INSN is
     the one recorded in PARAMS->UID.  */
  only_disconnect = params->uid == INSN_UID (insn);

  /* Mark that we've disconnected an insn.  */
  if (only_disconnect)
    params->uid = -1;
  remove_insn_from_stream (insn, only_disconnect);
}
   6011  1.1  mrg 
/* The function is called when original expr is found.
   INSN - current insn traversed, EXPR - the corresponding expr found,
   crossed_call_abis and original_insns in STATIC_PARAMS are updated.  */
static void
fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
                     cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
                     void *static_params)
{
  fur_static_params_p params = (fur_static_params_p) static_params;
  regset tmp;

  /* Fold in the ABI of a call found at this point of the path.  */
  if (CALL_P (insn))
    params->crossed_call_abis |= 1 << insn_callee_abi (insn).id ();

  def_list_add (params->original_insns, insn, params->crossed_call_abis);

  /* Mark the registers that do not meet the following condition:
    (2) not among the live registers of the point
	immediately following the first original operation on
	a given downward path, except for the original target
	register of the operation.  */
  tmp = get_clear_regset_from_pool ();
  compute_live_below_insn (insn, tmp);
  /* Registers INSN itself sets or clobbers form the "except for the
     original target register" part of condition (2) above.  */
  AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
  AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
  IOR_REG_SET (params->used_regs, tmp);
  return_regset_to_pool (tmp);

  /* (*1) We need to add to USED_REGS registers that are read by
     INSN's lhs. This may lead to choosing wrong src register.
     E.g. (scheduling const expr enabled):

	429: ax=0x0	<- Can't use AX for this expr (0x0)
	433: dx=[bp-0x18]
	427: [ax+dx+0x1]=ax
	  REG_DEAD: ax
	168: di=dx
	  REG_DEAD: dx
     */
  /* FIXME: see comment above and enable MEM_P
     in vinsn_separable_p.  */
  gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
	      || !MEM_P (INSN_LHS (insn)));
}
   6056  1.1  mrg 
/* This function is called on the ascending pass, before returning from
   current basic block.  Generates bookkeeping code if needed, updates the
   data sets of INSN's block, and tidies the control flow below it.  */
static void
move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
                       void *static_params)
{
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
  basic_block book_block = NULL;

  /* When we have removed the boundary insn for scheduling, which also
     happened to be the end insn in its bb, we don't need to update sets.  */
  if (!lparams->removed_last_insn
      && lparams->e1
      && sel_bb_head_p (insn))
    {
      /* We should generate bookkeeping code only if we are not at the
         top level of the move_op.  */
      if (sel_num_cfg_preds_gt_1 (insn))
        book_block = generate_bookkeeping_insn (sparams->c_expr,
                                                lparams->e1, lparams->e2);
      /* Update data sets for the current insn.  */
      update_data_sets (insn);
    }

  /* If bookkeeping code was inserted, we need to update av sets of basic
     block that received bookkeeping.  After generation of bookkeeping insn,
     bookkeeping block does not contain valid av set because we are not following
     the original algorithm in every detail with regards to e.g. renaming
     simple reg-reg copies.  Consider example:

     bookkeeping block           scheduling fence
     \            /
      \    join  /
       ----------
       |        |
       ----------
      /           \
     /             \
     r1 := r2          r1 := r3

     We try to schedule insn "r1 := r3" on the current
     scheduling fence.  Also, note that av set of bookkeeping block
     contain both insns "r1 := r2" and "r1 := r3".  When the insn has
     been scheduled, the CFG is as follows:

     r1 := r3               r1 := r3
     bookkeeping block           scheduling fence
     \            /
      \    join  /
       ----------
       |        |
       ----------
      /          \
     /            \
     r1 := r2

     Here, insn "r1 := r3" was scheduled at the current scheduling point
     and bookkeeping code was generated at the bookkeeping block.  This
     way insn "r1 := r2" is no longer available as a whole instruction
     (but only as expr) ahead of insn "r1 := r3" in bookkeeping block.
     This situation is handled by calling update_data_sets.

     Since update_data_sets is called only on the bookkeeping block, and
     it also may have predecessors with av_sets, containing instructions that
     are no longer available, we save all such expressions that become
     unavailable during data sets update on the bookkeeping block in
     VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
     expressions for scheduling.  This allows us to avoid recomputation of
     av_sets outside the code motion path.  */

  if (book_block)
    update_and_record_unavailable_insns (book_block);

  /* If INSN was previously marked for deletion, it's time to do it.  */
  if (lparams->removed_last_insn)
    insn = PREV_INSN (insn);

  /* Do not tidy control flow at the topmost moveop, as we can erroneously
     kill a block with a single nop in which the insn should be emitted.  */
  if (lparams->e1)
    tidy_control_flow (BLOCK_FOR_INSN (insn), true);
}
   6139  1.1  mrg 
/* This function is called on the ascending pass, before returning from the
   current basic block.  For find_used_regs this is a pure sanity check:
   a bb head reached here must either have a valid av set or one that was
   never computed (AV_LEVEL == -1).  */
static void
fur_at_first_insn (insn_t insn,
                   cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
                   void *static_params ATTRIBUTE_UNUSED)
{
  gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
	      || AV_LEVEL (insn) == -1);
}
   6150  1.1  mrg 
   6151  1.1  mrg /* Called on the backward stage of recursion to call moveup_expr for insn
   6152  1.1  mrg    and sparams->c_expr.  */
   6153  1.1  mrg static void
   6154  1.1  mrg move_op_ascend (insn_t insn, void *static_params)
   6155  1.1  mrg {
   6156  1.1  mrg   enum MOVEUP_EXPR_CODE res;
   6157  1.1  mrg   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
   6158  1.1  mrg 
   6159  1.1  mrg   if (! INSN_NOP_P (insn))
   6160  1.1  mrg     {
   6161  1.1  mrg       res = moveup_expr_cached (sparams->c_expr, insn, false);
   6162  1.1  mrg       gcc_assert (res != MOVEUP_EXPR_NULL);
   6163  1.1  mrg     }
   6164  1.1  mrg 
   6165  1.1  mrg   /* Update liveness for this insn as it was invalidated.  */
   6166  1.1  mrg   update_liveness_on_insn (insn);
   6167  1.1  mrg }
   6168  1.1  mrg 
   6169  1.1  mrg /* This function is called on enter to the basic block.
   6170  1.1  mrg    Returns TRUE if this block already have been visited and
   6171  1.1  mrg    code_motion_path_driver should return 1, FALSE otherwise.  */
   6172  1.1  mrg static int
   6173  1.1  mrg fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
   6174  1.1  mrg 	      void *static_params, bool visited_p)
   6175  1.1  mrg {
   6176  1.1  mrg   fur_static_params_p sparams = (fur_static_params_p) static_params;
   6177  1.1  mrg 
   6178  1.1  mrg   if (visited_p)
   6179  1.1  mrg     {
   6180  1.1  mrg       /* If we have found something below this block, there should be at
   6181  1.1  mrg 	 least one insn in ORIGINAL_INSNS.  */
   6182  1.1  mrg       gcc_assert (*sparams->original_insns);
   6183  1.1  mrg 
   6184  1.1  mrg       /* Adjust CROSSED_CALL_ABIS, since we may have come to this block along
   6185  1.1  mrg 	 different path.  */
   6186  1.1  mrg       DEF_LIST_DEF (*sparams->original_insns)->crossed_call_abis
   6187  1.1  mrg 	|= sparams->crossed_call_abis;
   6188  1.1  mrg     }
   6189  1.1  mrg   else
   6190  1.1  mrg     local_params->old_original_insns = *sparams->original_insns;
   6191  1.1  mrg 
   6192  1.1  mrg   return 1;
   6193  1.1  mrg }
   6194  1.1  mrg 
   6195  1.1  mrg /* Same as above but for move_op.   */
   6196  1.1  mrg static int
   6197  1.1  mrg move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
   6198  1.1  mrg                   cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
   6199  1.1  mrg                   void *static_params ATTRIBUTE_UNUSED, bool visited_p)
   6200  1.1  mrg {
   6201  1.1  mrg   if (visited_p)
   6202  1.1  mrg     return -1;
   6203  1.1  mrg   return 1;
   6204  1.1  mrg }
   6205  1.1  mrg 
   6206  1.1  mrg /* This function is called while descending current basic block if current
   6207  1.1  mrg    insn is not the original EXPR we're searching for.
   6208  1.1  mrg 
   6209  1.1  mrg    Return value: FALSE, if code_motion_path_driver should perform a local
   6210  1.1  mrg 			cleanup and return 0 itself;
   6211  1.1  mrg 		 TRUE, if code_motion_path_driver should continue.  */
   6212  1.1  mrg static bool
   6213  1.1  mrg move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
   6214  1.1  mrg 			    void *static_params)
   6215  1.1  mrg {
   6216  1.1  mrg   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
   6217  1.1  mrg 
   6218  1.1  mrg   sparams->failed_insn = insn;
   6219  1.1  mrg 
   6220  1.1  mrg   /* If we're scheduling separate expr, in order to generate correct code
   6221  1.1  mrg      we need to stop the search at bookkeeping code generated with the
   6222  1.1  mrg      same destination register or memory.  */
   6223  1.1  mrg   if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
   6224  1.1  mrg     return false;
   6225  1.1  mrg   return true;
   6226  1.1  mrg }
   6227  1.1  mrg 
/* This function is called while descending current basic block if current
   insn is not the original EXPR we're searching for.

   Return value: TRUE (code_motion_path_driver should continue).  */
static bool
fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
{
  bool mutexed;
  expr_t r;
  av_set_iterator avi;
  fur_static_params_p sparams = (fur_static_params_p) static_params;

  /* Record a crossed call's ABI; debug insns never constrain registers,
     so skip them entirely.  */
  if (CALL_P (insn))
    sparams->crossed_call_abis |= 1 << insn_callee_abi (insn).id ();
  else if (DEBUG_INSN_P (insn))
    return true;

  /* If current insn we are looking at cannot be executed together
     with original insn, then we can skip it safely.

     Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
	      INSN = (!p6) r14 = r14 + 1;

     Here we can schedule ORIG_OP with lhs = r14, though only
     looking at the set of used and set registers of INSN we must
     forbid it.  So, add set/used in INSN registers to the
     untouchable set only if there is an insn in ORIG_OPS that can
     affect INSN.  */
  mutexed = true;
  FOR_EACH_EXPR (r, avi, orig_ops)
    if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
      {
	mutexed = false;
	break;
      }

  /* Mark all registers that do not meet the following condition:
     (1) Not set or read on any path from xi to an instance of the
	 original operation.  */
  if (!mutexed)
    {
      IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
      IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
      IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
    }

  return true;
}
   6276  1.1  mrg 
/* Hooks and data to perform move_op operations with code_motion_path_driver.  */
struct code_motion_path_driver_info_def move_op_hooks = {
  move_op_on_enter,		/* called on entering a basic block */
  move_op_orig_expr_found,	/* original expr found at an insn */
  move_op_orig_expr_not_found,	/* insn is not the expr searched for */
  move_op_merge_succs,		/* merge results from successors */
  move_op_after_merge_succs,	/* called after the last successor */
  move_op_ascend,		/* called while ascending back up */
  move_op_at_first_insn,	/* called at a bb head on the way up */
  SUCCS_NORMAL,			/* succ_flags: traverse normal succs only */
  "move_op"			/* name used in debug dumps */
};
   6289  1.1  mrg 
/* Hooks and data to perform find_used_regs operations
   with code_motion_path_driver.  */
struct code_motion_path_driver_info_def fur_hooks = {
  fur_on_enter,			/* called on entering a basic block */
  fur_orig_expr_found,		/* original expr found at an insn */
  fur_orig_expr_not_found,	/* insn is not the expr searched for */
  fur_merge_succs,		/* unify live reg sets from successors */
  NULL, /* fur_after_merge_succs */
  NULL, /* fur_ascend */
  fur_at_first_insn,		/* sanity check at a bb head */
  SUCCS_ALL,			/* succ_flags: collect regs from ALL succs */
  "find_used_regs"		/* name used in debug dumps */
};
   6303  1.1  mrg 
   6304  1.1  mrg /* Traverse all successors of INSN.  For each successor that is SUCCS_NORMAL
   6305  1.1  mrg    code_motion_path_driver is called recursively.  Original operation
   6306  1.1  mrg    was found at least on one path that is starting with one of INSN's
   6307  1.1  mrg    successors (this fact is asserted).  ORIG_OPS is expressions we're looking
   6308  1.1  mrg    for, PATH is the path we've traversed, STATIC_PARAMS is the parameters
   6309  1.1  mrg    of either move_op or find_used_regs depending on the caller.
   6310  1.1  mrg 
   6311  1.1  mrg    Return 0 if we haven't found expression, 1 if we found it, -1 if we don't
   6312  1.1  mrg    know for sure at this point.  */
   6313  1.1  mrg static int
   6314  1.1  mrg code_motion_process_successors (insn_t insn, av_set_t orig_ops,
   6315  1.1  mrg                                 ilist_t path, void *static_params)
   6316  1.1  mrg {
   6317  1.1  mrg   int res = 0;
   6318  1.1  mrg   succ_iterator succ_i;
   6319  1.1  mrg   insn_t succ;
   6320  1.1  mrg   basic_block bb;
   6321  1.1  mrg   int old_index;
   6322  1.1  mrg   unsigned old_succs;
   6323  1.1  mrg 
   6324  1.1  mrg   struct cmpd_local_params lparams;
   6325  1.1  mrg   expr_def _x;
   6326  1.1  mrg 
   6327  1.1  mrg   lparams.c_expr_local = &_x;
   6328  1.1  mrg   lparams.c_expr_merged = NULL;
   6329  1.1  mrg 
   6330  1.1  mrg   /* We need to process only NORMAL succs for move_op, and collect live
   6331  1.1  mrg      registers from ALL branches (including those leading out of the
   6332  1.1  mrg      region) for find_used_regs.
   6333  1.1  mrg 
   6334  1.1  mrg      In move_op, there can be a case when insn's bb number has changed
   6335  1.1  mrg      due to created bookkeeping.  This happens very rare, as we need to
   6336  1.1  mrg      move expression from the beginning to the end of the same block.
   6337  1.1  mrg      Rescan successors in this case.  */
   6338  1.1  mrg 
   6339  1.1  mrg  rescan:
   6340  1.1  mrg   bb = BLOCK_FOR_INSN (insn);
   6341  1.1  mrg   old_index = bb->index;
   6342  1.1  mrg   old_succs = EDGE_COUNT (bb->succs);
   6343  1.1  mrg 
   6344  1.1  mrg   FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
   6345  1.1  mrg     {
   6346  1.1  mrg       int b;
   6347  1.1  mrg 
   6348  1.1  mrg       lparams.e1 = succ_i.e1;
   6349  1.1  mrg       lparams.e2 = succ_i.e2;
   6350  1.1  mrg 
   6351  1.1  mrg       /* Go deep into recursion only for NORMAL edges (non-backedges within the
   6352  1.1  mrg 	 current region).  */
   6353  1.1  mrg       if (succ_i.current_flags == SUCCS_NORMAL)
   6354  1.1  mrg 	b = code_motion_path_driver (succ, orig_ops, path, &lparams,
   6355  1.1  mrg 				     static_params);
   6356  1.1  mrg       else
   6357  1.1  mrg 	b = 0;
   6358  1.1  mrg 
   6359  1.1  mrg       /* Merge c_expres found or unify live register sets from different
   6360  1.1  mrg 	 successors.  */
   6361  1.1  mrg       code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
   6362  1.1  mrg 						 static_params);
   6363  1.1  mrg       if (b == 1)
   6364  1.1  mrg         res = b;
   6365  1.1  mrg       else if (b == -1 && res != 1)
   6366  1.1  mrg         res = b;
   6367  1.1  mrg 
   6368  1.1  mrg       /* We have simplified the control flow below this point.  In this case,
   6369  1.1  mrg          the iterator becomes invalid.  We need to try again.
   6370  1.1  mrg 	 If we have removed the insn itself, it could be only an
   6371  1.1  mrg 	 unconditional jump.  Thus, do not rescan but break immediately --
   6372  1.1  mrg 	 we have already visited the only successor block.  */
   6373  1.1  mrg       if (!BLOCK_FOR_INSN (insn))
   6374  1.1  mrg 	{
   6375  1.1  mrg 	  if (sched_verbose >= 6)
   6376  1.1  mrg 	    sel_print ("Not doing rescan: already visited the only successor"
   6377  1.1  mrg 		       " of block %d\n", old_index);
   6378  1.1  mrg 	  break;
   6379  1.1  mrg 	}
   6380  1.1  mrg       if (BLOCK_FOR_INSN (insn)->index != old_index
   6381  1.1  mrg           || EDGE_COUNT (bb->succs) != old_succs)
   6382  1.1  mrg         {
   6383  1.1  mrg 	  if (sched_verbose >= 6)
   6384  1.1  mrg 	    sel_print ("Rescan: CFG was simplified below insn %d, block %d\n",
   6385  1.1  mrg 		       INSN_UID (insn), BLOCK_FOR_INSN (insn)->index);
   6386  1.1  mrg           insn = sel_bb_end (BLOCK_FOR_INSN (insn));
   6387  1.1  mrg           goto rescan;
   6388  1.1  mrg         }
   6389  1.1  mrg     }
   6390  1.1  mrg 
   6391  1.1  mrg   /* Here, RES==1 if original expr was found at least for one of the
   6392  1.1  mrg      successors.  After the loop, RES may happen to have zero value
   6393  1.1  mrg      only if at some point the expr searched is present in av_set, but is
   6394  1.1  mrg      not found below.  In most cases, this situation is an error.
   6395  1.1  mrg      The exception is when the original operation is blocked by
   6396  1.1  mrg      bookkeeping generated for another fence or for another path in current
   6397  1.1  mrg      move_op.  */
   6398  1.1  mrg   gcc_checking_assert (res == 1
   6399  1.1  mrg 		       || (res == 0
   6400  1.1  mrg 			    && av_set_could_be_blocked_by_bookkeeping_p (orig_ops, static_params))
   6401  1.1  mrg 		       || res == -1);
   6402  1.1  mrg 
   6403  1.1  mrg   /* Merge data, clean up, etc.  */
   6404  1.1  mrg   if (res != -1 && code_motion_path_driver_info->after_merge_succs)
   6405  1.1  mrg     code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
   6406  1.1  mrg 
   6407  1.1  mrg   return res;
   6408  1.1  mrg }
   6409  1.1  mrg 
   6410  1.1  mrg 
   6411  1.1  mrg /* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
   6412  1.1  mrg    is the pointer to the av set with expressions we were looking for,
   6413  1.1  mrg    PATH_P is the pointer to the traversed path.  */
   6414  1.1  mrg static inline void
   6415  1.1  mrg code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
   6416  1.1  mrg {
   6417  1.1  mrg   ilist_remove (path_p);
   6418  1.1  mrg   av_set_clear (orig_ops_p);
   6419  1.1  mrg }
   6420  1.1  mrg 
   6421  1.1  mrg /* The driver function that implements move_op or find_used_regs
   6422  1.1  mrg    functionality dependent whether code_motion_path_driver_INFO is set to
   6423  1.1  mrg    &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common parts
   6424  1.1  mrg    of code (CFG traversal etc) that are shared among both functions.  INSN
   6425  1.1  mrg    is the insn we're starting the search from, ORIG_OPS are the expressions
   6426  1.1  mrg    we're searching for, PATH is traversed path, LOCAL_PARAMS_IN are local
   6427  1.1  mrg    parameters of the driver, and STATIC_PARAMS are static parameters of
   6428  1.1  mrg    the caller.
   6429  1.1  mrg 
   6430  1.1  mrg    Returns whether original instructions were found.  Note that top-level
   6431  1.1  mrg    code_motion_path_driver always returns true.  */
   6432  1.1  mrg static int
   6433  1.1  mrg code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
   6434  1.1  mrg 			 cmpd_local_params_p local_params_in,
   6435  1.1  mrg 			 void *static_params)
   6436  1.1  mrg {
   6437  1.1  mrg   expr_t expr = NULL;
   6438  1.1  mrg   basic_block bb = BLOCK_FOR_INSN (insn);
   6439  1.1  mrg   insn_t first_insn, original_insn, bb_tail, before_first;
   6440  1.1  mrg   bool removed_last_insn = false;
   6441  1.1  mrg 
   6442  1.1  mrg   if (sched_verbose >= 6)
   6443  1.1  mrg     {
   6444  1.1  mrg       sel_print ("%s (", code_motion_path_driver_info->routine_name);
   6445  1.1  mrg       dump_insn (insn);
   6446  1.1  mrg       sel_print (",");
   6447  1.1  mrg       dump_av_set (orig_ops);
   6448  1.1  mrg       sel_print (")\n");
   6449  1.1  mrg     }
   6450  1.1  mrg 
   6451  1.1  mrg   gcc_assert (orig_ops);
   6452  1.1  mrg 
   6453  1.1  mrg   /* If no original operations exist below this insn, return immediately.  */
   6454  1.1  mrg   if (is_ineligible_successor (insn, path))
   6455  1.1  mrg     {
   6456  1.1  mrg       if (sched_verbose >= 6)
   6457  1.1  mrg         sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
   6458  1.1  mrg       return false;
   6459  1.1  mrg     }
   6460  1.1  mrg 
   6461  1.1  mrg   /* The block can have invalid av set, in which case it was created earlier
   6462  1.1  mrg      during move_op.  Return immediately.  */
   6463  1.1  mrg   if (sel_bb_head_p (insn))
   6464  1.1  mrg     {
   6465  1.1  mrg       if (! AV_SET_VALID_P (insn))
   6466  1.1  mrg         {
   6467  1.1  mrg           if (sched_verbose >= 6)
   6468  1.1  mrg             sel_print ("Returned from block %d as it had invalid av set\n",
   6469  1.1  mrg                        bb->index);
   6470  1.1  mrg           return false;
   6471  1.1  mrg         }
   6472  1.1  mrg 
   6473  1.1  mrg       if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
   6474  1.1  mrg         {
   6475  1.1  mrg           /* We have already found an original operation on this branch, do not
   6476  1.1  mrg              go any further and just return TRUE here.  If we don't stop here,
   6477  1.1  mrg              function can have exponential behavior even on the small code
   6478  1.1  mrg              with many different paths (e.g. with data speculation and
   6479  1.1  mrg              recovery blocks).  */
   6480  1.1  mrg           if (sched_verbose >= 6)
   6481  1.1  mrg             sel_print ("Block %d already visited in this traversal\n", bb->index);
   6482  1.1  mrg           if (code_motion_path_driver_info->on_enter)
   6483  1.1  mrg             return code_motion_path_driver_info->on_enter (insn,
   6484  1.1  mrg                                                            local_params_in,
   6485  1.1  mrg                                                            static_params,
   6486  1.1  mrg                                                            true);
   6487  1.1  mrg         }
   6488  1.1  mrg     }
   6489  1.1  mrg 
   6490  1.1  mrg   if (code_motion_path_driver_info->on_enter)
   6491  1.1  mrg     code_motion_path_driver_info->on_enter (insn, local_params_in,
   6492  1.1  mrg                                             static_params, false);
   6493  1.1  mrg   orig_ops = av_set_copy (orig_ops);
   6494  1.1  mrg 
   6495  1.1  mrg   /* Filter the orig_ops set.  */
   6496  1.1  mrg   if (AV_SET_VALID_P (insn))
   6497  1.1  mrg     av_set_code_motion_filter (&orig_ops, AV_SET (insn));
   6498  1.1  mrg 
   6499  1.1  mrg   /* If no more original ops, return immediately.  */
   6500  1.1  mrg   if (!orig_ops)
   6501  1.1  mrg     {
   6502  1.1  mrg       if (sched_verbose >= 6)
   6503  1.1  mrg         sel_print ("No intersection with av set of block %d\n", bb->index);
   6504  1.1  mrg       return false;
   6505  1.1  mrg     }
   6506  1.1  mrg 
   6507  1.1  mrg   /* For non-speculative insns we have to leave only one form of the
   6508  1.1  mrg      original operation, because if we don't, we may end up with
   6509  1.1  mrg      different C_EXPRes and, consequently, with bookkeepings for different
   6510  1.1  mrg      expression forms along the same code motion path.  That may lead to
   6511  1.1  mrg      generation of incorrect code.  So for each code motion we stick to
   6512  1.1  mrg      the single form of the instruction,  except for speculative insns
   6513  1.1  mrg      which we need to keep in different forms with all speculation
   6514  1.1  mrg      types.  */
   6515  1.1  mrg   av_set_leave_one_nonspec (&orig_ops);
   6516  1.1  mrg 
   6517  1.1  mrg   /* It is not possible that all ORIG_OPS are filtered out.  */
   6518  1.1  mrg   gcc_assert (orig_ops);
   6519  1.1  mrg 
   6520  1.1  mrg   /* It is enough to place only heads and tails of visited basic blocks into
   6521  1.1  mrg      the PATH.  */
   6522  1.1  mrg   ilist_add (&path, insn);
   6523  1.1  mrg   first_insn = original_insn = insn;
   6524  1.1  mrg   bb_tail = sel_bb_end (bb);
   6525  1.1  mrg 
   6526  1.1  mrg   /* Descend the basic block in search of the original expr; this part
   6527  1.1  mrg      corresponds to the part of the original move_op procedure executed
   6528  1.1  mrg      before the recursive call.  */
   6529  1.1  mrg   for (;;)
   6530  1.1  mrg     {
   6531  1.1  mrg       /* Look at the insn and decide if it could be an ancestor of currently
   6532  1.1  mrg 	 scheduling operation.  If it is so, then the insn "dest = op" could
   6533  1.1  mrg 	 either be replaced with "dest = reg", because REG now holds the result
   6534  1.1  mrg 	 of OP, or just removed, if we've scheduled the insn as a whole.
   6535  1.1  mrg 
   6536  1.1  mrg 	 If this insn doesn't contain currently scheduling OP, then proceed
   6537  1.1  mrg 	 with searching and look at its successors.  Operations we're searching
   6538  1.1  mrg 	 for could have changed when moving up through this insn via
   6539  1.1  mrg 	 substituting.  In this case, perform unsubstitution on them first.
   6540  1.1  mrg 
   6541  1.1  mrg 	 When traversing the DAG below this insn is finished, insert
   6542  1.1  mrg 	 bookkeeping code, if the insn is a joint point, and remove
   6543  1.1  mrg 	 leftovers.  */
   6544  1.1  mrg 
   6545  1.1  mrg       expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
   6546  1.1  mrg       if (expr)
   6547  1.1  mrg 	{
   6548  1.1  mrg 	  insn_t last_insn = PREV_INSN (insn);
   6549  1.1  mrg 
   6550  1.1  mrg 	  /* We have found the original operation.   */
   6551  1.1  mrg           if (sched_verbose >= 6)
   6552  1.1  mrg             sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
   6553  1.1  mrg 
   6554  1.1  mrg 	  code_motion_path_driver_info->orig_expr_found
   6555  1.1  mrg             (insn, expr, local_params_in, static_params);
   6556  1.1  mrg 
   6557  1.1  mrg 	  /* Step back, so on the way back we'll start traversing from the
   6558  1.1  mrg 	     previous insn (or we'll see that it's bb_note and skip that
   6559  1.1  mrg 	     loop).  */
   6560  1.1  mrg           if (insn == first_insn)
   6561  1.1  mrg             {
   6562  1.1  mrg               first_insn = NEXT_INSN (last_insn);
   6563  1.1  mrg               removed_last_insn = sel_bb_end_p (last_insn);
   6564  1.1  mrg             }
   6565  1.1  mrg 	  insn = last_insn;
   6566  1.1  mrg 	  break;
   6567  1.1  mrg 	}
   6568  1.1  mrg       else
   6569  1.1  mrg 	{
   6570  1.1  mrg 	  /* We haven't found the original expr, continue descending the basic
   6571  1.1  mrg 	     block.  */
   6572  1.1  mrg 	  if (code_motion_path_driver_info->orig_expr_not_found
   6573  1.1  mrg               (insn, orig_ops, static_params))
   6574  1.1  mrg 	    {
   6575  1.1  mrg 	      /* Av set ops could have been changed when moving through this
   6576  1.1  mrg 	         insn.  To find them below it, we have to un-substitute them.  */
   6577  1.1  mrg 	      undo_transformations (&orig_ops, insn);
   6578  1.1  mrg 	    }
   6579  1.1  mrg 	  else
   6580  1.1  mrg 	    {
   6581  1.1  mrg 	      /* Clean up and return, if the hook tells us to do so.  It may
   6582  1.1  mrg 		 happen if we've encountered the previously created
   6583  1.1  mrg 		 bookkeeping.  */
   6584  1.1  mrg 	      code_motion_path_driver_cleanup (&orig_ops, &path);
   6585  1.1  mrg 	      return -1;
   6586  1.1  mrg 	    }
   6587  1.1  mrg 
   6588  1.1  mrg 	  gcc_assert (orig_ops);
   6589  1.1  mrg         }
   6590  1.1  mrg 
   6591  1.1  mrg       /* Stop at insn if we got to the end of BB.  */
   6592  1.1  mrg       if (insn == bb_tail)
   6593  1.1  mrg 	break;
   6594  1.1  mrg 
   6595  1.1  mrg       insn = NEXT_INSN (insn);
   6596  1.1  mrg     }
   6597  1.1  mrg 
   6598  1.1  mrg   /* Here INSN either points to the insn before the original insn (may be
   6599  1.1  mrg      bb_note, if original insn was a bb_head) or to the bb_end.  */
   6600  1.1  mrg   if (!expr)
   6601  1.1  mrg     {
   6602  1.1  mrg       int res;
   6603  1.1  mrg       rtx_insn *last_insn = PREV_INSN (insn);
   6604  1.1  mrg       bool added_to_path;
   6605  1.1  mrg 
   6606  1.1  mrg       gcc_assert (insn == sel_bb_end (bb));
   6607  1.1  mrg 
   6608  1.1  mrg       /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
   6609  1.1  mrg 	 it's already in PATH then).  */
   6610  1.1  mrg       if (insn != first_insn)
   6611  1.1  mrg 	{
   6612  1.1  mrg 	  ilist_add (&path, insn);
   6613  1.1  mrg 	  added_to_path = true;
   6614  1.1  mrg 	}
   6615  1.1  mrg       else
   6616  1.1  mrg         added_to_path = false;
   6617  1.1  mrg 
   6618  1.1  mrg       /* Process_successors should be able to find at least one
   6619  1.1  mrg 	 successor for which code_motion_path_driver returns TRUE.  */
   6620  1.1  mrg       res = code_motion_process_successors (insn, orig_ops,
   6621  1.1  mrg                                             path, static_params);
   6622  1.1  mrg 
   6623  1.1  mrg       /* Jump in the end of basic block could have been removed or replaced
   6624  1.1  mrg          during code_motion_process_successors, so recompute insn as the
   6625  1.1  mrg          last insn in bb.  */
   6626  1.1  mrg       if (NEXT_INSN (last_insn) != insn)
   6627  1.1  mrg         {
   6628  1.1  mrg           insn = sel_bb_end (bb);
   6629  1.1  mrg           first_insn = sel_bb_head (bb);
   6630  1.1  mrg 	  if (first_insn != original_insn)
   6631  1.1  mrg 	    first_insn = original_insn;
   6632  1.1  mrg         }
   6633  1.1  mrg 
   6634  1.1  mrg       /* Remove bb tail from path.  */
   6635  1.1  mrg       if (added_to_path)
   6636  1.1  mrg 	ilist_remove (&path);
   6637  1.1  mrg 
   6638  1.1  mrg       if (res != 1)
   6639  1.1  mrg 	{
   6640  1.1  mrg 	  /* This is the case when one of the original expr is no longer available
   6641  1.1  mrg 	     due to bookkeeping created on this branch with the same register.
   6642  1.1  mrg 	     In the original algorithm, which doesn't have update_data_sets call
   6643  1.1  mrg 	     on a bookkeeping block, it would simply result in returning
   6644  1.1  mrg 	     FALSE when we've encountered a previously generated bookkeeping
   6645  1.1  mrg 	     insn in moveop_orig_expr_not_found.  */
   6646  1.1  mrg 	  code_motion_path_driver_cleanup (&orig_ops, &path);
   6647  1.1  mrg 	  return res;
   6648  1.1  mrg 	}
   6649  1.1  mrg     }
   6650  1.1  mrg 
   6651  1.1  mrg   /* Don't need it any more.  */
   6652  1.1  mrg   av_set_clear (&orig_ops);
   6653  1.1  mrg 
   6654  1.1  mrg   /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
   6655  1.1  mrg      the beginning of the basic block.  */
   6656  1.1  mrg   before_first = PREV_INSN (first_insn);
   6657  1.1  mrg   while (insn != before_first)
   6658  1.1  mrg     {
   6659  1.1  mrg       if (code_motion_path_driver_info->ascend)
   6660  1.1  mrg 	code_motion_path_driver_info->ascend (insn, static_params);
   6661  1.1  mrg 
   6662  1.1  mrg       insn = PREV_INSN (insn);
   6663  1.1  mrg     }
   6664  1.1  mrg 
   6665  1.1  mrg   /* Now we're at the bb head.  */
   6666  1.1  mrg   insn = first_insn;
   6667  1.1  mrg   ilist_remove (&path);
   6668  1.1  mrg   local_params_in->removed_last_insn = removed_last_insn;
   6669  1.1  mrg   code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
   6670  1.1  mrg 
   6671  1.1  mrg   /* This should be the very last operation as at bb head we could change
   6672  1.1  mrg      the numbering by creating bookkeeping blocks.  */
   6673  1.1  mrg   if (removed_last_insn)
   6674  1.1  mrg     insn = PREV_INSN (insn);
   6675  1.1  mrg 
   6676  1.1  mrg   /* If we have simplified the control flow and removed the first jump insn,
   6677  1.1  mrg      there's no point in marking this block in the visited blocks bitmap.  */
   6678  1.1  mrg   if (BLOCK_FOR_INSN (insn))
   6679  1.1  mrg     bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
   6680  1.1  mrg   return true;
   6681  1.1  mrg }
   6682  1.1  mrg 
   6683  1.1  mrg /* Move up the operations from ORIG_OPS set traversing the dag starting
   6684  1.1  mrg    from INSN.  PATH represents the edges traversed so far.
   6685  1.1  mrg    DEST is the register chosen for scheduling the current expr.  Insert
   6686  1.1  mrg    bookkeeping code in the join points.  EXPR_VLIW is the chosen expression,
   6687  1.1  mrg    C_EXPR is how it looks like at the given cfg point.
   6688  1.1  mrg    Set *SHOULD_MOVE to indicate whether we have only disconnected
   6689  1.1  mrg    one of the insns found.
   6690  1.1  mrg 
   6691  1.1  mrg    Returns whether original instructions were found, which is asserted
   6692  1.1  mrg    to be true in the caller.  */
   6693  1.1  mrg static bool
   6694  1.1  mrg move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
   6695  1.1  mrg          rtx dest, expr_t c_expr, bool *should_move)
   6696  1.1  mrg {
   6697  1.1  mrg   struct moveop_static_params sparams;
   6698  1.1  mrg   struct cmpd_local_params lparams;
   6699  1.1  mrg   int res;
   6700  1.1  mrg 
   6701  1.1  mrg   /* Init params for code_motion_path_driver.  */
   6702  1.1  mrg   sparams.dest = dest;
   6703  1.1  mrg   sparams.c_expr = c_expr;
   6704  1.1  mrg   sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
   6705  1.1  mrg   sparams.failed_insn = NULL;
   6706  1.1  mrg   sparams.was_renamed = false;
   6707  1.1  mrg   lparams.e1 = NULL;
   6708  1.1  mrg 
   6709  1.1  mrg   /* We haven't visited any blocks yet.  */
   6710  1.1  mrg   bitmap_clear (code_motion_visited_blocks);
   6711  1.1  mrg 
   6712  1.1  mrg   /* Set appropriate hooks and data.  */
   6713  1.1  mrg   code_motion_path_driver_info = &move_op_hooks;
   6714  1.1  mrg   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
   6715  1.1  mrg 
   6716  1.1  mrg   gcc_assert (res != -1);
   6717  1.1  mrg 
   6718  1.1  mrg   if (sparams.was_renamed)
   6719  1.1  mrg     EXPR_WAS_RENAMED (expr_vliw) = true;
   6720  1.1  mrg 
   6721  1.1  mrg   *should_move = (sparams.uid == -1);
   6722  1.1  mrg 
   6723  1.1  mrg   return res;
   6724  1.1  mrg }
   6725  1.1  mrg 
   6726  1.1  mrg 
   6728  1.1  mrg /* Functions that work with regions.  */
   6729  1.1  mrg 
   6730  1.1  mrg /* Current number of seqno used in init_seqno and init_seqno_1.  */
   6731  1.1  mrg static int cur_seqno;
   6732  1.1  mrg 
   6733  1.1  mrg /* A helper for init_seqno.  Traverse the region starting from BB and
   6734  1.1  mrg    compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
   6735  1.1  mrg    Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
   6736  1.1  mrg static void
   6737  1.1  mrg init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
   6738  1.1  mrg {
   6739  1.1  mrg   int bbi = BLOCK_TO_BB (bb->index);
   6740  1.1  mrg   insn_t insn;
   6741  1.1  mrg   insn_t succ_insn;
   6742  1.1  mrg   succ_iterator si;
   6743  1.1  mrg 
   6744  1.1  mrg   rtx_note *note = bb_note (bb);
   6745  1.1  mrg   bitmap_set_bit (visited_bbs, bbi);
   6746  1.1  mrg   if (blocks_to_reschedule)
   6747  1.1  mrg     bitmap_clear_bit (blocks_to_reschedule, bb->index);
   6748  1.1  mrg 
   6749  1.1  mrg   FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
   6750  1.1  mrg 		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
   6751  1.1  mrg     {
   6752  1.1  mrg       basic_block succ = BLOCK_FOR_INSN (succ_insn);
   6753  1.1  mrg       int succ_bbi = BLOCK_TO_BB (succ->index);
   6754  1.1  mrg 
   6755  1.1  mrg       gcc_assert (in_current_region_p (succ));
   6756  1.1  mrg 
   6757  1.1  mrg       if (!bitmap_bit_p (visited_bbs, succ_bbi))
   6758  1.1  mrg 	{
   6759  1.1  mrg 	  gcc_assert (succ_bbi > bbi);
   6760  1.1  mrg 
   6761  1.1  mrg 	  init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
   6762  1.1  mrg 	}
   6763  1.1  mrg       else if (blocks_to_reschedule)
   6764  1.1  mrg         bitmap_set_bit (forced_ebb_heads, succ->index);
   6765  1.1  mrg     }
   6766  1.1  mrg 
   6767  1.1  mrg   for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
   6768  1.1  mrg     INSN_SEQNO (insn) = cur_seqno--;
   6769  1.1  mrg }
   6770  1.1  mrg 
   6771  1.1  mrg /* Initialize seqnos for the current region.  BLOCKS_TO_RESCHEDULE contains
   6772  1.1  mrg    blocks on which we're rescheduling when pipelining, FROM is the block where
   6773  1.1  mrg    traversing region begins (it may not be the head of the region when
   6774  1.1  mrg    pipelining, but the head of the loop instead).
   6775  1.1  mrg 
   6776  1.1  mrg    Returns the maximal seqno found.  */
   6777  1.1  mrg static int
   6778  1.1  mrg init_seqno (bitmap blocks_to_reschedule, basic_block from)
   6779  1.1  mrg {
   6780  1.1  mrg   bitmap_iterator bi;
   6781  1.1  mrg   unsigned bbi;
   6782  1.1  mrg 
   6783  1.1  mrg   auto_sbitmap visited_bbs (current_nr_blocks);
   6784  1.1  mrg 
   6785  1.1  mrg   if (blocks_to_reschedule)
   6786  1.1  mrg     {
   6787  1.1  mrg       bitmap_ones (visited_bbs);
   6788  1.1  mrg       EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
   6789  1.1  mrg         {
   6790  1.1  mrg 	  gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
   6791  1.1  mrg           bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi));
   6792  1.1  mrg 	}
   6793  1.1  mrg     }
   6794  1.1  mrg   else
   6795  1.1  mrg     {
   6796  1.1  mrg       bitmap_clear (visited_bbs);
   6797  1.1  mrg       from = EBB_FIRST_BB (0);
   6798  1.1  mrg     }
   6799  1.1  mrg 
   6800  1.1  mrg   cur_seqno = sched_max_luid - 1;
   6801  1.1  mrg   init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
   6802  1.1  mrg 
   6803  1.1  mrg   /* cur_seqno may be positive if the number of instructions is less than
   6804  1.1  mrg      sched_max_luid - 1 (when rescheduling or if some instructions have been
   6805  1.1  mrg      removed by the call to purge_empty_blocks in sel_sched_region_1).  */
   6806  1.1  mrg   gcc_assert (cur_seqno >= 0);
   6807  1.1  mrg 
   6808  1.1  mrg   return sched_max_luid - 1;
   6809  1.1  mrg }
   6810  1.1  mrg 
   6811  1.1  mrg /* Initialize scheduling parameters for current region.  */
   6812  1.1  mrg static void
   6813  1.1  mrg sel_setup_region_sched_flags (void)
   6814  1.1  mrg {
   6815  1.1  mrg   enable_schedule_as_rhs_p = 1;
   6816  1.1  mrg   bookkeeping_p = 1;
   6817  1.1  mrg   pipelining_p = (bookkeeping_p
   6818  1.1  mrg                   && (flag_sel_sched_pipelining != 0)
   6819  1.1  mrg 		  && current_loop_nest != NULL
   6820  1.1  mrg 		  && loop_has_exit_edges (current_loop_nest));
   6821  1.1  mrg   max_insns_to_rename = param_selsched_insns_to_rename;
   6822  1.1  mrg   max_ws = MAX_WS;
   6823  1.1  mrg }
   6824  1.1  mrg 
   6825  1.1  mrg /* Return true if all basic blocks of current region are empty.  */
   6826  1.1  mrg static bool
   6827  1.1  mrg current_region_empty_p (void)
   6828  1.1  mrg {
   6829  1.1  mrg   int i;
   6830  1.1  mrg   for (i = 0; i < current_nr_blocks; i++)
   6831  1.1  mrg     if (! sel_bb_empty_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))))
   6832  1.1  mrg       return false;
   6833  1.1  mrg 
   6834  1.1  mrg   return true;
   6835  1.1  mrg }
   6836  1.1  mrg 
   6837  1.1  mrg /* Prepare and verify loop nest for pipelining.  */
   6838  1.1  mrg static void
   6839  1.1  mrg setup_current_loop_nest (int rgn, bb_vec_t *bbs)
   6840  1.1  mrg {
   6841  1.1  mrg   current_loop_nest = get_loop_nest_for_rgn (rgn);
   6842  1.1  mrg 
   6843  1.1  mrg   if (!current_loop_nest)
   6844  1.1  mrg     return;
   6845  1.1  mrg 
   6846  1.1  mrg   /* If this loop has any saved loop preheaders from nested loops,
   6847  1.1  mrg      add these basic blocks to the current region.  */
   6848  1.1  mrg   sel_add_loop_preheaders (bbs);
   6849  1.1  mrg 
   6850  1.1  mrg   /* Check that we're starting with a valid information.  */
   6851  1.1  mrg   gcc_assert (loop_latch_edge (current_loop_nest));
   6852  1.1  mrg   gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
   6853  1.1  mrg }
   6854  1.1  mrg 
   6855  1.1  mrg /* Compute instruction priorities for current region.  */
   6856  1.1  mrg static void
   6857  1.1  mrg sel_compute_priorities (int rgn)
   6858  1.1  mrg {
   6859  1.1  mrg   sched_rgn_compute_dependencies (rgn);
   6860  1.1  mrg 
   6861  1.1  mrg   /* Compute insn priorities in haifa style.  Then free haifa style
   6862  1.1  mrg      dependencies that we've calculated for this.  */
   6863  1.1  mrg   compute_priorities ();
   6864  1.1  mrg 
   6865  1.1  mrg   if (sched_verbose >= 5)
   6866  1.1  mrg     debug_rgn_dependencies (0);
   6867  1.1  mrg 
   6868  1.1  mrg   free_rgn_deps ();
   6869  1.1  mrg }
   6870  1.1  mrg 
   6871  1.1  mrg /* Init scheduling data for RGN.  Returns true when this region should not
   6872  1.1  mrg    be scheduled.  */
   6873  1.1  mrg static bool
   6874  1.1  mrg sel_region_init (int rgn)
   6875  1.1  mrg {
   6876  1.1  mrg   int i;
   6877  1.1  mrg   bb_vec_t bbs;
   6878  1.1  mrg 
   6879  1.1  mrg   rgn_setup_region (rgn);
   6880  1.1  mrg 
   6881  1.1  mrg   /* Even if sched_is_disabled_for_current_region_p() is true, we still
   6882  1.1  mrg      do region initialization here so the region can be bundled correctly,
   6883  1.1  mrg      but we'll skip the scheduling in sel_sched_region ().  */
   6884  1.1  mrg   if (current_region_empty_p ())
   6885  1.1  mrg     return true;
   6886  1.1  mrg 
   6887  1.1  mrg   bbs.create (current_nr_blocks);
   6888  1.1  mrg 
   6889  1.1  mrg   for (i = 0; i < current_nr_blocks; i++)
   6890  1.1  mrg     bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
   6891  1.1  mrg 
   6892  1.1  mrg   sel_init_bbs (bbs);
   6893  1.1  mrg 
   6894  1.1  mrg   if (flag_sel_sched_pipelining)
   6895  1.1  mrg     setup_current_loop_nest (rgn, &bbs);
   6896  1.1  mrg 
   6897  1.1  mrg   sel_setup_region_sched_flags ();
   6898  1.1  mrg 
   6899  1.1  mrg   /* Initialize luids and dependence analysis which both sel-sched and haifa
   6900  1.1  mrg      need.  */
   6901  1.1  mrg   sched_init_luids (bbs);
   6902  1.1  mrg   sched_deps_init (false);
   6903  1.1  mrg 
   6904  1.1  mrg   /* Initialize haifa data.  */
   6905  1.1  mrg   rgn_setup_sched_infos ();
   6906  1.1  mrg   sel_set_sched_flags ();
   6907  1.1  mrg   haifa_init_h_i_d (bbs);
   6908  1.1  mrg 
   6909  1.1  mrg   sel_compute_priorities (rgn);
   6910  1.1  mrg   init_deps_global ();
   6911  1.1  mrg 
   6912  1.1  mrg   /* Main initialization.  */
   6913  1.1  mrg   sel_setup_sched_infos ();
   6914  1.1  mrg   sel_init_global_and_expr (bbs);
   6915  1.1  mrg 
   6916  1.1  mrg   bbs.release ();
   6917  1.1  mrg 
   6918  1.1  mrg   blocks_to_reschedule = BITMAP_ALLOC (NULL);
   6919  1.1  mrg 
   6920  1.1  mrg   /* Init correct liveness sets on each instruction of a single-block loop.
   6921  1.1  mrg      This is the only situation when we can't update liveness when calling
   6922  1.1  mrg      compute_live for the first insn of the loop.  */
   6923  1.1  mrg   if (current_loop_nest)
   6924  1.1  mrg     {
   6925  1.1  mrg       int header =
   6926  1.1  mrg 	(sel_is_loop_preheader_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (0)))
   6927  1.1  mrg 	 ? 1
   6928  1.1  mrg 	 : 0);
   6929  1.1  mrg 
   6930  1.1  mrg       if (current_nr_blocks == header + 1)
   6931  1.1  mrg         update_liveness_on_insn
   6932  1.1  mrg           (sel_bb_head (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (header))));
   6933  1.1  mrg     }
   6934  1.1  mrg 
   6935  1.1  mrg   /* Set hooks so that no newly generated insn will go out unnoticed.  */
   6936  1.1  mrg   sel_register_cfg_hooks ();
   6937  1.1  mrg 
   6938  1.1  mrg   /* !!! We call target.sched.init () for the whole region, but we invoke
   6939  1.1  mrg      targetm.sched.finish () for every ebb.  */
   6940  1.1  mrg   if (targetm.sched.init)
   6941  1.1  mrg     /* None of the arguments are actually used in any target.  */
   6942  1.1  mrg     targetm.sched.init (sched_dump, sched_verbose, -1);
   6943  1.1  mrg 
   6944  1.1  mrg   first_emitted_uid = get_max_uid () + 1;
   6945  1.1  mrg   preheader_removed = false;
   6946  1.1  mrg 
   6947  1.1  mrg   /* Reset register allocation ticks array.  */
   6948  1.1  mrg   memset (reg_rename_tick, 0, sizeof reg_rename_tick);
   6949  1.1  mrg   reg_rename_this_tick = 0;
   6950  1.1  mrg 
   6951  1.1  mrg   forced_ebb_heads = BITMAP_ALLOC (NULL);
   6952  1.1  mrg 
   6953  1.1  mrg   setup_nop_vinsn ();
   6954  1.1  mrg   current_copies = BITMAP_ALLOC (NULL);
   6955  1.1  mrg   current_originators = BITMAP_ALLOC (NULL);
   6956  1.1  mrg   code_motion_visited_blocks = BITMAP_ALLOC (NULL);
   6957  1.1  mrg 
   6958  1.1  mrg   return false;
   6959  1.1  mrg }
   6960  1.1  mrg 
   6961  1.1  mrg /* Simplify insns after the scheduling.  */
   6962  1.1  mrg static void
   6963  1.1  mrg simplify_changed_insns (void)
   6964  1.1  mrg {
   6965  1.1  mrg   int i;
   6966  1.1  mrg 
   6967  1.1  mrg   for (i = 0; i < current_nr_blocks; i++)
   6968  1.1  mrg     {
   6969  1.1  mrg       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
   6970  1.1  mrg       rtx_insn *insn;
   6971  1.1  mrg 
   6972  1.1  mrg       FOR_BB_INSNS (bb, insn)
   6973  1.1  mrg 	if (INSN_P (insn))
   6974  1.1  mrg 	  {
   6975  1.1  mrg 	    expr_t expr = INSN_EXPR (insn);
   6976  1.1  mrg 
   6977  1.1  mrg 	    if (EXPR_WAS_SUBSTITUTED (expr))
   6978  1.1  mrg 	      validate_simplify_insn (insn);
   6979  1.1  mrg 	  }
   6980  1.1  mrg     }
   6981  1.1  mrg }
   6982  1.1  mrg 
/* Find boundaries of the EBB starting from basic block BB, marking blocks of
   this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL,
   PREV_HEAD, and NEXT_TAIL fields of CURRENT_SCHED_INFO structure.  */
static void
find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
{
  rtx_insn *head, *tail;
  basic_block bb1 = bb;
  if (sched_verbose >= 2)
    sel_print ("Finishing schedule in bbs: ");

  /* Advance BB1 until it ends the EBB, recording each visited block so the
     caller does not process it again.  */
  do
    {
      bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));

      if (sched_verbose >= 2)
	sel_print ("%d; ", bb1->index);
    }
  while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));

  if (sched_verbose >= 2)
    sel_print ("\n");

  /* BB is the first and BB1 the last block of the EBB just delimited.  */
  get_ebb_head_tail (bb, bb1, &head, &tail);

  current_sched_info->head = head;
  current_sched_info->tail = tail;
  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);
}
   7013  1.1  mrg 
/* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  Emulates a pass
   of the Haifa scheduler over the already-ordered insn stream, stepping the
   DFA (CURR_STATE) cycle by cycle, so that INSN_SCHED_CYCLE values match
   what the DFA would actually allow.  */
static void
reset_sched_cycles_in_current_ebb (void)
{
  int last_clock = 0;
  int haifa_last_clock = -1;
  int haifa_clock = 0;
  int issued_insns = 0;
  insn_t insn;

  if (targetm.sched.init)
    {
      /* None of the arguments are actually used in any target.
	 NB: We should have md_reset () hook for cases like this.  */
      targetm.sched.init (sched_dump, sched_verbose, -1);
    }

  state_reset (curr_state);
  advance_state (curr_state);

  for (insn = current_sched_info->head;
       insn != current_sched_info->next_tail;
       insn = NEXT_INSN (insn))
    {
      int cost, haifa_cost;
      /* NOTE(review): SORT_P is passed uninitialized to dfa_new_cycle;
	 presumably it is an output-only parameter there — confirm against
	 the target hook's contract.  */
      int sort_p;
      bool asm_p, real_insn, after_stall, all_issued;
      int clock;

      if (!INSN_P (insn))
	continue;

      asm_p = false;
      /* REAL_INSN: recognized by the backend, hence modeled by the DFA.  */
      real_insn = recog_memoized (insn) >= 0;
      clock = INSN_SCHED_CYCLE (insn);

      /* COST is the data-dependency stall recorded by the selective
	 scheduler; HAIFA_COST below is what the DFA demands.  */
      cost = clock - last_clock;

      /* Initialize HAIFA_COST.  */
      if (! real_insn)
	{
	  asm_p = INSN_ASM_P (insn);

	  if (asm_p)
	    /* This is asm insn which *had* to be scheduled first
	       on the cycle.  */
	    haifa_cost = 1;
	  else
	    /* This is a use/clobber insn.  It should not change
	       cost.  */
	    haifa_cost = 0;
	}
      else
        haifa_cost = estimate_insn_cost (insn, curr_state);

      /* Stall for whatever cycles we've stalled before.  */
      after_stall = 0;
      if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
        {
          haifa_cost = cost;
          after_stall = 1;
        }
      /* Force a new cycle when the issue slots of this one are all used.  */
      all_issued = issued_insns == issue_rate;
      if (haifa_cost == 0 && all_issued)
	haifa_cost = 1;
      if (haifa_cost > 0)
	{
	  int i = 0;

	  while (haifa_cost--)
	    {
	      advance_state (curr_state);
	      issued_insns = 0;
              i++;

	      if (sched_verbose >= 2)
                {
                  sel_print ("advance_state (state_transition)\n");
                  debug_state (curr_state);
                }

              /* The DFA may report that e.g. insn requires 2 cycles to be
                 issued, but on the next cycle it says that insn is ready
                 to go.  Check this here.  */
              if (!after_stall
                  && real_insn
                  && haifa_cost > 0
                  && estimate_insn_cost (insn, curr_state) == 0)
                break;

              /* When the data dependency stall is longer than the DFA stall,
                 and when we have issued exactly issue_rate insns and stalled,
                 it could be that after this longer stall the insn will again
                 become unavailable  to the DFA restrictions.  Looks strange
                 but happens e.g. on x86-64.  So recheck DFA on the last
                 iteration.  */
              if ((after_stall || all_issued)
                  && real_insn
                  && haifa_cost == 0)
                haifa_cost = estimate_insn_cost (insn, curr_state);
            }

	  haifa_clock += i;
          if (sched_verbose >= 2)
            sel_print ("haifa clock: %d\n", haifa_clock);
	}
      else
	gcc_assert (haifa_cost == 0);

      if (sched_verbose >= 2)
	sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);

      /* Let the target force extra cycles (e.g. for bundling).  */
      if (targetm.sched.dfa_new_cycle)
	while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
					    haifa_last_clock, haifa_clock,
					    &sort_p))
	  {
	    advance_state (curr_state);
	    issued_insns = 0;
	    haifa_clock++;
	    if (sched_verbose >= 2)
              {
                sel_print ("advance_state (dfa_new_cycle)\n");
                debug_state (curr_state);
		/* NOTE(review): HAIFA_CLOCK was already incremented above,
		   so this prints one more than the clock actually is —
		   inconsistent with the "haifa clock" print earlier; verify
		   whether the +1 is intentional.  */
		sel_print ("haifa clock: %d\n", haifa_clock + 1);
              }
          }

      if (real_insn)
	{
	  static state_t temp = NULL;

	  /* Lazily allocated scratch state, kept across calls.  */
	  if (!temp)
	    temp = xmalloc (dfa_state_size);
	  memcpy (temp, curr_state, dfa_state_size);

	  cost = state_transition (curr_state, insn);
	  /* Count the insn as issued only if it changed the DFA state.  */
	  if (memcmp (temp, curr_state, dfa_state_size))
	    issued_insns++;

          if (sched_verbose >= 2)
	    {
	      sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn),
			 haifa_clock + 1);
              debug_state (curr_state);
	    }
	  /* A negative cost means the insn was issued on this cycle.  */
	  gcc_assert (cost < 0);
	}

      if (targetm.sched.variable_issue)
	targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);

      INSN_SCHED_CYCLE (insn) = haifa_clock;

      last_clock = clock;
      haifa_last_clock = haifa_clock;
    }
}
   7172  1.1  mrg 
/* Put TImode markers on insns starting a new issue group.  An insn whose
   sched cycle differs from the previous issuing insn's cycle begins a new
   group; TImode on the insn is how later passes (e.g. bundling) see that.
   USE/CLOBBER insns never carry the marker and do not advance the clock.  */
static void
put_TImodes (void)
{
  int last_clock = -1;
  insn_t insn;

  for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
       insn = NEXT_INSN (insn))
    {
      int cost, clock;

      if (!INSN_P (insn))
	continue;

      clock = INSN_SCHED_CYCLE (insn);
      /* The first issuing insn always costs one cycle.  */
      cost = (last_clock == -1) ? 1 : clock - last_clock;

      gcc_assert (cost >= 0);

      if (issue_rate > 1
	  && GET_CODE (PATTERN (insn)) != USE
	  && GET_CODE (PATTERN (insn)) != CLOBBER)
	{
	  /* A positive cost means INSN starts a fresh cycle.  */
	  if (reload_completed && cost > 0)
	    PUT_MODE (insn, TImode);

	  last_clock = clock;
	}

      if (sched_verbose >= 2)
	sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
    }
}
   7207  1.1  mrg 
/* Perform MD_FINISH on EBBs comprising current region.  When
   RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
   to produce correct sched cycles on insns.  */
static void
sel_region_target_finish (bool reset_sched_cycles_p)
{
  int i;
  /* Blocks already covered by a previously processed EBB.  */
  bitmap scheduled_blocks = BITMAP_ALLOC (NULL);

  for (i = 0; i < current_nr_blocks; i++)
    {
      if (bitmap_bit_p (scheduled_blocks, i))
	continue;

      /* While pipelining outer loops, skip bundling for loop
	 preheaders.  Those will be rescheduled in the outer loop.  */
      if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
	continue;

      /* Sets current_sched_info's head/tail and marks the EBB's blocks.  */
      find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);

      if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
	continue;

      if (reset_sched_cycles_p)
	reset_sched_cycles_in_current_ebb ();

      if (targetm.sched.init)
	targetm.sched.init (sched_dump, sched_verbose, -1);

      put_TImodes ();

      if (targetm.sched.finish)
	{
	  targetm.sched.finish (sched_dump, sched_verbose);

	  /* Extend luids so that insns generated by the target will
	     get zero luid.  */
	  sched_extend_luids ();
	}
    }

  BITMAP_FREE (scheduled_blocks);
}
   7252  1.1  mrg 
/* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
   is true, make an additional pass emulating scheduler to get correct insn
   cycles for md_finish calls.  */
static void
sel_region_finish (bool reset_sched_cycles_p)
{
  simplify_changed_insns ();
  sched_finish_ready_list ();
  free_nop_pool ();

  /* Free the vectors.  */
  vec_av_set.release ();
  BITMAP_FREE (current_copies);
  BITMAP_FREE (current_originators);
  BITMAP_FREE (code_motion_visited_blocks);
  vinsn_vec_free (vec_bookkeeping_blocked_vinsns);
  vinsn_vec_free (vec_target_unavailable_vinsns);

  /* If LV_SET of the region head should be updated, do it now because
     there will be no other chance.  */
  {
    succ_iterator si;
    insn_t insn;

    FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
                     SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
      {
	basic_block bb = BLOCK_FOR_INSN (insn);

	if (!BB_LV_SET_VALID_P (bb))
	  compute_live (insn);
      }
  }

  /* Emulate the Haifa scheduler for bundling.  */
  if (reload_completed)
    sel_region_target_finish (reset_sched_cycles_p);

  sel_finish_global_and_expr ();

  BITMAP_FREE (forced_ebb_heads);

  free_nop_vinsn ();

  finish_deps_global ();
  sched_finish_luids ();
  h_d_i_d.release ();

  sel_finish_bbs ();
  BITMAP_FREE (blocks_to_reschedule);

  /* From here on, the region's CFG hooks are no longer in effect.  */
  sel_unregister_cfg_hooks ();

  max_issue_size = 0;
}
   7308  1.1  mrg 
   7309  1.1  mrg 
   7311  1.1  mrg /* Functions that implement the scheduler driver.  */
   7312  1.1  mrg 
/* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
   is the current maximum seqno.  SCHEDULED_INSNS_TAILPP is the list
   of insns scheduled -- these would be postprocessed later.  */
static void
schedule_on_fences (flist_t fences, int max_seqno,
                    ilist_t **scheduled_insns_tailpp)
{
  flist_t old_fences = fences;

  if (sched_verbose >= 1)
    {
      sel_print ("\nScheduling on fences: ");
      dump_flist (fences);
      sel_print ("\n");
    }

  scheduled_something_on_previous_fence = false;
  for (; fences; fences = FLIST_NEXT (fences))
    {
      fence_t fence = NULL;
      int seqno = 0;
      flist_t fences2;
      bool first_p = true;

      /* Choose the next fence group to schedule.
         The fact that insn can be scheduled only once
         on the cycle is guaranteed by two properties:
         1. seqnos of parallel groups decrease with each iteration.
         2. If is_ineligible_successor () sees the larger seqno, it
         checks if candidate insn is_in_current_fence_p ().  */
      for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
        {
          fence_t f = FLIST_FENCE (fences2);

          if (!FENCE_PROCESSED_P (f))
            {
              int i = INSN_SEQNO (FENCE_INSN (f));

	      /* Keep the unprocessed fence with the largest seqno.  */
              if (first_p || i > seqno)
                {
                  seqno = i;
                  fence = f;
                  first_p = false;
                }
              else
                /* ??? Seqnos of different groups should be different.
		   The "1 ||" deliberately disables this check.  */
                gcc_assert (1 || i != seqno);
            }
        }

      gcc_assert (fence);

      /* As FENCE is nonnull, SEQNO is initialized.  Shift it negative so
	 freshly scheduled insns are distinguishable until renumbered in
	 update_seqnos_and_stage.  */
      seqno -= max_seqno + 1;
      fill_insns (fence, seqno, scheduled_insns_tailpp);
      FENCE_PROCESSED_P (fence) = true;
    }

  /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
     don't need to keep bookkeeping-invalidated and target-unavailable
     vinsns any more.  */
  vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
  vinsn_vec_clear (&vec_target_unavailable_vinsns);
}
   7377  1.1  mrg 
   7378  1.1  mrg /* Calculate MIN_SEQNO and MAX_SEQNO.  */
   7379  1.1  mrg static void
   7380  1.1  mrg find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
   7381  1.1  mrg {
   7382  1.1  mrg   *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
   7383  1.1  mrg 
   7384  1.1  mrg   /* The first element is already processed.  */
   7385  1.1  mrg   while ((fences = FLIST_NEXT (fences)))
   7386  1.1  mrg     {
   7387  1.1  mrg       int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
   7388  1.1  mrg 
   7389  1.1  mrg       if (*min_seqno > seqno)
   7390  1.1  mrg         *min_seqno = seqno;
   7391  1.1  mrg       else if (*max_seqno < seqno)
   7392  1.1  mrg         *max_seqno = seqno;
   7393  1.1  mrg     }
   7394  1.1  mrg }
   7395  1.1  mrg 
/* Calculate new fences from FENCES.  Write the current time to PTIME.
   Returns the head of the new fence list; the old list is freed.  */
static flist_t
calculate_new_fences (flist_t fences, int orig_max_seqno, int *ptime)
{
  flist_t old_fences = fences;
  struct flist_tail_def _new_fences, *new_fences = &_new_fences;
  int max_time = 0;

  flist_tail_init (new_fences);
  for (; fences; fences = FLIST_NEXT (fences))
    {
      fence_t fence = FLIST_FENCE (fences);
      insn_t insn;

      if (!FENCE_BNDS (fence))
        {
          /* This fence doesn't have any successors.  */
          if (!FENCE_SCHEDULED_P (fence))
            {
              /* Nothing was scheduled on this fence.  Carry it over
		 unchanged so it gets another chance next round.  */
              int seqno;

              insn = FENCE_INSN (fence);
              seqno = INSN_SEQNO (insn);
              gcc_assert (seqno > 0 && seqno <= orig_max_seqno);

              if (sched_verbose >= 1)
                sel_print ("Fence %d[%d] has not changed\n",
                           INSN_UID (insn),
                           BLOCK_NUM (insn));
              move_fence_to_fences (fences, new_fences);
            }
        }
      else
	/* The fence has boundaries: derive successor fences from them.  */
        extract_new_fences_from (fences, new_fences, orig_max_seqno);
      max_time = MAX (max_time, FENCE_CYCLE (fence));
    }

  flist_clear (&old_fences);
  *ptime = max_time;
  return FLIST_TAIL_HEAD (new_fences);
}
   7438  1.1  mrg 
/* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
   are the miminum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
   the highest seqno used in a region.  Return the updated highest seqno.
   Scheduled insns carry temporary negative seqnos (see schedule_on_fences);
   here they are shifted back into the positive range above
   HIGHEST_SEQNO_IN_USE.  Also bumps GLOBAL_LEVEL, invalidating av_sets.  */
static int
update_seqnos_and_stage (int min_seqno, int max_seqno,
                         int highest_seqno_in_use,
                         ilist_t *pscheduled_insns)
{
  int new_hs;
  ilist_iterator ii;
  insn_t insn;

  /* Actually, new_hs is the seqno of the instruction, that was
     scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
  if (*pscheduled_insns)
    {
      new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
                + highest_seqno_in_use + max_seqno - min_seqno + 2);
      gcc_assert (new_hs > highest_seqno_in_use);
    }
  else
    new_hs = highest_seqno_in_use;

  FOR_EACH_INSN (insn, ii, *pscheduled_insns)
    {
      gcc_assert (INSN_SEQNO (insn) < 0);
      /* Same shift as applied to NEW_HS above, so all stay <= NEW_HS.  */
      INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
      gcc_assert (INSN_SEQNO (insn) <= new_hs);

      /* When not pipelining, purge unneeded insn info on the scheduled insns.
         For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
         require > 1GB of memory e.g. on limit-fnargs.c.  */
      if (! pipelining_p)
        free_data_for_scheduled_insn (insn);
    }

  ilist_clear (pscheduled_insns);
  global_level++;

  return new_hs;
}
   7480  1.1  mrg 
/* The main driver for scheduling a region.  This function is responsible
   for correct propagation of fences (i.e. scheduling points) and creating
   a group of parallel insns at each of them.  It also supports
   pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
   of scheduling.  */
static void
sel_sched_region_2 (int orig_max_seqno)
{
  int highest_seqno_in_use = orig_max_seqno;
  int max_time = 0;

  /* Reset per-pass statistics counters.  */
  stat_bookkeeping_copies = 0;
  stat_insns_needed_bookkeeping = 0;
  stat_renamed_scheduled = 0;
  stat_substitutions_total = 0;
  num_insns_scheduled = 0;

  /* Each iteration schedules one parallel group on every fence, then
     advances the fences; the loop ends when no fences remain.  */
  while (fences)
    {
      int min_seqno, max_seqno;
      ilist_t scheduled_insns = NULL;
      ilist_t *scheduled_insns_tailp = &scheduled_insns;

      find_min_max_seqno (fences, &min_seqno, &max_seqno);
      schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
      fences = calculate_new_fences (fences, orig_max_seqno, &max_time);
      highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
                                                      highest_seqno_in_use,
                                                      &scheduled_insns);
    }

  if (sched_verbose >= 1)
    {
      sel_print ("Total scheduling time: %d cycles\n", max_time);
      sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
		 "bookkeeping, %d insns renamed, %d insns substituted\n",
		 stat_bookkeeping_copies,
		 stat_insns_needed_bookkeeping,
		 stat_renamed_scheduled,
		 stat_substitutions_total);
    }
}
   7523  1.1  mrg 
   7524  1.1  mrg /* Schedule a region.  When pipelining, search for possibly never scheduled
   7525  1.1  mrg    bookkeeping code and schedule it.  Reschedule pipelined code without
   7526  1.1  mrg    pipelining after.  */
   7527  1.1  mrg static void
   7528  1.1  mrg sel_sched_region_1 (void)
   7529  1.1  mrg {
   7530  1.1  mrg   int orig_max_seqno;
   7531  1.1  mrg 
   7532  1.1  mrg   /* Remove empty blocks that might be in the region from the beginning.  */
   7533  1.1  mrg   purge_empty_blocks ();
   7534  1.1  mrg 
   7535  1.1  mrg   orig_max_seqno = init_seqno (NULL, NULL);
   7536  1.1  mrg   gcc_assert (orig_max_seqno >= 1);
   7537  1.1  mrg 
   7538  1.1  mrg   /* When pipelining outer loops, create fences on the loop header,
   7539  1.1  mrg      not preheader.  */
   7540  1.1  mrg   fences = NULL;
   7541  1.1  mrg   if (current_loop_nest)
   7542  1.1  mrg     init_fences (BB_END (EBB_FIRST_BB (0)));
   7543  1.1  mrg   else
   7544  1.1  mrg     init_fences (bb_note (EBB_FIRST_BB (0)));
   7545  1.1  mrg   global_level = 1;
   7546  1.1  mrg 
   7547  1.1  mrg   sel_sched_region_2 (orig_max_seqno);
   7548  1.1  mrg 
   7549  1.1  mrg   gcc_assert (fences == NULL);
   7550  1.1  mrg 
   7551  1.1  mrg   if (pipelining_p)
   7552  1.1  mrg     {
   7553  1.1  mrg       int i;
   7554  1.1  mrg       basic_block bb;
   7555  1.1  mrg       struct flist_tail_def _new_fences;
   7556  1.1  mrg       flist_tail_t new_fences = &_new_fences;
   7557  1.1  mrg       bool do_p = true;
   7558  1.1  mrg 
   7559  1.1  mrg       pipelining_p = false;
   7560  1.1  mrg       max_ws = MIN (max_ws, issue_rate * 3 / 2);
   7561  1.1  mrg       bookkeeping_p = false;
   7562  1.1  mrg       enable_schedule_as_rhs_p = false;
   7563  1.1  mrg 
   7564  1.1  mrg       /* Schedule newly created code, that has not been scheduled yet.  */
   7565  1.1  mrg       do_p = true;
   7566  1.1  mrg 
   7567  1.1  mrg       while (do_p)
   7568  1.1  mrg         {
   7569  1.1  mrg           do_p = false;
   7570  1.1  mrg 
   7571  1.1  mrg           for (i = 0; i < current_nr_blocks; i++)
   7572  1.1  mrg             {
   7573  1.1  mrg               basic_block bb = EBB_FIRST_BB (i);
   7574  1.1  mrg 
   7575  1.1  mrg               if (bitmap_bit_p (blocks_to_reschedule, bb->index))
   7576  1.1  mrg                 {
   7577  1.1  mrg                   if (! bb_ends_ebb_p (bb))
   7578  1.1  mrg                     bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index);
   7579  1.1  mrg                   if (sel_bb_empty_p (bb))
   7580  1.1  mrg                     {
   7581  1.1  mrg                       bitmap_clear_bit (blocks_to_reschedule, bb->index);
   7582  1.1  mrg                       continue;
   7583  1.1  mrg                     }
   7584  1.1  mrg                   clear_outdated_rtx_info (bb);
   7585  1.1  mrg                   if (sel_insn_is_speculation_check (BB_END (bb))
   7586  1.1  mrg                       && JUMP_P (BB_END (bb)))
   7587  1.1  mrg                     bitmap_set_bit (blocks_to_reschedule,
   7588  1.1  mrg                                     BRANCH_EDGE (bb)->dest->index);
   7589  1.1  mrg                 }
   7590  1.1  mrg               else if (! sel_bb_empty_p (bb)
   7591  1.1  mrg                        && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
   7592  1.1  mrg                 bitmap_set_bit (blocks_to_reschedule, bb->index);
   7593  1.1  mrg             }
   7594  1.1  mrg 
   7595  1.1  mrg           for (i = 0; i < current_nr_blocks; i++)
   7596  1.1  mrg             {
   7597  1.1  mrg               bb = EBB_FIRST_BB (i);
   7598  1.1  mrg 
   7599  1.1  mrg               /* While pipelining outer loops, skip bundling for loop
   7600  1.1  mrg                  preheaders.  Those will be rescheduled in the outer
   7601  1.1  mrg                  loop.  */
   7602  1.1  mrg               if (sel_is_loop_preheader_p (bb))
   7603  1.1  mrg                 {
   7604  1.1  mrg                   clear_outdated_rtx_info (bb);
   7605  1.1  mrg                   continue;
   7606  1.1  mrg                 }
   7607  1.1  mrg 
   7608  1.1  mrg               if (bitmap_bit_p (blocks_to_reschedule, bb->index))
   7609  1.1  mrg                 {
   7610  1.1  mrg                   flist_tail_init (new_fences);
   7611  1.1  mrg 
   7612  1.1  mrg                   orig_max_seqno = init_seqno (blocks_to_reschedule, bb);
   7613  1.1  mrg 
   7614  1.1  mrg                   /* Mark BB as head of the new ebb.  */
   7615  1.1  mrg                   bitmap_set_bit (forced_ebb_heads, bb->index);
   7616  1.1  mrg 
   7617  1.1  mrg                   gcc_assert (fences == NULL);
   7618  1.1  mrg 
   7619  1.1  mrg                   init_fences (bb_note (bb));
   7620  1.1  mrg 
   7621  1.1  mrg                   sel_sched_region_2 (orig_max_seqno);
   7622  1.1  mrg 
   7623  1.1  mrg                   do_p = true;
   7624  1.1  mrg                   break;
   7625  1.1  mrg                 }
   7626  1.1  mrg             }
   7627  1.1  mrg         }
   7628  1.1  mrg     }
   7629  1.1  mrg }
   7630  1.1  mrg 
/* Schedule the RGN region.  Initializes the region, schedules it either
   normally or in a degenerate always-pick-next-insn mode (when scheduling
   is disabled or the debug counter says to skip), then finalizes it.  */
void
sel_sched_region (int rgn)
{
  bool schedule_p;
  bool reset_sched_cycles_p;

  /* Nonzero return means the region needs no scheduling at all.  */
  if (sel_region_init (rgn))
    return;

  if (sched_verbose >= 1)
    sel_print ("Scheduling region %d\n", rgn);

  schedule_p = (!sched_is_disabled_for_current_region_p ()
                && dbg_cnt (sel_sched_region_cnt));
  reset_sched_cycles_p = pipelining_p;
  if (schedule_p)
    sel_sched_region_1 ();
  else
    {
      /* Schedule always selecting the next insn to make the correct data
	 for bundling or other later passes.  */
      pipelining_p = false;
      reset_sched_cycles_p = false;
      force_next_insn = 1;
      sel_sched_region_1 ();
      force_next_insn = 0;
    }
  sel_region_finish (reset_sched_cycles_p);
}
   7661  1.1  mrg 
/* Perform global init for the scheduler.  Sets up the CFG, dominators,
   scheduler pools, region info, dependence analysis, liveness sets and
   per-target data before any region is scheduled.  */
static void
sel_global_init (void)
{
  /* Remove empty blocks: their presence can break assumptions elsewhere,
     e.g. the logic to invoke update_liveness_on_insn in sel_region_init.  */
  cleanup_cfg (0);

  calculate_dominance_info (CDI_DOMINATORS);
  alloc_sched_pools ();

  /* Setup the infos for sched_init.  */
  sel_setup_sched_infos ();
  setup_sched_dump ();

  sched_rgn_init (false);
  sched_init ();

  sched_init_bbs ();
  /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
  after_recovery = 0;
  can_issue_more = issue_rate;

  sched_extend_target ();
  sched_deps_init (true);
  setup_nop_and_exit_insns ();
  sel_extend_global_bb_info ();
  init_lv_sets ();
  init_hard_regs_data ();
}
   7692  1.1  mrg 
/* Free the global data of the scheduler.  Mirror of sel_global_init,
   releasing resources roughly in reverse order of acquisition.  */
static void
sel_global_finish (void)
{
  free_bb_note_pool ();
  free_lv_sets ();
  sel_finish_global_bb_info ();

  free_regset_pool ();
  free_nop_and_exit_insns ();

  sched_rgn_finish ();
  sched_deps_finish ();
  sched_finish ();

  /* Pipelining data exists only when loops were analyzed.  */
  if (current_loops)
    sel_finish_pipelining ();

  free_sched_pools ();
  free_dominance_info (CDI_DOMINATORS);
}
   7714  1.1  mrg 
   7715  1.1  mrg /* Return true when we need to skip selective scheduling.  Used for debugging.  */
   7716  1.1  mrg bool
   7717  1.1  mrg maybe_skip_selective_scheduling (void)
   7718  1.1  mrg {
   7719  1.1  mrg   return ! dbg_cnt (sel_sched_cnt);
   7720  1.1  mrg }
   7721  1.1  mrg 
/* The entry point.  Runs global init, schedules every region of the
   current function, and tears the scheduler down again.  */
void
run_selective_scheduling (void)
{
  int rgn;

  /* Nothing to do for a function with only the fixed entry/exit blocks.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  sel_global_init ();

  for (rgn = 0; rgn < nr_regions; rgn++)
    sel_sched_region (rgn);

  sel_global_finish ();
}
   7738           
   7739           #endif
   7740