      1  1.1  mrg /* Vectorizer
      2  1.9  mrg    Copyright (C) 2003-2018 Free Software Foundation, Inc.
      3  1.1  mrg    Contributed by Dorit Naishlos <dorit (at) il.ibm.com>
      4  1.1  mrg 
      5  1.1  mrg This file is part of GCC.
      6  1.1  mrg 
      7  1.1  mrg GCC is free software; you can redistribute it and/or modify it under
      8  1.1  mrg the terms of the GNU General Public License as published by the Free
      9  1.1  mrg Software Foundation; either version 3, or (at your option) any later
     10  1.1  mrg version.
     11  1.1  mrg 
     12  1.1  mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
     13  1.1  mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
     14  1.1  mrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     15  1.1  mrg for more details.
     16  1.1  mrg 
     17  1.1  mrg You should have received a copy of the GNU General Public License
     18  1.1  mrg along with GCC; see the file COPYING3.  If not see
     19  1.1  mrg <http://www.gnu.org/licenses/>.  */
     20  1.1  mrg 
     21  1.1  mrg #ifndef GCC_TREE_VECTORIZER_H
     22  1.1  mrg #define GCC_TREE_VECTORIZER_H
     23  1.1  mrg 
     24  1.1  mrg #include "tree-data-ref.h"
     25  1.9  mrg #include "tree-hash-traits.h"
     26  1.3  mrg #include "target.h"
     27  1.1  mrg 
     28  1.1  mrg /* Used for naming of new temporaries.  */
     29  1.1  mrg enum vect_var_kind {
     30  1.1  mrg   vect_simple_var,
     31  1.1  mrg   vect_pointer_var,
     32  1.6  mrg   vect_scalar_var,
     33  1.6  mrg   vect_mask_var
     34  1.1  mrg };
     35  1.1  mrg 
     36  1.1  mrg /* Defines type of operation.  */
     37  1.1  mrg enum operation_type {
     38  1.1  mrg   unary_op = 1,
     39  1.1  mrg   binary_op,
     40  1.1  mrg   ternary_op
     41  1.1  mrg };
     42  1.1  mrg 
     43  1.1  mrg /* Define type of available alignment support.  */
     44  1.1  mrg enum dr_alignment_support {
     45  1.1  mrg   dr_unaligned_unsupported,
     46  1.1  mrg   dr_unaligned_supported,
     47  1.1  mrg   dr_explicit_realign,
     48  1.1  mrg   dr_explicit_realign_optimized,
     49  1.1  mrg   dr_aligned
     50  1.1  mrg };
     51  1.1  mrg 
     52  1.1  mrg /* Define type of def-use cross-iteration cycle.  */
     53  1.1  mrg enum vect_def_type {
     54  1.1  mrg   vect_uninitialized_def = 0,
     55  1.1  mrg   vect_constant_def = 1,
     56  1.1  mrg   vect_external_def,
     57  1.1  mrg   vect_internal_def,
     58  1.1  mrg   vect_induction_def,
     59  1.1  mrg   vect_reduction_def,
     60  1.1  mrg   vect_double_reduction_def,
     61  1.1  mrg   vect_nested_cycle,
     62  1.1  mrg   vect_unknown_def_type
     63  1.1  mrg };
     64  1.1  mrg 
     65  1.6  mrg /* Define type of reduction.  */
     66  1.6  mrg enum vect_reduction_type {
     67  1.6  mrg   TREE_CODE_REDUCTION,
     68  1.6  mrg   COND_REDUCTION,
     69  1.8  mrg   INTEGER_INDUC_COND_REDUCTION,
     70  1.9  mrg   CONST_COND_REDUCTION,
     71  1.9  mrg 
     72  1.9  mrg   /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     73  1.9  mrg      to implement:
     74  1.9  mrg 
     75  1.9  mrg        for (int i = 0; i < VF; ++i)
     76  1.9  mrg          res = cond[i] ? val[i] : res;  */
     77  1.9  mrg   EXTRACT_LAST_REDUCTION,
     78  1.9  mrg 
     79  1.9  mrg   /* Use a folding reduction within the loop to implement:
     80  1.9  mrg 
     81  1.9  mrg        for (int i = 0; i < VF; ++i)
     82  1.9  mrg 	 res = res OP val[i];
     83  1.9  mrg 
      84  1.9  mrg      (with no reassociation).  */
     85  1.9  mrg   FOLD_LEFT_REDUCTION
     86  1.6  mrg };
     87  1.6  mrg 
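/* Illustrative sketch, not actual vectorizer code: the in-order scalar sum
   below follows the FOLD_LEFT_REDUCTION scheme described above, whereas a
   TREE_CODE_REDUCTION is free to reassociate, e.g. accumulating even and
   odd elements in separate vector lanes before a final reduction, which
   can change floating-point results.  */

static inline float
example_fold_left_sum (const float *val, unsigned int n)
{
  float res = 0.0f;
  for (unsigned int i = 0; i < n; ++i)
    /* res = res OP val[i], with no reassociation.  */
    res = res + val[i];
  return res;
}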
     88  1.1  mrg #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
     89  1.1  mrg                                    || ((D) == vect_double_reduction_def) \
     90  1.1  mrg                                    || ((D) == vect_nested_cycle))
     91  1.1  mrg 
     92  1.3  mrg /* Structure to encapsulate information about a group of like
     93  1.3  mrg    instructions to be presented to the target cost model.  */
     94  1.6  mrg struct stmt_info_for_cost {
     95  1.3  mrg   int count;
     96  1.3  mrg   enum vect_cost_for_stmt kind;
     97  1.6  mrg   gimple *stmt;
     98  1.3  mrg   int misalign;
     99  1.6  mrg };
    100  1.3  mrg 
    101  1.3  mrg typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
    102  1.3  mrg 
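/* Illustrative sketch, not actual vectorizer code: how one entry could be
   recorded in a stmt_vector_for_cost.  vector_load is one of the
   vect_cost_for_stmt kinds from target.h; the count, statement and
   misalignment fields mirror the struct above.  */

static inline void
example_record_load_cost (stmt_vector_for_cost *costs, gimple *stmt)
{
  stmt_info_for_cost entry = { 1, vector_load, stmt, 0 };
  costs->safe_push (entry);
}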
    103  1.9  mrg /* Maps base addresses to an innermost_loop_behavior that gives the maximum
    104  1.9  mrg    known alignment for that base.  */
    105  1.9  mrg typedef hash_map<tree_operand_hash,
    106  1.9  mrg 		 innermost_loop_behavior *> vec_base_alignments;
    107  1.9  mrg 
    108  1.1  mrg /************************************************************************
    109  1.1  mrg   SLP
    110  1.1  mrg  ************************************************************************/
    111  1.5  mrg typedef struct _slp_tree *slp_tree;
    112  1.1  mrg 
    113  1.3  mrg /* A computation tree of an SLP instance.  Each node corresponds to a group of
    114  1.1  mrg    stmts to be packed in a SIMD stmt.  */
    115  1.5  mrg struct _slp_tree {
     116  1.3  mrg   /* Nodes that contain the def-stmts of the operands of this node's stmts.  */
    117  1.5  mrg   vec<slp_tree> children;
    118  1.1  mrg   /* A group of scalar stmts to be vectorized together.  */
    119  1.6  mrg   vec<gimple *> stmts;
    120  1.5  mrg   /* Load permutation relative to the stores, NULL if there is no
    121  1.5  mrg      permutation.  */
    122  1.5  mrg   vec<unsigned> load_permutation;
    123  1.1  mrg   /* Vectorized stmt/s.  */
    124  1.6  mrg   vec<gimple *> vec_stmts;
    125  1.1  mrg   /* Number of vector stmts that are created to replace the group of scalar
    126  1.1  mrg      stmts. It is calculated during the transformation phase as the number of
    127  1.1  mrg      scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
    128  1.1  mrg      divided by vector size.  */
    129  1.1  mrg   unsigned int vec_stmts_size;
    130  1.6  mrg   /* Whether the scalar computations use two different operators.  */
    131  1.6  mrg   bool two_operators;
    132  1.6  mrg   /* The DEF type of this node.  */
    133  1.6  mrg   enum vect_def_type def_type;
    134  1.5  mrg };
    135  1.1  mrg 
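/* Illustrative sketch, not actual vectorizer code: a scalar loop in which
   the two interleaved stores form one group of size 2.  An SLP node for
   that group would keep the two scalar stmts in STMTS and, per the comment
   on vec_stmts_size above, emit GROUP_SIZE * VF divided by the vector size
   vector stmts for it.  */

static inline void
example_slp_group (int *a, const int *b, const int *c, int n)
{
  for (int i = 0; i < n; ++i)
    {
      a[2 * i + 0] = b[i] + 1;	/* first scalar stmt in the group.  */
      a[2 * i + 1] = c[i] + 2;	/* second scalar stmt in the group.  */
    }
}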
    136  1.1  mrg 
    137  1.1  mrg /* SLP instance is a sequence of stmts in a loop that can be packed into
    138  1.1  mrg    SIMD stmts.  */
    139  1.1  mrg typedef struct _slp_instance {
    140  1.1  mrg   /* The root of SLP tree.  */
    141  1.1  mrg   slp_tree root;
    142  1.1  mrg 
    143  1.1  mrg   /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
    144  1.1  mrg   unsigned int group_size;
    145  1.1  mrg 
     146  1.9  mrg   /* The unrolling factor required to vectorize this SLP instance.  */
    147  1.9  mrg   poly_uint64 unrolling_factor;
    148  1.1  mrg 
    149  1.1  mrg   /* The group of nodes that contain loads of this SLP instance.  */
    150  1.3  mrg   vec<slp_tree> loads;
    151  1.9  mrg 
    152  1.9  mrg   /* The SLP node containing the reduction PHIs.  */
    153  1.9  mrg   slp_tree reduc_phis;
    154  1.1  mrg } *slp_instance;
    155  1.1  mrg 
    156  1.1  mrg 
    157  1.1  mrg /* Access Functions.  */
    158  1.1  mrg #define SLP_INSTANCE_TREE(S)                     (S)->root
    159  1.1  mrg #define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
    160  1.1  mrg #define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
    161  1.1  mrg #define SLP_INSTANCE_LOADS(S)                    (S)->loads
    162  1.1  mrg 
    163  1.3  mrg #define SLP_TREE_CHILDREN(S)                     (S)->children
    164  1.1  mrg #define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
    165  1.1  mrg #define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
    166  1.1  mrg #define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
    167  1.5  mrg #define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
    168  1.6  mrg #define SLP_TREE_TWO_OPERATORS(S)		 (S)->two_operators
    169  1.6  mrg #define SLP_TREE_DEF_TYPE(S)			 (S)->def_type
    170  1.3  mrg 
    171  1.3  mrg 
    172  1.3  mrg 
    173  1.9  mrg /* Describes two objects whose addresses must be unequal for the vectorized
    174  1.9  mrg    loop to be valid.  */
    175  1.9  mrg typedef std::pair<tree, tree> vec_object_pair;
    176  1.9  mrg 
    177  1.9  mrg /* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
    178  1.9  mrg    UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
    179  1.9  mrg struct vec_lower_bound {
    180  1.9  mrg   vec_lower_bound () {}
    181  1.9  mrg   vec_lower_bound (tree e, bool u, poly_uint64 m)
    182  1.9  mrg     : expr (e), unsigned_p (u), min_value (m) {}
    183  1.9  mrg 
    184  1.9  mrg   tree expr;
    185  1.9  mrg   bool unsigned_p;
    186  1.9  mrg   poly_uint64 min_value;
    187  1.5  mrg };
    188  1.5  mrg 
    189  1.9  mrg /* Vectorizer state common between loop and basic-block vectorization.  */
    190  1.9  mrg struct vec_info {
    191  1.9  mrg   enum vec_kind { bb, loop };
    192  1.5  mrg 
    193  1.9  mrg   vec_info (vec_kind, void *);
    194  1.9  mrg   ~vec_info ();
    195  1.5  mrg 
    196  1.9  mrg   /* The type of vectorization.  */
    197  1.9  mrg   vec_kind kind;
    198  1.6  mrg 
    199  1.6  mrg   /* All SLP instances.  */
    200  1.9  mrg   auto_vec<slp_instance> slp_instances;
    201  1.6  mrg 
    202  1.9  mrg   /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
    203  1.6  mrg   vec<data_reference_p> datarefs;
    204  1.1  mrg 
    205  1.9  mrg   /* Maps base addresses to an innermost_loop_behavior that gives the maximum
    206  1.9  mrg      known alignment for that base.  */
    207  1.9  mrg   vec_base_alignments base_alignments;
    208  1.9  mrg 
    209  1.9  mrg   /* All data dependences.  Freed by free_dependence_relations, so not
    210  1.9  mrg      an auto_vec.  */
    211  1.6  mrg   vec<ddr_p> ddrs;
    212  1.5  mrg 
    213  1.6  mrg   /* All interleaving chains of stores, represented by the first
    214  1.6  mrg      stmt in the chain.  */
    215  1.9  mrg   auto_vec<gimple *> grouped_stores;
    216  1.5  mrg 
    217  1.6  mrg   /* Cost data used by the target cost model.  */
    218  1.6  mrg   void *target_cost_data;
    219  1.5  mrg };
    220  1.5  mrg 
    221  1.6  mrg struct _loop_vec_info;
    222  1.6  mrg struct _bb_vec_info;
    223  1.6  mrg 
    224  1.6  mrg template<>
    225  1.6  mrg template<>
    226  1.6  mrg inline bool
    227  1.6  mrg is_a_helper <_loop_vec_info *>::test (vec_info *i)
    228  1.5  mrg {
    229  1.6  mrg   return i->kind == vec_info::loop;
    230  1.5  mrg }
    231  1.5  mrg 
    232  1.6  mrg template<>
    233  1.6  mrg template<>
    234  1.5  mrg inline bool
    235  1.6  mrg is_a_helper <_bb_vec_info *>::test (vec_info *i)
    236  1.5  mrg {
    237  1.6  mrg   return i->kind == vec_info::bb;
    238  1.5  mrg }
    239  1.5  mrg 
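/* Illustrative sketch, not actual vectorizer code: the specializations
   above let generic code test and downcast a vec_info with is_a and
   dyn_cast.  */

static inline bool
example_is_loop_vinfo (vec_info *vinfo)
{
  return is_a <_loop_vec_info *> (vinfo);
}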
    240  1.5  mrg 
    241  1.9  mrg /* In general, we can divide the vector statements in a vectorized loop
    242  1.9  mrg    into related groups ("rgroups") and say that for each rgroup there is
    243  1.9  mrg    some nS such that the rgroup operates on nS values from one scalar
    244  1.9  mrg    iteration followed by nS values from the next.  That is, if VF is the
    245  1.9  mrg    vectorization factor of the loop, the rgroup operates on a sequence:
    246  1.9  mrg 
    247  1.9  mrg      (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)
    248  1.9  mrg 
    249  1.9  mrg    where (i,j) represents a scalar value with index j in a scalar
    250  1.9  mrg    iteration with index i.
    251  1.9  mrg 
    252  1.9  mrg    [ We use the term "rgroup" to emphasise that this grouping isn't
    253  1.9  mrg      necessarily the same as the grouping of statements used elsewhere.
    254  1.9  mrg      For example, if we implement a group of scalar loads using gather
    255  1.9  mrg      loads, we'll use a separate gather load for each scalar load, and
    256  1.9  mrg      thus each gather load will belong to its own rgroup. ]
    257  1.9  mrg 
    258  1.9  mrg    In general this sequence will occupy nV vectors concatenated
    259  1.9  mrg    together.  If these vectors have nL lanes each, the total number
    260  1.9  mrg    of scalar values N is given by:
    261  1.9  mrg 
    262  1.9  mrg        N = nS * VF = nV * nL
    263  1.9  mrg 
    264  1.9  mrg    None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
    265  1.9  mrg    are compile-time constants but VF and nL can be variable (if the target
    266  1.9  mrg    supports variable-length vectors).
    267  1.9  mrg 
    268  1.9  mrg    In classical vectorization, each iteration of the vector loop would
    269  1.9  mrg    handle exactly VF iterations of the original scalar loop.  However,
    270  1.9  mrg    in a fully-masked loop, a particular iteration of the vector loop
    271  1.9  mrg    might handle fewer than VF iterations of the scalar loop.  The vector
    272  1.9  mrg    lanes that correspond to iterations of the scalar loop are said to be
    273  1.9  mrg    "active" and the other lanes are said to be "inactive".
    274  1.9  mrg 
    275  1.9  mrg    In a fully-masked loop, many rgroups need to be masked to ensure that
    276  1.9  mrg    they have no effect for the inactive lanes.  Each such rgroup needs a
    277  1.9  mrg    sequence of booleans in the same order as above, but with each (i,j)
    278  1.9  mrg    replaced by a boolean that indicates whether iteration i is active.
    279  1.9  mrg    This sequence occupies nV vector masks that again have nL lanes each.
    280  1.9  mrg    Thus the mask sequence as a whole consists of VF independent booleans
    281  1.9  mrg    that are each repeated nS times.
    282  1.9  mrg 
    283  1.9  mrg    We make the simplifying assumption that if a sequence of nV masks is
    284  1.9  mrg    suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
    285  1.9  mrg    VIEW_CONVERTing it.  This holds for all current targets that support
    286  1.9  mrg    fully-masked loops.  For example, suppose the scalar loop is:
    287  1.9  mrg 
    288  1.9  mrg      float *f;
    289  1.9  mrg      double *d;
    290  1.9  mrg      for (int i = 0; i < n; ++i)
    291  1.9  mrg        {
    292  1.9  mrg 	 f[i * 2 + 0] += 1.0f;
    293  1.9  mrg 	 f[i * 2 + 1] += 2.0f;
    294  1.9  mrg 	 d[i] += 3.0;
    295  1.9  mrg        }
    296  1.9  mrg 
    297  1.9  mrg    and suppose that vectors have 256 bits.  The vectorized f accesses
    298  1.9  mrg    will belong to one rgroup and the vectorized d access to another:
    299  1.9  mrg 
    300  1.9  mrg      f rgroup: nS = 2, nV = 1, nL = 8
    301  1.9  mrg      d rgroup: nS = 1, nV = 1, nL = 4
    302  1.9  mrg 	       VF = 4
    303  1.9  mrg 
    304  1.9  mrg      [ In this simple example the rgroups do correspond to the normal
    305  1.9  mrg        SLP grouping scheme. ]
    306  1.9  mrg 
    307  1.9  mrg    If only the first three lanes are active, the masks we need are:
    308  1.9  mrg 
    309  1.9  mrg      f rgroup: 1 1 | 1 1 | 1 1 | 0 0
    310  1.9  mrg      d rgroup:  1  |  1  |  1  |  0
    311  1.9  mrg 
    312  1.9  mrg    Here we can use a mask calculated for f's rgroup for d's, but not
    313  1.9  mrg    vice versa.
    314  1.9  mrg 
    315  1.9  mrg    Thus for each value of nV, it is enough to provide nV masks, with the
    316  1.9  mrg    mask being calculated based on the highest nL (or, equivalently, based
    317  1.9  mrg    on the highest nS) required by any rgroup with that nV.  We therefore
    318  1.9  mrg    represent the entire collection of masks as a two-level table, with the
    319  1.9  mrg    first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
    320  1.9  mrg    the second being indexed by the mask index 0 <= i < nV.  */
    321  1.9  mrg 
    322  1.9  mrg /* The masks needed by rgroups with nV vectors, according to the
    323  1.9  mrg    description above.  */
    324  1.9  mrg struct rgroup_masks {
    325  1.9  mrg   /* The largest nS for all rgroups that use these masks.  */
    326  1.9  mrg   unsigned int max_nscalars_per_iter;
    327  1.9  mrg 
    328  1.9  mrg   /* The type of mask to use, based on the highest nS recorded above.  */
    329  1.9  mrg   tree mask_type;
    330  1.9  mrg 
    331  1.9  mrg   /* A vector of nV masks, in iteration order.  */
    332  1.9  mrg   vec<tree> masks;
    333  1.9  mrg };
    334  1.9  mrg 
    335  1.9  mrg typedef auto_vec<rgroup_masks> vec_loop_masks;
    336  1.9  mrg 
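/* Illustrative sketch, not actual vectorizer code: indexing the two-level
   mask table described above.  An rgroup that needs NVECTORS masks uses
   row NVECTORS - 1 and then mask INDEX within that row; real lookup code
   may also have to populate missing masks and convert between mask types
   when rgroups share a row.  */

static inline tree
example_lookup_loop_mask (vec_loop_masks *masks, unsigned int nvectors,
			  unsigned int index)
{
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  return rgm->masks[index];
}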
    337  1.1  mrg /*-----------------------------------------------------------------*/
    338  1.1  mrg /* Info on vectorized loops.                                       */
    339  1.1  mrg /*-----------------------------------------------------------------*/
    340  1.6  mrg typedef struct _loop_vec_info : public vec_info {
    341  1.9  mrg   _loop_vec_info (struct loop *);
    342  1.9  mrg   ~_loop_vec_info ();
    343  1.1  mrg 
     344  1.1  mrg   /* The loop to which this info struct refers.  */
    345  1.1  mrg   struct loop *loop;
    346  1.1  mrg 
    347  1.1  mrg   /* The loop basic blocks.  */
    348  1.1  mrg   basic_block *bbs;
    349  1.1  mrg 
    350  1.5  mrg   /* Number of latch executions.  */
    351  1.5  mrg   tree num_itersm1;
    352  1.1  mrg   /* Number of iterations.  */
    353  1.1  mrg   tree num_iters;
    354  1.5  mrg   /* Number of iterations of the original loop.  */
    355  1.1  mrg   tree num_iters_unchanged;
    356  1.8  mrg   /* Condition under which this loop is analyzed and versioned.  */
    357  1.8  mrg   tree num_iters_assumptions;
    358  1.1  mrg 
     359  1.5  mrg   /* Threshold of number of iterations below which vectorization will not be
    360  1.5  mrg      performed. It is calculated from MIN_PROFITABLE_ITERS and
    361  1.5  mrg      PARAM_MIN_VECT_LOOP_BOUND.  */
    362  1.5  mrg   unsigned int th;
    363  1.5  mrg 
    364  1.9  mrg   /* When applying loop versioning, the vector form should only be used
    365  1.9  mrg      if the number of scalar iterations is >= this value, on top of all
    366  1.9  mrg      the other requirements.  Ignored when loop versioning is not being
    367  1.9  mrg      used.  */
    368  1.9  mrg   poly_uint64 versioning_threshold;
    369  1.9  mrg 
     370  1.1  mrg   /* Vectorization factor.  */
    371  1.9  mrg   poly_uint64 vectorization_factor;
    372  1.9  mrg 
    373  1.9  mrg   /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
    374  1.9  mrg      if there is no particular limit.  */
    375  1.9  mrg   unsigned HOST_WIDE_INT max_vectorization_factor;
    376  1.9  mrg 
    377  1.9  mrg   /* The masks that a fully-masked loop should use to avoid operating
    378  1.9  mrg      on inactive scalars.  */
    379  1.9  mrg   vec_loop_masks masks;
    380  1.9  mrg 
    381  1.9  mrg   /* If we are using a loop mask to align memory addresses, this variable
    382  1.9  mrg      contains the number of vector elements that we should skip in the
    383  1.9  mrg      first iteration of the vector loop (i.e. the number of leading
    384  1.9  mrg      elements that should be false in the first mask).  */
    385  1.9  mrg   tree mask_skip_niters;
    386  1.9  mrg 
    387  1.9  mrg   /* Type of the variables to use in the WHILE_ULT call for fully-masked
    388  1.9  mrg      loops.  */
    389  1.9  mrg   tree mask_compare_type;
    390  1.1  mrg 
     391  1.1  mrg   /* The unaligned DR according to which the loop was peeled.  */
    392  1.1  mrg   struct data_reference *unaligned_dr;
    393  1.1  mrg 
    394  1.1  mrg   /* peeling_for_alignment indicates whether peeling for alignment will take
    395  1.1  mrg      place, and what the peeling factor should be:
    396  1.1  mrg      peeling_for_alignment = X means:
    397  1.1  mrg         If X=0: Peeling for alignment will not be applied.
    398  1.1  mrg         If X>0: Peel first X iterations.
    399  1.1  mrg         If X=-1: Generate a runtime test to calculate the number of iterations
    400  1.1  mrg                  to be peeled, using the dataref recorded in the field
    401  1.1  mrg                  unaligned_dr.  */
    402  1.1  mrg   int peeling_for_alignment;
    403  1.1  mrg 
    404  1.1  mrg   /* The mask used to check the alignment of pointers or arrays.  */
    405  1.1  mrg   int ptr_mask;
    406  1.1  mrg 
    407  1.3  mrg   /* The loop nest in which the data dependences are computed.  */
    408  1.9  mrg   auto_vec<loop_p> loop_nest;
    409  1.3  mrg 
    410  1.1  mrg   /* Data Dependence Relations defining address ranges that are candidates
    411  1.1  mrg      for a run-time aliasing check.  */
    412  1.9  mrg   auto_vec<ddr_p> may_alias_ddrs;
    413  1.1  mrg 
    414  1.5  mrg   /* Data Dependence Relations defining address ranges together with segment
    415  1.5  mrg      lengths from which the run-time aliasing check is built.  */
    416  1.9  mrg   auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
    417  1.9  mrg 
     418  1.9  mrg   /* Check that the addresses of each pair of objects are unequal.  */
    419  1.9  mrg   auto_vec<vec_object_pair> check_unequal_addrs;
    420  1.9  mrg 
    421  1.9  mrg   /* List of values that are required to be nonzero.  This is used to check
    422  1.9  mrg      whether things like "x[i * n] += 1;" are safe and eventually gets added
    423  1.9  mrg      to the checks for lower bounds below.  */
    424  1.9  mrg   auto_vec<tree> check_nonzero;
    425  1.9  mrg 
    426  1.9  mrg   /* List of values that need to be checked for a minimum value.  */
    427  1.9  mrg   auto_vec<vec_lower_bound> lower_bounds;
    428  1.5  mrg 
    429  1.1  mrg   /* Statements in the loop that have data references that are candidates for a
    430  1.1  mrg      runtime (loop versioning) misalignment check.  */
    431  1.9  mrg   auto_vec<gimple *> may_misalign_stmts;
    432  1.1  mrg 
    433  1.3  mrg   /* Reduction cycles detected in the loop. Used in loop-aware SLP.  */
    434  1.9  mrg   auto_vec<gimple *> reductions;
    435  1.3  mrg 
    436  1.3  mrg   /* All reduction chains in the loop, represented by the first
    437  1.3  mrg      stmt in the chain.  */
    438  1.9  mrg   auto_vec<gimple *> reduction_chains;
    439  1.3  mrg 
    440  1.6  mrg   /* Cost vector for a single scalar iteration.  */
    441  1.9  mrg   auto_vec<stmt_info_for_cost> scalar_cost_vec;
    442  1.9  mrg 
    443  1.9  mrg   /* Map of IV base/step expressions to inserted name in the preheader.  */
    444  1.9  mrg   hash_map<tree_operand_hash, tree> *ivexpr_map;
    445  1.3  mrg 
     446  1.8  mrg   /* The unrolling factor needed to SLP the loop.  In case pure SLP is
    447  1.8  mrg      applied to the loop, i.e., no unrolling is needed, this is 1.  */
    448  1.9  mrg   poly_uint64 slp_unrolling_factor;
    449  1.8  mrg 
    450  1.6  mrg   /* Cost of a single scalar iteration.  */
    451  1.6  mrg   int single_scalar_iteration_cost;
    452  1.3  mrg 
    453  1.8  mrg   /* Is the loop vectorizable? */
    454  1.8  mrg   bool vectorizable;
    455  1.8  mrg 
    456  1.9  mrg   /* Records whether we still have the option of using a fully-masked loop.  */
    457  1.9  mrg   bool can_fully_mask_p;
    458  1.9  mrg 
     459  1.9  mrg   /* True if we have decided to use a fully-masked loop.  */
    460  1.9  mrg   bool fully_masked_p;
    461  1.9  mrg 
    462  1.3  mrg   /* When we have grouped data accesses with gaps, we may introduce invalid
    463  1.1  mrg      memory accesses.  We peel the last iteration of the loop to prevent
    464  1.1  mrg      this.  */
    465  1.1  mrg   bool peeling_for_gaps;
    466  1.1  mrg 
    467  1.5  mrg   /* When the number of iterations is not a multiple of the vector size
    468  1.5  mrg      we need to peel off iterations at the end to form an epilogue loop.  */
    469  1.5  mrg   bool peeling_for_niter;
    470  1.5  mrg 
    471  1.3  mrg   /* Reductions are canonicalized so that the last operand is the reduction
    472  1.3  mrg      operand.  If this places a constant into RHS1, this decanonicalizes
    473  1.3  mrg      GIMPLE for other phases, so we must track when this has occurred and
    474  1.3  mrg      fix it up.  */
    475  1.3  mrg   bool operands_swapped;
    476  1.3  mrg 
    477  1.5  mrg   /* True if there are no loop carried data dependencies in the loop.
    478  1.5  mrg      If loop->safelen <= 1, then this is always true, either the loop
    479  1.5  mrg      didn't have any loop carried data dependencies, or the loop is being
    480  1.5  mrg      vectorized guarded with some runtime alias checks, or couldn't
    481  1.5  mrg      be vectorized at all, but then this field shouldn't be used.
    482  1.5  mrg      For loop->safelen >= 2, the user has asserted that there are no
    483  1.5  mrg      backward dependencies, but there still could be loop carried forward
    484  1.5  mrg      dependencies in such loops.  This flag will be false if normal
    485  1.5  mrg      vectorizer data dependency analysis would fail or require versioning
    486  1.5  mrg      for alias, but because of loop->safelen >= 2 it has been vectorized
    487  1.5  mrg      even without versioning for alias.  E.g. in:
    488  1.5  mrg      #pragma omp simd
    489  1.5  mrg      for (int i = 0; i < m; i++)
    490  1.5  mrg        a[i] = a[i + k] * c;
    491  1.5  mrg      (or #pragma simd or #pragma ivdep) we can vectorize this and it will
    492  1.5  mrg      DTRT even for k > 0 && k < m, but without safelen we would not
    493  1.5  mrg      vectorize this, so this field would be false.  */
    494  1.5  mrg   bool no_data_dependencies;
    495  1.5  mrg 
    496  1.8  mrg   /* Mark loops having masked stores.  */
    497  1.8  mrg   bool has_mask_store;
    498  1.8  mrg 
    499  1.5  mrg   /* If if-conversion versioned this loop before conversion, this is the
    500  1.5  mrg      loop version without if-conversion.  */
    501  1.5  mrg   struct loop *scalar_loop;
    502  1.5  mrg 
     503  1.8  mrg   /* For loops that are epilogues of already-vectorized loops,
     504  1.8  mrg      this points to the original vectorized loop.  Otherwise NULL.  */
    505  1.8  mrg   _loop_vec_info *orig_loop_info;
    506  1.6  mrg 
    507  1.1  mrg } *loop_vec_info;
    508  1.1  mrg 
    509  1.1  mrg /* Access Functions.  */
    510  1.1  mrg #define LOOP_VINFO_LOOP(L)                 (L)->loop
    511  1.1  mrg #define LOOP_VINFO_BBS(L)                  (L)->bbs
    512  1.5  mrg #define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
    513  1.1  mrg #define LOOP_VINFO_NITERS(L)               (L)->num_iters
    514  1.5  mrg /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
     515  1.5  mrg    prologue peeling, retain the total unchanged scalar loop iterations
     516  1.5  mrg    for the cost model.  */
    517  1.1  mrg #define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
    518  1.8  mrg #define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
    519  1.5  mrg #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
    520  1.9  mrg #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
    521  1.1  mrg #define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
    522  1.9  mrg #define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
    523  1.9  mrg #define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
    524  1.1  mrg #define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
    525  1.9  mrg #define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
    526  1.9  mrg #define LOOP_VINFO_MASKS(L)                (L)->masks
    527  1.9  mrg #define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
    528  1.9  mrg #define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
    529  1.1  mrg #define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
    530  1.3  mrg #define LOOP_VINFO_LOOP_NEST(L)            (L)->loop_nest
    531  1.1  mrg #define LOOP_VINFO_DATAREFS(L)             (L)->datarefs
    532  1.1  mrg #define LOOP_VINFO_DDRS(L)                 (L)->ddrs
    533  1.1  mrg #define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
    534  1.5  mrg #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
    535  1.1  mrg #define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
    536  1.1  mrg #define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
    537  1.1  mrg #define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
    538  1.5  mrg #define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
    539  1.9  mrg #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
    540  1.9  mrg #define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
    541  1.9  mrg #define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
    542  1.3  mrg #define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
    543  1.1  mrg #define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
    544  1.1  mrg #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
    545  1.3  mrg #define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
    546  1.3  mrg #define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
    547  1.3  mrg #define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
    548  1.1  mrg #define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
    549  1.3  mrg #define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
    550  1.5  mrg #define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
    551  1.5  mrg #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
    552  1.5  mrg #define LOOP_VINFO_SCALAR_LOOP(L)	   (L)->scalar_loop
    553  1.6  mrg #define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
    554  1.6  mrg #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
    555  1.6  mrg #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
    556  1.8  mrg #define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info
    557  1.1  mrg 
    558  1.8  mrg #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
    559  1.5  mrg   ((L)->may_misalign_stmts.length () > 0)
    560  1.8  mrg #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
    561  1.9  mrg   ((L)->comp_alias_ddrs.length () > 0 \
    562  1.9  mrg    || (L)->check_unequal_addrs.length () > 0 \
    563  1.9  mrg    || (L)->lower_bounds.length () > 0)
    564  1.8  mrg #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
    565  1.8  mrg   (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
    566  1.8  mrg #define LOOP_REQUIRES_VERSIONING(L)			\
    567  1.8  mrg   (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
    568  1.8  mrg    || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
    569  1.8  mrg    || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L))
    570  1.1  mrg 
    571  1.1  mrg #define LOOP_VINFO_NITERS_KNOWN_P(L)          \
    572  1.5  mrg   (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
    573  1.1  mrg 
    574  1.8  mrg #define LOOP_VINFO_EPILOGUE_P(L) \
    575  1.8  mrg   (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
    576  1.8  mrg 
    577  1.9  mrg #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
    578  1.9  mrg   (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
    579  1.8  mrg 
    580  1.1  mrg static inline loop_vec_info
    581  1.1  mrg loop_vec_info_for_loop (struct loop *loop)
    582  1.1  mrg {
    583  1.1  mrg   return (loop_vec_info) loop->aux;
    584  1.1  mrg }
    585  1.1  mrg 
    586  1.1  mrg static inline bool
    587  1.6  mrg nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
    588  1.1  mrg {
    589  1.1  mrg   return (loop->inner
    590  1.1  mrg           && (loop->inner == (gimple_bb (stmt))->loop_father));
    591  1.1  mrg }
    592  1.1  mrg 
    593  1.6  mrg typedef struct _bb_vec_info : public vec_info
    594  1.6  mrg {
    595  1.9  mrg   _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator);
    596  1.9  mrg   ~_bb_vec_info ();
    597  1.9  mrg 
    598  1.1  mrg   basic_block bb;
    599  1.6  mrg   gimple_stmt_iterator region_begin;
    600  1.6  mrg   gimple_stmt_iterator region_end;
    601  1.1  mrg } *bb_vec_info;
    602  1.1  mrg 
    603  1.3  mrg #define BB_VINFO_BB(B)               (B)->bb
    604  1.3  mrg #define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
    605  1.3  mrg #define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
    606  1.3  mrg #define BB_VINFO_DATAREFS(B)         (B)->datarefs
    607  1.3  mrg #define BB_VINFO_DDRS(B)             (B)->ddrs
    608  1.3  mrg #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
    609  1.1  mrg 
    610  1.1  mrg static inline bb_vec_info
    611  1.1  mrg vec_info_for_bb (basic_block bb)
    612  1.1  mrg {
    613  1.1  mrg   return (bb_vec_info) bb->aux;
    614  1.1  mrg }
    615  1.1  mrg 
    616  1.1  mrg /*-----------------------------------------------------------------*/
    617  1.1  mrg /* Info on vectorized defs.                                        */
    618  1.1  mrg /*-----------------------------------------------------------------*/
    619  1.1  mrg enum stmt_vec_info_type {
    620  1.1  mrg   undef_vec_info_type = 0,
    621  1.1  mrg   load_vec_info_type,
    622  1.1  mrg   store_vec_info_type,
    623  1.1  mrg   shift_vec_info_type,
    624  1.1  mrg   op_vec_info_type,
    625  1.1  mrg   call_vec_info_type,
    626  1.5  mrg   call_simd_clone_vec_info_type,
    627  1.1  mrg   assignment_vec_info_type,
    628  1.1  mrg   condition_vec_info_type,
    629  1.6  mrg   comparison_vec_info_type,
    630  1.1  mrg   reduc_vec_info_type,
    631  1.1  mrg   induc_vec_info_type,
    632  1.1  mrg   type_promotion_vec_info_type,
    633  1.1  mrg   type_demotion_vec_info_type,
    634  1.1  mrg   type_conversion_vec_info_type,
    635  1.1  mrg   loop_exit_ctrl_vec_info_type
    636  1.1  mrg };
    637  1.1  mrg 
    638  1.1  mrg /* Indicates whether/how a variable is used in the scope of loop/basic
    639  1.1  mrg    block.  */
    640  1.1  mrg enum vect_relevant {
    641  1.1  mrg   vect_unused_in_scope = 0,
    642  1.8  mrg 
    643  1.8  mrg   /* The def is only used outside the loop.  */
    644  1.8  mrg   vect_used_only_live,
    645  1.1  mrg   /* The def is in the inner loop, and the use is in the outer loop, and the
    646  1.1  mrg      use is a reduction stmt.  */
    647  1.1  mrg   vect_used_in_outer_by_reduction,
    648  1.1  mrg   /* The def is in the inner loop, and the use is in the outer loop (and is
    649  1.1  mrg      not part of reduction).  */
    650  1.1  mrg   vect_used_in_outer,
    651  1.1  mrg 
    652  1.1  mrg   /* defs that feed computations that end up (only) in a reduction. These
    653  1.1  mrg      defs may be used by non-reduction stmts, but eventually, any
    654  1.1  mrg      computations/values that are affected by these defs are used to compute
    655  1.1  mrg      a reduction (i.e. don't get stored to memory, for example). We use this
     656  1.1  mrg      to identify computations whose evaluation order we are allowed
     657  1.1  mrg      to change.  */
    658  1.1  mrg   vect_used_by_reduction,
    659  1.1  mrg 
    660  1.1  mrg   vect_used_in_scope
    661  1.1  mrg };
    662  1.1  mrg 
    663  1.1  mrg /* The type of vectorization that can be applied to the stmt: regular loop-based
    664  1.1  mrg    vectorization; pure SLP - the stmt is a part of SLP instances and does not
    665  1.1  mrg    have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
    666  1.1  mrg    a part of SLP instance and also must be loop-based vectorized, since it has
    667  1.1  mrg    uses outside SLP sequences.
    668  1.1  mrg 
    669  1.1  mrg    In the loop context the meanings of pure and hybrid SLP are slightly
    670  1.1  mrg    different. By saying that pure SLP is applied to the loop, we mean that we
    671  1.1  mrg    exploit only intra-iteration parallelism in the loop; i.e., the loop can be
     672  1.1  mrg    vectorized without doing any conceptual unrolling, because we don't pack
    673  1.1  mrg    together stmts from different iterations, only within a single iteration.
    674  1.1  mrg    Loop hybrid SLP means that we exploit both intra-iteration and
    675  1.1  mrg    inter-iteration parallelism (e.g., number of elements in the vector is 4
    676  1.1  mrg    and the slp-group-size is 2, in which case we don't have enough parallelism
    677  1.1  mrg    within an iteration, so we obtain the rest of the parallelism from subsequent
    678  1.1  mrg    iterations by unrolling the loop by 2).  */
    679  1.1  mrg enum slp_vect_type {
    680  1.1  mrg   loop_vect = 0,
    681  1.1  mrg   pure_slp,
    682  1.1  mrg   hybrid
    683  1.1  mrg };
    684  1.1  mrg 
    685  1.9  mrg /* Says whether a statement is a load, a store of a vectorized statement
    686  1.9  mrg    result, or a store of an invariant value.  */
    687  1.9  mrg enum vec_load_store_type {
    688  1.9  mrg   VLS_LOAD,
    689  1.9  mrg   VLS_STORE,
    690  1.9  mrg   VLS_STORE_INVARIANT
    691  1.9  mrg };
    692  1.9  mrg 
    693  1.8  mrg /* Describes how we're going to vectorize an individual load or store,
    694  1.8  mrg    or a group of loads or stores.  */
    695  1.8  mrg enum vect_memory_access_type {
    696  1.8  mrg   /* An access to an invariant address.  This is used only for loads.  */
    697  1.8  mrg   VMAT_INVARIANT,
    698  1.8  mrg 
    699  1.8  mrg   /* A simple contiguous access.  */
    700  1.8  mrg   VMAT_CONTIGUOUS,
    701  1.8  mrg 
    702  1.8  mrg   /* A contiguous access that goes down in memory rather than up,
    703  1.8  mrg      with no additional permutation.  This is used only for stores
    704  1.8  mrg      of invariants.  */
    705  1.8  mrg   VMAT_CONTIGUOUS_DOWN,
    706  1.8  mrg 
    707  1.8  mrg   /* A simple contiguous access in which the elements need to be permuted
    708  1.8  mrg      after loading or before storing.  Only used for loop vectorization;
    709  1.8  mrg      SLP uses separate permutes.  */
    710  1.8  mrg   VMAT_CONTIGUOUS_PERMUTE,
    711  1.8  mrg 
    712  1.8  mrg   /* A simple contiguous access in which the elements need to be reversed
    713  1.8  mrg      after loading or before storing.  */
    714  1.8  mrg   VMAT_CONTIGUOUS_REVERSE,
    715  1.8  mrg 
    716  1.8  mrg   /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
    717  1.8  mrg   VMAT_LOAD_STORE_LANES,
    718  1.8  mrg 
    719  1.8  mrg   /* An access in which each scalar element is loaded or stored
    720  1.8  mrg      individually.  */
    721  1.8  mrg   VMAT_ELEMENTWISE,
    722  1.8  mrg 
    723  1.8  mrg   /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
    724  1.8  mrg      SLP accesses.  Each unrolled iteration uses a contiguous load
    725  1.8  mrg      or store for the whole group, but the groups from separate iterations
    726  1.8  mrg      are combined in the same way as for VMAT_ELEMENTWISE.  */
    727  1.8  mrg   VMAT_STRIDED_SLP,
    728  1.8  mrg 
    729  1.8  mrg   /* The access uses gather loads or scatter stores.  */
    730  1.8  mrg   VMAT_GATHER_SCATTER
    731  1.8  mrg };
    732  1.1  mrg 
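/* Illustrative sketch, not actual vectorizer code: scalar access patterns
   that would typically fall into some of the classifications above.  The
   actual choice also depends on target support and cost.  */

static inline void
example_memory_access_kinds (int *a, const int *b, int n)
{
  for (int i = 0; i < n; ++i)
    a[i] = b[i] + 1;		/* contiguous: VMAT_CONTIGUOUS.  */
  for (int i = 0; i < n; ++i)
    a[i] = b[n - 1 - i];	/* reversed load: VMAT_CONTIGUOUS_REVERSE.  */
  for (int i = 0; i < n; ++i)
    a[3 * i] = b[i];		/* constant stride 3: VMAT_ELEMENTWISE,
				   VMAT_STRIDED_SLP or VMAT_GATHER_SCATTER.  */
}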
    733  1.1  mrg typedef struct data_reference *dr_p;
    734  1.1  mrg 
    735  1.1  mrg typedef struct _stmt_vec_info {
    736  1.1  mrg 
    737  1.1  mrg   enum stmt_vec_info_type type;
    738  1.1  mrg 
     739  1.3  mrg   /* Indicates whether this stmt is part of a computation whose result is
    740  1.3  mrg      used outside the loop.  */
    741  1.3  mrg   bool live;
    742  1.3  mrg 
    743  1.3  mrg   /* Stmt is part of some pattern (computation idiom)  */
    744  1.3  mrg   bool in_pattern_p;
    745  1.3  mrg 
    746  1.8  mrg   /* Is this statement vectorizable or should it be skipped in (partial)
    747  1.8  mrg      vectorization.  */
    748  1.8  mrg   bool vectorizable;
    749  1.8  mrg 
    750  1.1  mrg   /* The stmt to which this info struct refers to.  */
    751  1.6  mrg   gimple *stmt;
    752  1.1  mrg 
    753  1.6  mrg   /* The vec_info with respect to which STMT is vectorized.  */
    754  1.6  mrg   vec_info *vinfo;
    755  1.1  mrg 
    756  1.3  mrg   /* The vector type to be used for the LHS of this statement.  */
    757  1.1  mrg   tree vectype;
    758  1.1  mrg 
    759  1.1  mrg   /* The vectorized version of the stmt.  */
    760  1.6  mrg   gimple *vectorized_stmt;
    761  1.1  mrg 
    762  1.1  mrg 
    763  1.9  mrg   /* The following is relevant only for stmts that contain a non-scalar
    764  1.1  mrg      data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
    765  1.9  mrg      at most one such data-ref.  */
    766  1.1  mrg 
    767  1.1  mrg   /* Information about the data-ref (access function, etc),
    768  1.1  mrg      relative to the inner-most containing loop.  */
    769  1.1  mrg   struct data_reference *data_ref_info;
    770  1.1  mrg 
    771  1.1  mrg   /* Information about the data-ref relative to this loop
    772  1.1  mrg      nest (the loop that is being considered for vectorization).  */
    773  1.9  mrg   innermost_loop_behavior dr_wrt_vec_loop;
    774  1.1  mrg 
    775  1.6  mrg   /* For loop PHI nodes, the base and evolution part of it.  This makes sure
    776  1.3  mrg      this information is still available in vect_update_ivs_after_vectorizer
     777  1.3  mrg      where we may not be able to re-analyze the PHI node's evolution as
    778  1.3  mrg      peeling for the prologue loop can make it unanalyzable.  The evolution
    779  1.6  mrg      part is still correct after peeling, but the base may have changed from
    780  1.6  mrg      the version here.  */
    781  1.6  mrg   tree loop_phi_evolution_base_unchanged;
    782  1.3  mrg   tree loop_phi_evolution_part;
    783  1.1  mrg 
    784  1.1  mrg   /* Used for various bookkeeping purposes, generally holding a pointer to
    785  1.1  mrg      some other stmt S that is in some way "related" to this stmt.
    786  1.1  mrg      Current use of this field is:
    787  1.1  mrg         If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
    788  1.1  mrg         true): S is the "pattern stmt" that represents (and replaces) the
    789  1.1  mrg         sequence of stmts that constitutes the pattern.  Similarly, the
    790  1.1  mrg         related_stmt of the "pattern stmt" points back to this stmt (which is
    791  1.1  mrg         the last stmt in the original sequence of stmts that constitutes the
    792  1.1  mrg         pattern).  */
    793  1.6  mrg   gimple *related_stmt;
    794  1.1  mrg 
    795  1.3  mrg   /* Used to keep a sequence of def stmts of a pattern stmt if such exists.  */
    796  1.3  mrg   gimple_seq pattern_def_seq;
    797  1.3  mrg 
    798  1.1  mrg   /* List of datarefs that are known to have the same alignment as the dataref
    799  1.1  mrg      of this stmt.  */
    800  1.3  mrg   vec<dr_p> same_align_refs;
    801  1.1  mrg 
    802  1.5  mrg   /* Selected SIMD clone's function info.  First vector element
    803  1.5  mrg      is SIMD clone's function decl, followed by a pair of trees (base + step)
    804  1.5  mrg      for linear arguments (pair of NULLs for other arguments).  */
    805  1.5  mrg   vec<tree> simd_clone_info;
    806  1.5  mrg 
    807  1.1  mrg   /* Classify the def of this stmt.  */
    808  1.1  mrg   enum vect_def_type def_type;
    809  1.1  mrg 
    810  1.3  mrg   /*  Whether the stmt is SLPed, loop-based vectorized, or both.  */
    811  1.3  mrg   enum slp_vect_type slp_type;
    812  1.3  mrg 
    813  1.3  mrg   /* Interleaving and reduction chains info.  */
    814  1.3  mrg   /* First element in the group.  */
    815  1.6  mrg   gimple *first_element;
    816  1.3  mrg   /* Pointer to the next element in the group.  */
    817  1.6  mrg   gimple *next_element;
     818  1.3  mrg   /* For data-refs, in case two or more stmts share a data-ref, this is the
    819  1.3  mrg      pointer to the previously detected stmt with the same dr.  */
    820  1.6  mrg   gimple *same_dr_stmt;
    821  1.3  mrg   /* The size of the group.  */
    822  1.1  mrg   unsigned int size;
    823  1.1  mrg   /* For stores, number of stores from this group seen. We vectorize the last
    824  1.1  mrg      one.  */
    825  1.1  mrg   unsigned int store_count;
    826  1.1  mrg   /* For loads only, the gap from the previous load. For consecutive loads, GAP
    827  1.1  mrg      is 1.  */
    828  1.1  mrg   unsigned int gap;
    829  1.1  mrg 
    830  1.3  mrg   /* The minimum negative dependence distance this stmt participates in
    831  1.3  mrg      or zero if none.  */
    832  1.3  mrg   unsigned int min_neg_dist;
    833  1.1  mrg 
     834  1.3  mrg   /* Not all stmts in the loop need to be vectorized, e.g., the increment
     835  1.3  mrg      of the loop induction variable and the computation of array indexes.
     836  1.3  mrg      RELEVANT indicates whether the stmt needs to be vectorized.  */
    837  1.3  mrg   enum vect_relevant relevant;
    838  1.1  mrg 
    839  1.6  mrg   /* For loads if this is a gather, for stores if this is a scatter.  */
    840  1.6  mrg   bool gather_scatter_p;
    841  1.6  mrg 
    842  1.6  mrg   /* True if this is an access with loop-invariant stride.  */
    843  1.6  mrg   bool strided_p;
    844  1.5  mrg 
    845  1.5  mrg   /* For both loads and stores.  */
    846  1.5  mrg   bool simd_lane_access_p;
    847  1.6  mrg 
    848  1.8  mrg   /* Classifies how the load or store is going to be implemented
    849  1.8  mrg      for loop vectorization.  */
    850  1.8  mrg   vect_memory_access_type memory_access_type;
    851  1.8  mrg 
    852  1.6  mrg   /* For reduction loops, this is the type of reduction.  */
    853  1.6  mrg   enum vect_reduction_type v_reduc_type;
    854  1.6  mrg 
    855  1.8  mrg   /* For CONST_COND_REDUCTION, record the reduc code.  */
    856  1.8  mrg   enum tree_code const_cond_reduc_code;
    857  1.8  mrg 
    858  1.9  mrg   /* On a reduction PHI the reduction type as detected by
    859  1.9  mrg      vect_force_simple_reduction.  */
    860  1.9  mrg   enum vect_reduction_type reduc_type;
    861  1.9  mrg 
    862  1.9  mrg   /* On a reduction PHI the def returned by vect_force_simple_reduction.
    863  1.9  mrg      On the def returned by vect_force_simple_reduction the
    864  1.9  mrg      corresponding PHI.  */
    865  1.9  mrg   gimple *reduc_def;
    866  1.9  mrg 
    867  1.6  mrg   /* The number of scalar stmt references from active SLP instances.  */
    868  1.6  mrg   unsigned int num_slp_uses;
    869  1.1  mrg } *stmt_vec_info;
    870  1.1  mrg 
    871  1.8  mrg /* Information about a gather/scatter call.  */
    872  1.8  mrg struct gather_scatter_info {
    873  1.9  mrg   /* The internal function to use for the gather/scatter operation,
    874  1.9  mrg      or IFN_LAST if a built-in function should be used instead.  */
    875  1.9  mrg   internal_fn ifn;
    876  1.9  mrg 
    877  1.9  mrg   /* The FUNCTION_DECL for the built-in gather/scatter function,
    878  1.9  mrg      or null if an internal function should be used instead.  */
    879  1.8  mrg   tree decl;
    880  1.8  mrg 
    881  1.8  mrg   /* The loop-invariant base value.  */
    882  1.8  mrg   tree base;
    883  1.8  mrg 
    884  1.8  mrg   /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
    885  1.8  mrg   tree offset;
    886  1.8  mrg 
    887  1.8  mrg   /* Each offset element should be multiplied by this amount before
    888  1.8  mrg      being added to the base.  */
    889  1.8  mrg   int scale;
    890  1.8  mrg 
    891  1.8  mrg   /* The definition type for the vectorized offset.  */
    892  1.8  mrg   enum vect_def_type offset_dt;
    893  1.8  mrg 
    894  1.8  mrg   /* The type of the vectorized offset.  */
    895  1.8  mrg   tree offset_vectype;
    896  1.9  mrg 
    897  1.9  mrg   /* The type of the scalar elements after loading or before storing.  */
    898  1.9  mrg   tree element_type;
    899  1.9  mrg 
    900  1.9  mrg   /* The type of the scalar elements being loaded or stored.  */
    901  1.9  mrg   tree memory_type;
    902  1.8  mrg };
    903  1.8  mrg 
    904  1.1  mrg /* Access Functions.  */
    905  1.1  mrg #define STMT_VINFO_TYPE(S)                 (S)->type
    906  1.1  mrg #define STMT_VINFO_STMT(S)                 (S)->stmt
    907  1.6  mrg inline loop_vec_info
    908  1.6  mrg STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
    909  1.6  mrg {
    910  1.6  mrg   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    911  1.6  mrg     return loop_vinfo;
    912  1.6  mrg   return NULL;
    913  1.6  mrg }
    914  1.6  mrg inline bb_vec_info
    915  1.6  mrg STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
    916  1.6  mrg {
    917  1.6  mrg   if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    918  1.6  mrg     return bb_vinfo;
    919  1.6  mrg   return NULL;
    920  1.6  mrg }
    921  1.1  mrg #define STMT_VINFO_RELEVANT(S)             (S)->relevant
    922  1.1  mrg #define STMT_VINFO_LIVE_P(S)               (S)->live
    923  1.1  mrg #define STMT_VINFO_VECTYPE(S)              (S)->vectype
    924  1.1  mrg #define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
    925  1.3  mrg #define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
    926  1.1  mrg #define STMT_VINFO_DATA_REF(S)             (S)->data_ref_info
    927  1.6  mrg #define STMT_VINFO_GATHER_SCATTER_P(S)	   (S)->gather_scatter_p
    928  1.6  mrg #define STMT_VINFO_STRIDED_P(S)	   	   (S)->strided_p
    929  1.8  mrg #define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
    930  1.5  mrg #define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
    931  1.6  mrg #define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
    932  1.8  mrg #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code
    933  1.1  mrg 
    934  1.9  mrg #define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
    935  1.9  mrg #define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
    936  1.9  mrg #define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
    937  1.9  mrg #define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
    938  1.9  mrg #define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
    939  1.9  mrg #define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
    940  1.9  mrg #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
    941  1.9  mrg   (S)->dr_wrt_vec_loop.base_misalignment
    942  1.9  mrg #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
    943  1.9  mrg   (S)->dr_wrt_vec_loop.offset_alignment
    944  1.9  mrg #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
    945  1.9  mrg   (S)->dr_wrt_vec_loop.step_alignment
    946  1.1  mrg 
    947  1.1  mrg #define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
    948  1.1  mrg #define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
    949  1.3  mrg #define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
    950  1.1  mrg #define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
    951  1.5  mrg #define STMT_VINFO_SIMD_CLONE_INFO(S)	   (S)->simd_clone_info
    952  1.1  mrg #define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
    953  1.3  mrg #define STMT_VINFO_GROUP_FIRST_ELEMENT(S)  (S)->first_element
    954  1.3  mrg #define STMT_VINFO_GROUP_NEXT_ELEMENT(S)   (S)->next_element
    955  1.3  mrg #define STMT_VINFO_GROUP_SIZE(S)           (S)->size
    956  1.3  mrg #define STMT_VINFO_GROUP_STORE_COUNT(S)    (S)->store_count
    957  1.3  mrg #define STMT_VINFO_GROUP_GAP(S)            (S)->gap
    958  1.3  mrg #define STMT_VINFO_GROUP_SAME_DR_STMT(S)   (S)->same_dr_stmt
    959  1.3  mrg #define STMT_VINFO_GROUPED_ACCESS(S)      ((S)->first_element != NULL && (S)->data_ref_info)
    960  1.6  mrg #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
    961  1.3  mrg #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
    962  1.3  mrg #define STMT_VINFO_MIN_NEG_DIST(S)	(S)->min_neg_dist
    963  1.6  mrg #define STMT_VINFO_NUM_SLP_USES(S)	(S)->num_slp_uses
    964  1.9  mrg #define STMT_VINFO_REDUC_TYPE(S)	(S)->reduc_type
    965  1.9  mrg #define STMT_VINFO_REDUC_DEF(S)		(S)->reduc_def
    966  1.3  mrg 
    967  1.3  mrg #define GROUP_FIRST_ELEMENT(S)          (S)->first_element
    968  1.3  mrg #define GROUP_NEXT_ELEMENT(S)           (S)->next_element
    969  1.3  mrg #define GROUP_SIZE(S)                   (S)->size
    970  1.3  mrg #define GROUP_STORE_COUNT(S)            (S)->store_count
    971  1.3  mrg #define GROUP_GAP(S)                    (S)->gap
    972  1.3  mrg #define GROUP_SAME_DR_STMT(S)           (S)->same_dr_stmt
    973  1.1  mrg 
    974  1.1  mrg #define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)
    975  1.1  mrg 
    976  1.1  mrg #define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
    977  1.1  mrg #define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
    978  1.1  mrg #define STMT_SLP_TYPE(S)                   (S)->slp_type
    979  1.1  mrg 
    980  1.5  mrg struct dataref_aux {
    981  1.9  mrg   /* The misalignment in bytes of the reference, or -1 if not known.  */
    982  1.5  mrg   int misalignment;
    983  1.9  mrg   /* The byte alignment that we'd ideally like the reference to have,
    984  1.9  mrg      and the value that misalignment is measured against.  */
    985  1.9  mrg   int target_alignment;
    986  1.5  mrg   /* If true the alignment of base_decl needs to be increased.  */
    987  1.5  mrg   bool base_misaligned;
    988  1.5  mrg   tree base_decl;
    989  1.5  mrg };
    990  1.5  mrg 
    991  1.5  mrg #define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)
    992  1.5  mrg 
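/* Illustrative sketch, not actual vectorizer code: MISALIGNMENT above is
   measured against TARGET_ALIGNMENT, so for a known byte address it is
   just the address modulo the (power-of-two) target alignment.  */

static inline int
example_dr_misalignment (unsigned long long address, int target_alignment)
{
  return (int) (address & (target_alignment - 1));
}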
    993  1.3  mrg #define VECT_MAX_COST 1000
    994  1.1  mrg 
    995  1.1  mrg /* The maximum number of intermediate steps required in multi-step type
    996  1.1  mrg    conversion.  */
    997  1.1  mrg #define MAX_INTERM_CVT_STEPS         3
    998  1.1  mrg 
    999  1.9  mrg #define MAX_VECTORIZATION_FACTOR INT_MAX
   1000  1.3  mrg 
    1001  1.8  mrg /* Nonzero if TYPE represents a (scalar) boolean type or a type
   1002  1.8  mrg    in the middle-end compatible with it (unsigned precision 1 integral
   1003  1.8  mrg    types).  Used to determine which types should be vectorized as
   1004  1.8  mrg    VECTOR_BOOLEAN_TYPE_P.  */
   1005  1.8  mrg 
   1006  1.8  mrg #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
   1007  1.8  mrg   (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   1008  1.8  mrg    || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
   1009  1.8  mrg 	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
   1010  1.8  mrg        && TYPE_PRECISION (TYPE) == 1		\
   1011  1.8  mrg        && TYPE_UNSIGNED (TYPE)))
   1012  1.8  mrg 
   1013  1.6  mrg extern vec<stmt_vec_info> stmt_vec_info_vec;
   1014  1.1  mrg 
   1015  1.1  mrg void init_stmt_vec_info_vec (void);
   1016  1.1  mrg void free_stmt_vec_info_vec (void);
   1017  1.1  mrg 
   1018  1.3  mrg /* Return a stmt_vec_info corresponding to STMT.  */
   1019  1.3  mrg 
   1020  1.1  mrg static inline stmt_vec_info
   1021  1.6  mrg vinfo_for_stmt (gimple *stmt)
   1022  1.1  mrg {
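  /* A UID of 0 means no stmt_vec_info has been associated with the stmt;
     otherwise the UID is a 1-based index into stmt_vec_info_vec (see
     set_vinfo_for_stmt below).  */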
   1023  1.9  mrg   int uid = gimple_uid (stmt);
   1024  1.9  mrg   if (uid <= 0)
   1025  1.1  mrg     return NULL;
   1026  1.1  mrg 
   1027  1.6  mrg   return stmt_vec_info_vec[uid - 1];
   1028  1.1  mrg }
   1029  1.1  mrg 
   1030  1.3  mrg /* Set vectorizer information INFO for STMT.  */
   1031  1.3  mrg 
   1032  1.1  mrg static inline void
   1033  1.6  mrg set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
   1034  1.1  mrg {
   1035  1.1  mrg   unsigned int uid = gimple_uid (stmt);
   1036  1.1  mrg   if (uid == 0)
   1037  1.1  mrg     {
   1038  1.3  mrg       gcc_checking_assert (info);
   1039  1.3  mrg       uid = stmt_vec_info_vec.length () + 1;
   1040  1.1  mrg       gimple_set_uid (stmt, uid);
   1041  1.6  mrg       stmt_vec_info_vec.safe_push (info);
   1042  1.1  mrg     }
   1043  1.1  mrg   else
   1044  1.6  mrg     {
   1045  1.6  mrg       gcc_checking_assert (info == NULL);
   1046  1.6  mrg       stmt_vec_info_vec[uid - 1] = info;
   1047  1.6  mrg     }
   1048  1.1  mrg }
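/* A sketch of the convention implemented above (illustrative summary,
   not from the original source): gimple_uid (stmt) is 0 for statements
   with no vectorizer information and otherwise acts as a 1-based index
   into stmt_vec_info_vec, so

     stmt_vec_info info = vinfo_for_stmt (stmt);

   returns stmt_vec_info_vec[gimple_uid (stmt) - 1], or NULL when the
   uid is zero or negative.  */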
   1049  1.1  mrg 
    1050  1.8  mrg /* Return TRUE if the statement represented by STMT_INFO is part of a
    1051  1.8  mrg    recognized pattern.  */
   1052  1.3  mrg 
   1053  1.8  mrg static inline bool
   1054  1.8  mrg is_pattern_stmt_p (stmt_vec_info stmt_info)
   1055  1.1  mrg {
   1056  1.8  mrg   gimple *related_stmt;
   1057  1.8  mrg   stmt_vec_info related_stmt_info;
   1058  1.1  mrg 
   1059  1.8  mrg   related_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
   1060  1.8  mrg   if (related_stmt
   1061  1.8  mrg       && (related_stmt_info = vinfo_for_stmt (related_stmt))
   1062  1.8  mrg       && STMT_VINFO_IN_PATTERN_P (related_stmt_info))
   1063  1.8  mrg     return true;
   1064  1.1  mrg 
   1065  1.8  mrg   return false;
   1066  1.1  mrg }
   1067  1.1  mrg 
    1068  1.3  mrg /* Return the later of the two statements STMT1 and STMT2.  */
   1069  1.3  mrg 
   1070  1.6  mrg static inline gimple *
   1071  1.6  mrg get_later_stmt (gimple *stmt1, gimple *stmt2)
   1072  1.3  mrg {
   1073  1.3  mrg   unsigned int uid1, uid2;
   1074  1.3  mrg 
   1075  1.3  mrg   if (stmt1 == NULL)
   1076  1.3  mrg     return stmt2;
   1077  1.3  mrg 
   1078  1.3  mrg   if (stmt2 == NULL)
   1079  1.3  mrg     return stmt1;
   1080  1.3  mrg 
   1081  1.8  mrg   stmt_vec_info stmt_info1 = vinfo_for_stmt (stmt1);
   1082  1.8  mrg   stmt_vec_info stmt_info2 = vinfo_for_stmt (stmt2);
   1083  1.8  mrg   uid1 = gimple_uid (is_pattern_stmt_p (stmt_info1)
   1084  1.8  mrg 		     ? STMT_VINFO_RELATED_STMT (stmt_info1) : stmt1);
   1085  1.8  mrg   uid2 = gimple_uid (is_pattern_stmt_p (stmt_info2)
   1086  1.8  mrg 		     ? STMT_VINFO_RELATED_STMT (stmt_info2) : stmt2);
   1087  1.3  mrg 
   1088  1.3  mrg   if (uid1 == 0 || uid2 == 0)
   1089  1.3  mrg     return NULL;
   1090  1.3  mrg 
   1091  1.3  mrg   gcc_assert (uid1 <= stmt_vec_info_vec.length ());
   1092  1.3  mrg   gcc_assert (uid2 <= stmt_vec_info_vec.length ());
   1093  1.3  mrg 
   1094  1.3  mrg   if (uid1 > uid2)
   1095  1.3  mrg     return stmt1;
   1096  1.3  mrg   else
   1097  1.3  mrg     return stmt2;
   1098  1.3  mrg }
   1099  1.3  mrg 
   1100  1.3  mrg /* Return true if BB is a loop header.  */
   1101  1.3  mrg 
   1102  1.1  mrg static inline bool
   1103  1.1  mrg is_loop_header_bb_p (basic_block bb)
   1104  1.1  mrg {
   1105  1.1  mrg   if (bb == (bb->loop_father)->header)
   1106  1.1  mrg     return true;
   1107  1.3  mrg   gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
   1108  1.1  mrg   return false;
   1109  1.1  mrg }
   1110  1.1  mrg 
    1111  1.3  mrg /* Return 2 raised to the power X.  */
   1112  1.1  mrg 
   1113  1.1  mrg static inline int
   1114  1.1  mrg vect_pow2 (int x)
   1115  1.1  mrg {
   1116  1.1  mrg   int i, res = 1;
   1117  1.1  mrg 
   1118  1.1  mrg   for (i = 0; i < x; i++)
   1119  1.1  mrg     res *= 2;
   1120  1.1  mrg 
   1121  1.1  mrg   return res;
   1122  1.1  mrg }
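/* For example, vect_pow2 (0) == 1 and vect_pow2 (3) == 8.  */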
   1123  1.1  mrg 
   1124  1.3  mrg /* Alias targetm.vectorize.builtin_vectorization_cost.  */
   1125  1.3  mrg 
   1126  1.3  mrg static inline int
   1127  1.3  mrg builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
   1128  1.3  mrg 			    tree vectype, int misalign)
   1129  1.3  mrg {
   1130  1.3  mrg   return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
   1131  1.3  mrg 						       vectype, misalign);
   1132  1.3  mrg }
   1133  1.3  mrg 
    1134  1.3  mrg /* Get the cost of a statement of kind TYPE_OF_COST via the target cost builtin.  */
   1135  1.3  mrg 
   1136  1.3  mrg static inline
   1137  1.3  mrg int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
   1138  1.3  mrg {
   1139  1.3  mrg   return builtin_vectorization_cost (type_of_cost, NULL, 0);
   1140  1.3  mrg }
   1141  1.3  mrg 
   1142  1.3  mrg /* Alias targetm.vectorize.init_cost.  */
   1143  1.3  mrg 
   1144  1.3  mrg static inline void *
   1145  1.3  mrg init_cost (struct loop *loop_info)
   1146  1.3  mrg {
   1147  1.3  mrg   return targetm.vectorize.init_cost (loop_info);
   1148  1.3  mrg }
   1149  1.3  mrg 
   1150  1.3  mrg /* Alias targetm.vectorize.add_stmt_cost.  */
   1151  1.3  mrg 
   1152  1.3  mrg static inline unsigned
   1153  1.3  mrg add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
   1154  1.3  mrg 	       stmt_vec_info stmt_info, int misalign,
   1155  1.3  mrg 	       enum vect_cost_model_location where)
   1156  1.3  mrg {
   1157  1.3  mrg   return targetm.vectorize.add_stmt_cost (data, count, kind,
   1158  1.3  mrg 					  stmt_info, misalign, where);
   1159  1.3  mrg }
   1160  1.3  mrg 
   1161  1.3  mrg /* Alias targetm.vectorize.finish_cost.  */
   1162  1.3  mrg 
   1163  1.3  mrg static inline void
   1164  1.3  mrg finish_cost (void *data, unsigned *prologue_cost,
   1165  1.3  mrg 	     unsigned *body_cost, unsigned *epilogue_cost)
   1166  1.3  mrg {
   1167  1.3  mrg   targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
   1168  1.3  mrg }
   1169  1.3  mrg 
   1170  1.3  mrg /* Alias targetm.vectorize.destroy_cost_data.  */
   1171  1.3  mrg 
   1172  1.3  mrg static inline void
   1173  1.3  mrg destroy_cost_data (void *data)
   1174  1.3  mrg {
   1175  1.3  mrg   targetm.vectorize.destroy_cost_data (data);
   1176  1.3  mrg }
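/* Usage sketch for the cost hooks above (illustrative only; the variable
   names are hypothetical):

     void *cost_data = init_cost (loop);
     add_stmt_cost (cost_data, 1, vector_stmt, stmt_info, 0, vect_body);
     ...
     unsigned prologue_cost, body_cost, epilogue_cost;
     finish_cost (cost_data, &prologue_cost, &body_cost, &epilogue_cost);
     destroy_cost_data (cost_data);
*/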
   1177  1.3  mrg 
   1178  1.1  mrg /*-----------------------------------------------------------------*/
   1179  1.1  mrg /* Info on data references alignment.                              */
   1180  1.1  mrg /*-----------------------------------------------------------------*/
   1181  1.5  mrg inline void
   1182  1.5  mrg set_dr_misalignment (struct data_reference *dr, int val)
   1183  1.5  mrg {
   1184  1.5  mrg   dataref_aux *data_aux = DR_VECT_AUX (dr);
   1185  1.5  mrg 
   1186  1.5  mrg   if (!data_aux)
   1187  1.5  mrg     {
   1188  1.5  mrg       data_aux = XCNEW (dataref_aux);
   1189  1.5  mrg       dr->aux = data_aux;
   1190  1.5  mrg     }
   1191  1.5  mrg 
   1192  1.5  mrg   data_aux->misalignment = val;
   1193  1.5  mrg }
   1194  1.5  mrg 
   1195  1.5  mrg inline int
   1196  1.5  mrg dr_misalignment (struct data_reference *dr)
   1197  1.5  mrg {
   1198  1.5  mrg   return DR_VECT_AUX (dr)->misalignment;
   1199  1.5  mrg }
   1200  1.1  mrg 
    1201  1.1  mrg /* The misalignment in bytes of the first access in the vectorized loop,
    1202  1.1  mrg    taking into account any peeling or versioning that has been applied.  */
   1203  1.5  mrg #define DR_MISALIGNMENT(DR) dr_misalignment (DR)
   1204  1.5  mrg #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
   1205  1.9  mrg #define DR_MISALIGNMENT_UNKNOWN (-1)
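/* For instance (illustrative, not from the original source): if the
   target alignment of DR is 16 bytes and the first access in the
   vectorized loop is known to start 4 bytes past a 16-byte boundary,
   DR_MISALIGNMENT (DR) is 4; if nothing is known about the offset it is
   DR_MISALIGNMENT_UNKNOWN.  */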
   1206  1.1  mrg 
   1207  1.9  mrg /* Only defined once DR_MISALIGNMENT is defined.  */
   1208  1.9  mrg #define DR_TARGET_ALIGNMENT(DR) DR_VECT_AUX (DR)->target_alignment
   1209  1.9  mrg 
   1210  1.9  mrg /* Return true if data access DR is aligned to its target alignment
   1211  1.9  mrg    (which may be less than a full vector).  */
   1212  1.3  mrg 
   1213  1.1  mrg static inline bool
   1214  1.1  mrg aligned_access_p (struct data_reference *data_ref_info)
   1215  1.1  mrg {
   1216  1.1  mrg   return (DR_MISALIGNMENT (data_ref_info) == 0);
   1217  1.1  mrg }
   1218  1.1  mrg 
   1219  1.3  mrg /* Return TRUE if the alignment of the data access is known, and FALSE
   1220  1.3  mrg    otherwise.  */
   1221  1.3  mrg 
   1222  1.1  mrg static inline bool
   1223  1.1  mrg known_alignment_for_access_p (struct data_reference *data_ref_info)
   1224  1.1  mrg {
   1225  1.9  mrg   return (DR_MISALIGNMENT (data_ref_info) != DR_MISALIGNMENT_UNKNOWN);
   1226  1.9  mrg }
   1227  1.9  mrg 
   1228  1.9  mrg /* Return the minimum alignment in bytes that the vectorized version
   1229  1.9  mrg    of DR is guaranteed to have.  */
   1230  1.9  mrg 
   1231  1.9  mrg static inline unsigned int
   1232  1.9  mrg vect_known_alignment_in_bytes (struct data_reference *dr)
   1233  1.9  mrg {
   1234  1.9  mrg   if (DR_MISALIGNMENT (dr) == DR_MISALIGNMENT_UNKNOWN)
   1235  1.9  mrg     return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr)));
   1236  1.9  mrg   if (DR_MISALIGNMENT (dr) == 0)
   1237  1.9  mrg     return DR_TARGET_ALIGNMENT (dr);
   1238  1.9  mrg   return DR_MISALIGNMENT (dr) & -DR_MISALIGNMENT (dr);
   1239  1.1  mrg }
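/* Worked example (illustrative): with a target alignment of 16 bytes,
   a known misalignment of 12 guarantees only 4-byte alignment, since
   12 & -12 == 4; a misalignment of 0 guarantees the full 16 bytes; and
   an unknown misalignment falls back to the alignment of the scalar
   type being accessed.  */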
   1240  1.1  mrg 
   1241  1.9  mrg /* Return the behavior of DR with respect to the vectorization context
   1242  1.9  mrg    (which for outer loop vectorization might not be the behavior recorded
   1243  1.9  mrg    in DR itself).  */
   1244  1.9  mrg 
   1245  1.9  mrg static inline innermost_loop_behavior *
   1246  1.9  mrg vect_dr_behavior (data_reference *dr)
   1247  1.9  mrg {
   1248  1.9  mrg   gimple *stmt = DR_STMT (dr);
   1249  1.9  mrg   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   1250  1.9  mrg   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   1251  1.9  mrg   if (loop_vinfo == NULL
   1252  1.9  mrg       || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt))
   1253  1.9  mrg     return &DR_INNERMOST (dr);
   1254  1.9  mrg   else
   1255  1.9  mrg     return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
   1256  1.9  mrg }
   1257  1.5  mrg 
   1258  1.5  mrg /* Return true if the vect cost model is unlimited.  */
   1259  1.5  mrg static inline bool
   1260  1.5  mrg unlimited_cost_model (loop_p loop)
   1261  1.5  mrg {
   1262  1.5  mrg   if (loop != NULL && loop->force_vectorize
   1263  1.5  mrg       && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
   1264  1.5  mrg     return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
   1265  1.5  mrg   return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
   1266  1.5  mrg }
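/* For instance (illustrative): a loop marked force_vectorize (e.g. by
   `#pragma omp simd') follows -fsimd-cost-model when that flag is set
   to something other than the default, and every other loop follows
   -fvect-cost-model; in either case the function returns true only for
   the "unlimited" setting.  */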
   1267  1.5  mrg 
    1268  1.9  mrg /* Return true if the loop described by LOOP_VINFO is fully-masked and
    1269  1.9  mrg    the first iteration should use a partial mask in order to achieve
    1270  1.9  mrg    alignment.  */
   1271  1.9  mrg 
   1272  1.9  mrg static inline bool
   1273  1.9  mrg vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
   1274  1.9  mrg {
   1275  1.9  mrg   return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
   1276  1.9  mrg 	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
   1277  1.9  mrg }
   1278  1.9  mrg 
   1279  1.9  mrg /* Return the number of vectors of type VECTYPE that are needed to get
   1280  1.9  mrg    NUNITS elements.  NUNITS should be based on the vectorization factor,
   1281  1.9  mrg    so it is always a known multiple of the number of elements in VECTYPE.  */
   1282  1.9  mrg 
   1283  1.9  mrg static inline unsigned int
   1284  1.9  mrg vect_get_num_vectors (poly_uint64 nunits, tree vectype)
   1285  1.9  mrg {
   1286  1.9  mrg   return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
   1287  1.9  mrg }
   1288  1.9  mrg 
   1289  1.9  mrg /* Return the number of copies needed for loop vectorization when
   1290  1.9  mrg    a statement operates on vectors of type VECTYPE.  This is the
   1291  1.9  mrg    vectorization factor divided by the number of elements in
   1292  1.9  mrg    VECTYPE and is always known at compile time.  */
   1293  1.9  mrg 
   1294  1.9  mrg static inline unsigned int
   1295  1.9  mrg vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
   1296  1.9  mrg {
   1297  1.9  mrg   return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
   1298  1.9  mrg }
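/* For instance (illustrative): with a vectorization factor of 8 and a
   VECTYPE holding 4 elements, a vectorized statement needs

     vect_get_num_copies (loop_vinfo, vectype) == 2

   copies, i.e. two vector statements per iteration of the vectorized
   loop.  */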
   1299  1.9  mrg 
   1300  1.9  mrg /* Update maximum unit count *MAX_NUNITS so that it accounts for
   1301  1.9  mrg    the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
   1302  1.9  mrg    if we haven't yet recorded any vector types.  */
   1303  1.9  mrg 
   1304  1.9  mrg static inline void
   1305  1.9  mrg vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
   1306  1.9  mrg {
   1307  1.9  mrg   /* All unit counts have the form current_vector_size * X for some
   1308  1.9  mrg      rational X, so two unit sizes must have a common multiple.
   1309  1.9  mrg      Everything is a multiple of the initial value of 1.  */
   1310  1.9  mrg   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
   1311  1.9  mrg   *max_nunits = force_common_multiple (*max_nunits, nunits);
   1312  1.9  mrg }
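/* For instance (illustrative): starting from the initial value of 1,
   recording a 4-element vector type sets *MAX_NUNITS to 4, and then
   recording an 8-element type raises it to 8, the common multiple of
   the two unit counts.  */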
   1313  1.9  mrg 
   1314  1.9  mrg /* Return the vectorization factor that should be used for costing
   1315  1.9  mrg    purposes while vectorizing the loop described by LOOP_VINFO.
   1316  1.9  mrg    Pick a reasonable estimate if the vectorization factor isn't
   1317  1.9  mrg    known at compile time.  */
   1318  1.9  mrg 
   1319  1.9  mrg static inline unsigned int
   1320  1.9  mrg vect_vf_for_cost (loop_vec_info loop_vinfo)
   1321  1.9  mrg {
   1322  1.9  mrg   return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
   1323  1.9  mrg }
   1324  1.9  mrg 
   1325  1.9  mrg /* Estimate the number of elements in VEC_TYPE for costing purposes.
   1326  1.9  mrg    Pick a reasonable estimate if the exact number isn't known at
   1327  1.9  mrg    compile time.  */
   1328  1.9  mrg 
   1329  1.9  mrg static inline unsigned int
   1330  1.9  mrg vect_nunits_for_cost (tree vec_type)
   1331  1.9  mrg {
   1332  1.9  mrg   return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
   1333  1.9  mrg }
   1334  1.9  mrg 
   1335  1.9  mrg /* Return the maximum possible vectorization factor for LOOP_VINFO.  */
   1336  1.9  mrg 
   1337  1.9  mrg static inline unsigned HOST_WIDE_INT
   1338  1.9  mrg vect_max_vf (loop_vec_info loop_vinfo)
   1339  1.9  mrg {
   1340  1.9  mrg   unsigned HOST_WIDE_INT vf;
   1341  1.9  mrg   if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
   1342  1.9  mrg     return vf;
   1343  1.9  mrg   return MAX_VECTORIZATION_FACTOR;
   1344  1.9  mrg }
   1345  1.9  mrg 
   1346  1.9  mrg /* Return the size of the value accessed by unvectorized data reference DR.
   1347  1.9  mrg    This is only valid once STMT_VINFO_VECTYPE has been calculated for the
   1348  1.9  mrg    associated gimple statement, since that guarantees that DR accesses
   1349  1.9  mrg    either a scalar or a scalar equivalent.  ("Scalar equivalent" here
   1350  1.9  mrg    includes things like V1SI, which can be vectorized in the same way
   1351  1.9  mrg    as a plain SI.)  */
   1352  1.9  mrg 
   1353  1.9  mrg inline unsigned int
   1354  1.9  mrg vect_get_scalar_dr_size (struct data_reference *dr)
   1355  1.9  mrg {
   1356  1.9  mrg   return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))));
   1357  1.9  mrg }
   1358  1.9  mrg 
    1359  1.3  mrg /* Source location used by the vectorizer for diagnostics and dump messages.  */
   1360  1.5  mrg extern source_location vect_location;
   1361  1.1  mrg 
   1362  1.1  mrg /*-----------------------------------------------------------------*/
   1363  1.1  mrg /* Function prototypes.                                            */
   1364  1.1  mrg /*-----------------------------------------------------------------*/
   1365  1.1  mrg 
    1366  1.1  mrg /* Simple loop peeling and versioning utilities for the vectorizer's
    1367  1.1  mrg    purposes - in tree-vect-loop-manip.c.  */
   1368  1.9  mrg extern void vect_set_loop_condition (struct loop *, loop_vec_info,
   1369  1.9  mrg 				     tree, tree, tree, bool);
   1370  1.1  mrg extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
   1371  1.5  mrg struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
   1372  1.5  mrg 						     struct loop *, edge);
   1373  1.9  mrg extern void vect_loop_versioning (loop_vec_info, unsigned int, bool,
   1374  1.9  mrg 				  poly_uint64);
   1375  1.8  mrg extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
   1376  1.9  mrg 				     tree *, tree *, tree *, int, bool, bool);
   1377  1.9  mrg extern void vect_prepare_for_masked_peels (loop_vec_info);
   1378  1.5  mrg extern source_location find_loop_location (struct loop *);
   1379  1.1  mrg extern bool vect_can_advance_ivs_p (loop_vec_info);
   1380  1.1  mrg 
   1381  1.1  mrg /* In tree-vect-stmts.c.  */
   1382  1.9  mrg extern poly_uint64 current_vector_size;
   1383  1.1  mrg extern tree get_vectype_for_scalar_type (tree);
   1384  1.9  mrg extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
   1385  1.6  mrg extern tree get_mask_type_for_scalar_type (tree);
   1386  1.3  mrg extern tree get_same_sized_vectype (tree, tree);
   1387  1.9  mrg extern bool vect_get_loop_mask_type (loop_vec_info);
   1388  1.6  mrg extern bool vect_is_simple_use (tree, vec_info *, gimple **,
   1389  1.6  mrg                                 enum vect_def_type *);
   1390  1.6  mrg extern bool vect_is_simple_use (tree, vec_info *, gimple **,
   1391  1.6  mrg 				enum vect_def_type *, tree *);
   1392  1.6  mrg extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
   1393  1.6  mrg 					    tree, enum tree_code *,
   1394  1.6  mrg 					    enum tree_code *, int *,
   1395  1.6  mrg 					    vec<tree> *);
   1396  1.3  mrg extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
   1397  1.3  mrg 					     enum tree_code *,
   1398  1.3  mrg 					     int *, vec<tree> *);
   1399  1.6  mrg extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *);
   1400  1.6  mrg extern void free_stmt_vec_info (gimple *stmt);
   1401  1.1  mrg extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
   1402  1.9  mrg 				    int, stmt_vector_for_cost *,
   1403  1.3  mrg 				    stmt_vector_for_cost *);
   1404  1.8  mrg extern void vect_model_store_cost (stmt_vec_info, int, vect_memory_access_type,
   1405  1.9  mrg 				   vec_load_store_type, slp_tree,
   1406  1.3  mrg 				   stmt_vector_for_cost *,
   1407  1.3  mrg 				   stmt_vector_for_cost *);
   1408  1.8  mrg extern void vect_model_load_cost (stmt_vec_info, int, vect_memory_access_type,
   1409  1.8  mrg 				  slp_tree, stmt_vector_for_cost *,
   1410  1.3  mrg 				  stmt_vector_for_cost *);
   1411  1.3  mrg extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
   1412  1.3  mrg 				  enum vect_cost_for_stmt, stmt_vec_info,
   1413  1.3  mrg 				  int, enum vect_cost_model_location);
   1414  1.9  mrg extern void vect_finish_replace_stmt (gimple *, gimple *);
   1415  1.6  mrg extern void vect_finish_stmt_generation (gimple *, gimple *,
   1416  1.1  mrg                                          gimple_stmt_iterator *);
   1417  1.1  mrg extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
   1418  1.9  mrg extern tree vect_get_store_rhs (gimple *);
   1419  1.8  mrg extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type);
   1420  1.6  mrg extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
   1421  1.9  mrg extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
   1422  1.9  mrg 			       vec<tree> *, slp_tree);
   1423  1.9  mrg extern void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *,
   1424  1.9  mrg 					     vec<tree> *, vec<tree> *);
   1425  1.6  mrg extern tree vect_init_vector (gimple *, tree, tree,
   1426  1.1  mrg                               gimple_stmt_iterator *);
   1427  1.1  mrg extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
   1428  1.6  mrg extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
   1429  1.1  mrg                                  bool *, slp_tree, slp_instance);
   1430  1.6  mrg extern void vect_remove_stores (gimple *);
   1431  1.9  mrg extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance);
   1432  1.6  mrg extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
   1433  1.6  mrg 				    gimple **, tree, int, slp_tree);
   1434  1.3  mrg extern void vect_get_load_cost (struct data_reference *, int, bool,
   1435  1.3  mrg 				unsigned int *, unsigned int *,
   1436  1.3  mrg 				stmt_vector_for_cost *,
   1437  1.3  mrg 				stmt_vector_for_cost *, bool);
   1438  1.3  mrg extern void vect_get_store_cost (struct data_reference *, int,
   1439  1.3  mrg 				 unsigned int *, stmt_vector_for_cost *);
   1440  1.3  mrg extern bool vect_supportable_shift (enum tree_code, tree);
   1441  1.9  mrg extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
   1442  1.9  mrg extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
   1443  1.6  mrg extern void optimize_mask_stores (struct loop*);
   1444  1.9  mrg extern gcall *vect_gen_while (tree, tree, tree);
   1445  1.9  mrg extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
   1446  1.1  mrg 
   1447  1.1  mrg /* In tree-vect-data-refs.c.  */
   1448  1.1  mrg extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
   1449  1.1  mrg extern enum dr_alignment_support vect_supportable_dr_alignment
   1450  1.3  mrg                                            (struct data_reference *, bool);
   1451  1.6  mrg extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
   1452  1.1  mrg                                            HOST_WIDE_INT *);
   1453  1.9  mrg extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
   1454  1.6  mrg extern bool vect_slp_analyze_instance_dependence (slp_instance);
   1455  1.1  mrg extern bool vect_enhance_data_refs_alignment (loop_vec_info);
   1456  1.6  mrg extern bool vect_analyze_data_refs_alignment (loop_vec_info);
   1457  1.6  mrg extern bool vect_verify_datarefs_alignment (loop_vec_info);
   1458  1.6  mrg extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
   1459  1.6  mrg extern bool vect_analyze_data_ref_accesses (vec_info *);
   1460  1.1  mrg extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
   1461  1.9  mrg extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
   1462  1.9  mrg 				      signop, int, internal_fn *, tree *);
   1463  1.8  mrg extern bool vect_check_gather_scatter (gimple *, loop_vec_info,
   1464  1.8  mrg 				       gather_scatter_info *);
   1465  1.9  mrg extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *);
   1466  1.9  mrg extern void vect_record_base_alignments (vec_info *);
   1467  1.6  mrg extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
   1468  1.3  mrg 				      tree *, gimple_stmt_iterator *,
   1469  1.6  mrg 				      gimple **, bool, bool *,
   1470  1.9  mrg 				      tree = NULL_TREE, tree = NULL_TREE);
   1471  1.6  mrg extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
   1472  1.6  mrg 			     tree);
   1473  1.9  mrg extern void vect_copy_ref_info (tree, tree);
   1474  1.1  mrg extern tree vect_create_destination_var (tree, tree);
   1475  1.3  mrg extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
   1476  1.9  mrg extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
   1477  1.8  mrg extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
   1478  1.9  mrg extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
   1479  1.6  mrg extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *,
   1480  1.3  mrg                                     gimple_stmt_iterator *, vec<tree> *);
   1481  1.6  mrg extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *,
   1482  1.1  mrg                                     enum dr_alignment_support, tree,
   1483  1.1  mrg                                     struct loop **);
   1484  1.6  mrg extern void vect_transform_grouped_load (gimple *, vec<tree> , int,
   1485  1.1  mrg                                          gimple_stmt_iterator *);
   1486  1.6  mrg extern void vect_record_grouped_load_vectors (gimple *, vec<tree> );
   1487  1.1  mrg extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
   1488  1.6  mrg extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
   1489  1.6  mrg 				   const char * = NULL);
   1490  1.6  mrg extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
   1491  1.9  mrg 						  tree, tree = NULL_TREE);
   1492  1.1  mrg 
   1493  1.1  mrg /* In tree-vect-loop.c.  */
   1494  1.1  mrg /* FORNOW: Used in tree-parloops.c.  */
   1495  1.9  mrg extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *,
   1496  1.6  mrg 					    bool *, bool);
   1497  1.9  mrg /* Used in gimple-loop-interchange.c.  */
   1498  1.9  mrg extern bool check_reduction_path (location_t, loop_p, gphi *, tree,
   1499  1.9  mrg 				  enum tree_code);
   1500  1.1  mrg /* Drive for loop analysis stage.  */
   1501  1.8  mrg extern loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info);
   1502  1.9  mrg extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
   1503  1.9  mrg extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
   1504  1.9  mrg 					 tree *, bool);
   1505  1.9  mrg extern tree vect_halve_mask_nunits (tree);
   1506  1.9  mrg extern tree vect_double_mask_nunits (tree);
   1507  1.9  mrg extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
   1508  1.9  mrg 				   unsigned int, tree);
   1509  1.9  mrg extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
   1510  1.9  mrg 				unsigned int, tree, unsigned int);
   1511  1.9  mrg 
   1512  1.1  mrg /* Drive for loop transformation stage.  */
   1513  1.8  mrg extern struct loop *vect_transform_loop (loop_vec_info);
   1514  1.1  mrg extern loop_vec_info vect_analyze_loop_form (struct loop *);
   1515  1.6  mrg extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
   1516  1.8  mrg 					 slp_tree, int, gimple **);
   1517  1.6  mrg extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
   1518  1.9  mrg 				    gimple **, slp_tree, slp_instance);
   1519  1.9  mrg extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *,
   1520  1.6  mrg 				    gimple **, slp_tree);
   1521  1.6  mrg extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
   1522  1.9  mrg extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
   1523  1.5  mrg extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
   1524  1.5  mrg 					stmt_vector_for_cost *,
   1525  1.3  mrg 					stmt_vector_for_cost *,
   1526  1.3  mrg 					stmt_vector_for_cost *);
   1527  1.9  mrg extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
   1528  1.1  mrg 
   1529  1.1  mrg /* In tree-vect-slp.c.  */
   1530  1.1  mrg extern void vect_free_slp_instance (slp_instance);
   1531  1.5  mrg extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
   1532  1.9  mrg 					  gimple_stmt_iterator *, poly_uint64,
   1533  1.9  mrg 					  slp_instance, bool, unsigned *);
   1534  1.9  mrg extern bool vect_slp_analyze_operations (vec_info *);
   1535  1.6  mrg extern bool vect_schedule_slp (vec_info *);
   1536  1.6  mrg extern bool vect_analyze_slp (vec_info *, unsigned);
   1537  1.3  mrg extern bool vect_make_slp_decision (loop_vec_info);
   1538  1.1  mrg extern void vect_detect_hybrid_slp (loop_vec_info);
   1539  1.9  mrg extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *);
   1540  1.6  mrg extern bool vect_slp_bb (basic_block);
   1541  1.6  mrg extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree);
   1542  1.8  mrg extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info);
   1543  1.9  mrg extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
   1544  1.9  mrg 					    unsigned int * = NULL,
   1545  1.9  mrg 					    tree * = NULL, tree * = NULL);
   1546  1.9  mrg extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
   1547  1.9  mrg 				      unsigned int, vec<tree> &);
   1548  1.9  mrg extern int vect_get_place_in_interleaving_chain (gimple *, gimple *);
   1549  1.1  mrg 
   1550  1.1  mrg /* In tree-vect-patterns.c.  */
   1551  1.1  mrg /* Pattern recognition functions.
   1552  1.1  mrg    Additional pattern recognition functions can (and will) be added
   1553  1.1  mrg    in the future.  */
   1554  1.6  mrg typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
   1555  1.9  mrg #define NUM_PATTERNS 15
   1556  1.6  mrg void vect_pattern_recog (vec_info *);
   1557  1.1  mrg 
   1558  1.1  mrg /* In tree-vectorizer.c.  */
   1559  1.1  mrg unsigned vectorize_loops (void);
   1560  1.6  mrg bool vect_stmt_in_region_p (vec_info *, gimple *);
   1561  1.8  mrg void vect_free_loop_info_assumptions (struct loop *);
   1562  1.1  mrg 
   1563  1.1  mrg #endif  /* GCC_TREE_VECTORIZER_H  */
   1564