/* Vectorizer
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

typedef struct _stmt_vec_info *stmt_vec_info;

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"

/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};

/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};

/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};

/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};
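
/* As an illustrative sketch (not part of the original comment), a
   COND_REDUCTION typically arises from a conditional update such as:

     for (int i = 0; i < n; ++i)
       if (a[i] == x)
         last = i;

   where the reduction result is selected by a condition, in contrast
   to a TREE_CODE_REDUCTION such as res += a[i], which combines values
   with a single tree code.  */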

#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  enum vect_cost_model_location where;
  stmt_vec_info stmt_info;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;

/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;

/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;

/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain the def-stmts of the operands of this node's
     statements.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size.  */
  unsigned int vec_stmts_size;
  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* The maximum number of vector elements for the subtree rooted
     at this node.  */
  poly_uint64 max_nunits;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};


/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;
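
/* As an illustrative sketch (not part of the original comment), a loop
   body such as

     out[4 * i + 0] = in[4 * i + 0] + 1;
     out[4 * i + 1] = in[4 * i + 1] + 2;
     out[4 * i + 2] = in[4 * i + 2] + 3;
     out[4 * i + 3] = in[4 * i + 3] + 4;

   gives rise to one SLP instance whose root node packs the four stores
   (GROUP_SIZE == 4), with children for the adds and for the loads; the
   load nodes are also collected in SLP_INSTANCE_LOADS.  */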


/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type


/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;

/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};

/* Vectorizer state shared between different analyses like vector sizes
   of the same CFG region.  */
struct vec_info_shared {
  vec_info_shared();
  ~vec_info_shared();

  void save_datarefs();
  void check_datarefs();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};

/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};

struct _loop_vec_info;
struct _bb_vec_info;

template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}


/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number
   of scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
         f[i * 2 + 0] += 1.0f;
         f[i * 2 + 1] += 2.0f;
         d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4
               VF = 4

     [ In this simple example the rgroups do correspond to the normal
       SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */

/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;
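
/* As an illustrative note (not part of the original comment): in the
   f/d example above both rgroups have nV == 1, so the table needs only
   the entry at index nV - 1 == 0, with max_nscalars_per_iter == 2
   (f's nS, the larger of the two); d's rgroup then reuses the masks
   computed for f by VIEW_CONVERTing them.  */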

/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers to.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor, i.e. the vectorization factor (VF).  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* For #pragma omp simd if (x) loops, the x expression.  If constant 0,
     the loop should not be vectorized; if constant non-zero, simd_if_cond
     shouldn't be set and the loop vectorized normally; if an SSA_NAME, the
     loop should be versioned on that condition, using the scalar loop if
     the condition is false and the vectorized loop otherwise.  */
  tree simd_if_cond;

  /* The data reference of unknown alignment according to which the loop
     was peeled.  */
  struct dr_vec_info *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* The unrolling factor needed to SLP the loop.  If pure SLP is applied
     to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if we have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
       #pragma omp simd
       for (int i = 0; i < m; i++)
         a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling, retain the total unchanged scalar loop iterations
   for the cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info
#define LOOP_VINFO_SIMD_IF_COND(L)         (L)->simd_if_cond

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0			\
   || (L)->check_unequal_addrs.length () > 0		\
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)	\
  (LOOP_VINFO_SIMD_IF_COND (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)		\
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
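
/* An illustrative sketch of a hypothetical caller (not part of this
   header), assuming the pointer-like semantics described above:

     opt_loop_vec_info maybe_vinfo = try_analyze_loop (loop);
     if (!maybe_vinfo)
       ... report or propagate the recorded opt_problem ...
     else
       ... proceed with the underlying loop_vec_info ...  */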

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

typedef struct _bb_vec_info : public vec_info
{
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};

/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};

struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;

struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers to.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;


  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to have
     at most one such data-ref.  */

  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen.  We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads, GAP
     is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized, e.g., the increment
     of the loop induction variable and computation of array indexes.
     RELEVANT indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;
};

/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};

/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)         (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)         (S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)           (S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)            (S)->reduc_def

#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */

#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))

static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
	  && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}

/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}

/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return the later statement between STMT1_INFO and STMT2_INFO.  */

static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}

/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}
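
/* An illustrative sketch (hypothetical statements, not from the
   original sources): if pattern recognition replaces

     S: prod = (int) a_short * (int) b_short;

   with a widening-multiply pattern statement

     P: patt_prod = a_short w* b_short;

   then STMT_VINFO_IN_PATTERN_P is true for S's stmt_vec_info,
   vect_stmt_to_vectorize maps S's info to P's, and vect_orig_stmt maps
   P's info back to S's.  */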

/* Return true if BB is a loop header.  */

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

/* Return 2 raised to the power of X.  */

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/* Alias targetm.vectorize.builtin_vectorization_cost.  */

static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}

/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}

/* Alias targetm.vectorize.init_cost.  */

static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
			    stmt_vec_info, int, unsigned,
			    enum vect_cost_model_location);

/* Alias targetm.vectorize.add_stmt_cost.  */

static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
						   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
		    cost, where);
  return cost;
}

/* Alias targetm.vectorize.finish_cost.  */

static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}

/* Alias targetm.vectorize.destroy_cost_data.  */

static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}

inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
		   cost->misalign, cost->where);
}
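
/* An illustrative sketch of the cost-model lifecycle (a hypothetical
   caller, not part of this header): create the target's cost data for
   a loop, record statement costs against it, read back the totals, and
   release it:

     void *data = init_cost (loop);
     add_stmt_cost (data, 1, vector_stmt, stmt_info, 0, vect_body);
     unsigned prologue_cost, body_cost, epilogue_cost;
     finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
     destroy_cost_data (data);  */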

/*-----------------------------------------------------------------*/
/* Info on data reference alignment.                               */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
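
/* An illustrative sketch (DR_INFO is assumed to come from the caller):
   the misalignment is measured in bytes from the target alignment
   boundary, with the two negative sentinels above meaning "unknown"
   and "not yet computed":

     SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
     ...
     if (DR_MISALIGNMENT (dr_info) == 0)
       ... the first access is aligned to the target alignment ...  */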

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)

/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */

static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}

/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */

static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
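
/* A worked example (values assumed): a misalignment of 12 means the
   access is 12 bytes past an aligned boundary, so 12 & -12 == 4
   guarantees at least 4-byte alignment; a misalignment of 0 guarantees
   the full target alignment.  */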

/* Return the behavior of DR_INFO with respect to the vectorization context
   (which for outer loop vectorization might not be the behavior recorded
   in DR_INFO itself).  */

static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
  stmt_vec_info stmt_info = dr_info->stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (loop_vinfo == NULL
      || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
    return &DR_INNERMOST (dr_info->dr);
  else
    return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}

/* Return true if the vect cost model is unlimited.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}
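
/* An illustrative mapping to the command line (assumed; see common.opt):
   flag_vect_cost_model is set by -fvect-cost-model= and
   flag_simd_cost_model by -fsimd-cost-model=, the latter taking
   precedence for loops that set loop->force_vectorize (e.g. via
   #pragma omp simd).  So with

     gcc -O2 -ftree-vectorize -fvect-cost-model=unlimited ...

   this function returns true for ordinary loops.  */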

/* Return true if the loop described by LOOP_VINFO is fully-masked and
   if the first iteration should use a partial mask in order to achieve
   alignment.  */

static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
  return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}

/* Return the number of vectors of type VECTYPE that are needed to get
   NUNITS elements.  NUNITS should be based on the vectorization factor,
   so it is always a known multiple of the number of elements in VECTYPE.  */

static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
  return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}

/* Return the number of copies needed for loop vectorization when
   a statement operates on vectors of type VECTYPE.  This is the
   vectorization factor divided by the number of elements in
   VECTYPE and is always known at compile time.  */

static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
  return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}
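
/* A worked example (numbers assumed): with a vectorization factor of 8
   and VECTYPE V4SI (four elements), vect_get_num_copies returns
   8 / 4 == 2, i.e. each scalar statement becomes two vector
   statements.  */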

/* Update maximum unit count *MAX_NUNITS so that it accounts for
   NUNITS.  *MAX_NUNITS can be 1 if we haven't yet recorded anything.  */

static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
{
  /* All unit counts have the form current_vector_size * X for some
     rational X, so two unit sizes must have a common multiple.
     Everything is a multiple of the initial value of 1.  */
  *max_nunits = force_common_multiple (*max_nunits, nunits);
}

/* Update maximum unit count *MAX_NUNITS so that it accounts for
   the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
   if we haven't yet recorded any vector types.  */

static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
  vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
}
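
/* A worked example (values assumed): starting from the initial value
   of 1, recording V8HI (8 units) and then V4SI (4 units) leaves
   *MAX_NUNITS == 8, since 8 is a common multiple of 1, 8 and 4.  */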

/* Return the vectorization factor that should be used for costing
   purposes while vectorizing the loop described by LOOP_VINFO.
   Pick a reasonable estimate if the vectorization factor isn't
   known at compile time.  */

static inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
  return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}

/* Estimate the number of elements in VEC_TYPE for costing purposes.
   Pick a reasonable estimate if the exact number isn't known at
   compile time.  */

static inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
  return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
}
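
/* For example (a sketch): on a fixed-width target V4SI has a constant
   TYPE_VECTOR_SUBPARTS of 4, so the estimate is exact; on a
   variable-length target the subparts may be a runtime-sized poly_int
   such as 4 + 4 * x, and estimated_poly_value substitutes a reasonable
   compile-time guess for the runtime term.  */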

/* Return the maximum possible vectorization factor for LOOP_VINFO.  */

static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return vf;
  return MAX_VECTORIZATION_FACTOR;
}

/* Return the size of the value accessed by unvectorized data reference
   DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
   for the associated gimple statement, since that guarantees that DR_INFO
   accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
   here includes things like V1SI, which can be vectorized in the same way
   as a plain SI.)  */

inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
  return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}
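
/* For example (a sketch): an "int" access yields 4 on typical targets,
   and a V1SI access also yields 4, since TYPE_SIZE_UNIT of the
   referenced type measures the whole scalar-equivalent value in
   bytes.  */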

/* Source location + hotness information.  */
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dump file etc.,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)
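
/* Typical usage (an illustrative sketch; the function name is
   hypothetical):

     static bool
     vect_analyze_something (loop_vec_info loop_vinfo)
     {
       DUMP_VECT_SCOPE ("vect_analyze_something");
       ... dump messages issued here default to MSG_PRIORITY_INTERNALS
	   until the scope closes ...
     }  */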

/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations never
   get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  ~auto_purge_vect_location ();
};
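
/* An illustrative sketch (assumed to mirror the pattern used in
   tree-vectorizer.c): a function that assigns to vect_location keeps
   one of these on the stack so the global is purged again on every
   exit path:

     unsigned
     some_vectorizer_entry_point (void)
     {
       auto_purge_vect_location sentinel;
       vect_location = find_loop_location (loop);
       ...
     }  */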

/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for the vectorizer's
   purposes - in tree-vect-loop-manip.c.  */
extern void vect_set_loop_condition (struct loop *, loop_vec_info,
				     tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
						     struct loop *, edge);
struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
				   poly_uint64);
extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
				     tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern poly_uint64 current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				tree *, stmt_vec_info * = NULL,
				gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
					    tree, tree, enum tree_code *,
					    enum tree_code *, int *,
					    vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
					     enum tree_code *,
					     int *, vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
				  enum vect_cost_for_stmt, stmt_vec_info,
				  int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
						  gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
			       vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
					     vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
			      gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
				 slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
				     slp_instance, stmt_vector_for_cost *);
extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, bool, slp_tree,
				    stmt_vector_for_cost *);
extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *,
				stmt_vec_info *, slp_tree,
				stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
				unsigned int *, unsigned int *,
				stmt_vector_for_cost *,
				stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
				 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (struct loop *);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
						  tree *);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
  (dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
					   HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info,
						     unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
				      signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
				       gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
						 vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
				      tree *, gimple_stmt_iterator *,
				      gimple **, bool,
				      tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
			     stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree>, unsigned int, stmt_vec_info,
				      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
				    tree *, enum dr_alignment_support, tree,
				    struct loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree>, int,
					 gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
				   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
						  tree, tree = NULL_TREE);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info,
						  bool *, bool);
/* Used in gimple-loop-interchange.c.  */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
				  enum tree_code);
/* Drive for loop analysis stage.  */
extern opt_loop_vec_info vect_analyze_loop (struct loop *,
					    loop_vec_info,
					    vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
					 tree *, bool);
extern tree vect_halve_mask_nunits (tree);
extern tree vect_double_mask_nunits (tree);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
				   unsigned int, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
				unsigned int, tree, unsigned int);

/* Drive for loop transformation stage.  */
extern struct loop *vect_transform_loop (loop_vec_info);
extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
						 vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
					 slp_tree, int, stmt_vec_info *,
					 stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree, slp_instance,
				    stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree,
				    stmt_vector_for_cost *);
extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);

/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
					  gimple_stmt_iterator *, poly_uint64,
					  slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
					    unsigned int * = NULL,
					    tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
				      unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (struct loop *);

#endif /* GCC_TREE_VECTORIZER_H */