tree-affine.cc revision 1.1 1 1.1 mrg /* Operations with affine combinations of trees.
2 1.1 mrg Copyright (C) 2005-2022 Free Software Foundation, Inc.
3 1.1 mrg
4 1.1 mrg This file is part of GCC.
5 1.1 mrg
6 1.1 mrg GCC is free software; you can redistribute it and/or modify it
7 1.1 mrg under the terms of the GNU General Public License as published by the
8 1.1 mrg Free Software Foundation; either version 3, or (at your option) any
9 1.1 mrg later version.
10 1.1 mrg
11 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT
12 1.1 mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 1.1 mrg for more details.
15 1.1 mrg
16 1.1 mrg You should have received a copy of the GNU General Public License
17 1.1 mrg along with GCC; see the file COPYING3. If not see
18 1.1 mrg <http://www.gnu.org/licenses/>. */
19 1.1 mrg
20 1.1 mrg #include "config.h"
21 1.1 mrg #include "system.h"
22 1.1 mrg #include "coretypes.h"
23 1.1 mrg #include "backend.h"
24 1.1 mrg #include "rtl.h"
25 1.1 mrg #include "tree.h"
26 1.1 mrg #include "gimple.h"
27 1.1 mrg #include "ssa.h"
28 1.1 mrg #include "tree-pretty-print.h"
29 1.1 mrg #include "fold-const.h"
30 1.1 mrg #include "tree-affine.h"
31 1.1 mrg #include "gimplify.h"
32 1.1 mrg #include "dumpfile.h"
33 1.1 mrg #include "cfgexpand.h"
34 1.1 mrg #include "value-query.h"
35 1.1 mrg
36 1.1 mrg /* Extends CST as appropriate for the affine combinations COMB. */
37 1.1 mrg
38 1.1 mrg static widest_int
39 1.1 mrg wide_int_ext_for_comb (const widest_int &cst, tree type)
40 1.1 mrg {
41 1.1 mrg return wi::sext (cst, TYPE_PRECISION (type));
42 1.1 mrg }
43 1.1 mrg
44 1.1 mrg /* Likewise for polynomial offsets. */
45 1.1 mrg
46 1.1 mrg static poly_widest_int
47 1.1 mrg wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
48 1.1 mrg {
49 1.1 mrg return wi::sext (cst, TYPE_PRECISION (type));
50 1.1 mrg }
51 1.1 mrg
52 1.1 mrg /* Initializes affine combination COMB so that its value is zero in TYPE. */
53 1.1 mrg
54 1.1 mrg static void
55 1.1 mrg aff_combination_zero (aff_tree *comb, tree type)
56 1.1 mrg {
57 1.1 mrg int i;
58 1.1 mrg comb->type = type;
59 1.1 mrg comb->offset = 0;
60 1.1 mrg comb->n = 0;
61 1.1 mrg for (i = 0; i < MAX_AFF_ELTS; i++)
62 1.1 mrg comb->elts[i].coef = 0;
63 1.1 mrg comb->rest = NULL_TREE;
64 1.1 mrg }
65 1.1 mrg
66 1.1 mrg /* Sets COMB to CST. */
67 1.1 mrg
68 1.1 mrg void
69 1.1 mrg aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
70 1.1 mrg {
71 1.1 mrg aff_combination_zero (comb, type);
72 1.1 mrg comb->offset = wide_int_ext_for_comb (cst, comb->type);;
73 1.1 mrg }
74 1.1 mrg
75 1.1 mrg /* Sets COMB to single element ELT. */
76 1.1 mrg
77 1.1 mrg void
78 1.1 mrg aff_combination_elt (aff_tree *comb, tree type, tree elt)
79 1.1 mrg {
80 1.1 mrg aff_combination_zero (comb, type);
81 1.1 mrg
82 1.1 mrg comb->n = 1;
83 1.1 mrg comb->elts[0].val = elt;
84 1.1 mrg comb->elts[0].coef = 1;
85 1.1 mrg }
86 1.1 mrg
87 1.1 mrg /* Scales COMB by SCALE. */
88 1.1 mrg
89 1.1 mrg void
90 1.1 mrg aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
91 1.1 mrg {
92 1.1 mrg unsigned i, j;
93 1.1 mrg
94 1.1 mrg widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
95 1.1 mrg if (scale == 1)
96 1.1 mrg return;
97 1.1 mrg
98 1.1 mrg if (scale == 0)
99 1.1 mrg {
100 1.1 mrg aff_combination_zero (comb, comb->type);
101 1.1 mrg return;
102 1.1 mrg }
103 1.1 mrg
104 1.1 mrg comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
105 1.1 mrg for (i = 0, j = 0; i < comb->n; i++)
106 1.1 mrg {
107 1.1 mrg widest_int new_coef
108 1.1 mrg = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
109 1.1 mrg /* A coefficient may become zero due to overflow. Remove the zero
110 1.1 mrg elements. */
111 1.1 mrg if (new_coef == 0)
112 1.1 mrg continue;
113 1.1 mrg comb->elts[j].coef = new_coef;
114 1.1 mrg comb->elts[j].val = comb->elts[i].val;
115 1.1 mrg j++;
116 1.1 mrg }
117 1.1 mrg comb->n = j;
118 1.1 mrg
119 1.1 mrg if (comb->rest)
120 1.1 mrg {
121 1.1 mrg tree type = comb->type;
122 1.1 mrg if (POINTER_TYPE_P (type))
123 1.1 mrg type = sizetype;
124 1.1 mrg if (comb->n < MAX_AFF_ELTS)
125 1.1 mrg {
126 1.1 mrg comb->elts[comb->n].coef = scale;
127 1.1 mrg comb->elts[comb->n].val = comb->rest;
128 1.1 mrg comb->rest = NULL_TREE;
129 1.1 mrg comb->n++;
130 1.1 mrg }
131 1.1 mrg else
132 1.1 mrg comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
133 1.1 mrg wide_int_to_tree (type, scale));
134 1.1 mrg }
135 1.1 mrg }
136 1.1 mrg
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  /* Adding a zero multiple is a no-op.  */
  if (scale == 0)
    return;

  /* If ELT is already present, merge the coefficients.  */
  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	/* The coefficients cancelled; delete the element by moving the
	   last element into its slot.  */
	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	/* A slot freed up, so REST (if any) can become an ordinary
	   element again.  */
	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  /* ELT is new; append it if there is room.  */
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  /* No free slot; fold SCALE * ELT into the catch-all REST tree.
     Pointer combinations do the arithmetic in sizetype.  */
  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
198 1.1 mrg
199 1.1 mrg /* Adds CST to C. */
200 1.1 mrg
201 1.1 mrg static void
202 1.1 mrg aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
203 1.1 mrg {
204 1.1 mrg c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
205 1.1 mrg }
206 1.1 mrg
207 1.1 mrg /* Adds COMB2 to COMB1. */
208 1.1 mrg
209 1.1 mrg void
210 1.1 mrg aff_combination_add (aff_tree *comb1, aff_tree *comb2)
211 1.1 mrg {
212 1.1 mrg unsigned i;
213 1.1 mrg
214 1.1 mrg aff_combination_add_cst (comb1, comb2->offset);
215 1.1 mrg for (i = 0; i < comb2->n; i++)
216 1.1 mrg aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
217 1.1 mrg if (comb2->rest)
218 1.1 mrg aff_combination_add_elt (comb1, comb2->rest, 1);
219 1.1 mrg }
220 1.1 mrg
/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  /* Widening: go through the folded tree form and re-decompose, since
     the existing coefficients were truncated to the narrower
     precision.  */
  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  /* Same precision: only the type tag needed updating.  */
  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  /* Narrowing: re-extend the offset and convert every element, dropping
     elements whose coefficient is zero.  */
  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  /* If dropping elements freed a slot, REST can become a regular
     element.  */
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
262 1.1 mrg
263 1.1 mrg /* Tries to handle OP0 CODE OP1 as affine combination of parts. Returns
264 1.1 mrg true when that was successful and returns the combination in COMB. */
265 1.1 mrg
266 1.1 mrg static bool
267 1.1 mrg expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
268 1.1 mrg tree op0, tree op1 = NULL_TREE)
269 1.1 mrg {
270 1.1 mrg aff_tree tmp;
271 1.1 mrg poly_int64 bitpos, bitsize, bytepos;
272 1.1 mrg
273 1.1 mrg switch (code)
274 1.1 mrg {
275 1.1 mrg case POINTER_PLUS_EXPR:
276 1.1 mrg tree_to_aff_combination (op0, type, comb);
277 1.1 mrg tree_to_aff_combination (op1, sizetype, &tmp);
278 1.1 mrg aff_combination_add (comb, &tmp);
279 1.1 mrg return true;
280 1.1 mrg
281 1.1 mrg case PLUS_EXPR:
282 1.1 mrg case MINUS_EXPR:
283 1.1 mrg tree_to_aff_combination (op0, type, comb);
284 1.1 mrg tree_to_aff_combination (op1, type, &tmp);
285 1.1 mrg if (code == MINUS_EXPR)
286 1.1 mrg aff_combination_scale (&tmp, -1);
287 1.1 mrg aff_combination_add (comb, &tmp);
288 1.1 mrg return true;
289 1.1 mrg
290 1.1 mrg case MULT_EXPR:
291 1.1 mrg if (TREE_CODE (op1) != INTEGER_CST)
292 1.1 mrg break;
293 1.1 mrg tree_to_aff_combination (op0, type, comb);
294 1.1 mrg aff_combination_scale (comb, wi::to_widest (op1));
295 1.1 mrg return true;
296 1.1 mrg
297 1.1 mrg case NEGATE_EXPR:
298 1.1 mrg tree_to_aff_combination (op0, type, comb);
299 1.1 mrg aff_combination_scale (comb, -1);
300 1.1 mrg return true;
301 1.1 mrg
302 1.1 mrg case BIT_NOT_EXPR:
303 1.1 mrg /* ~x = -x - 1 */
304 1.1 mrg tree_to_aff_combination (op0, type, comb);
305 1.1 mrg aff_combination_scale (comb, -1);
306 1.1 mrg aff_combination_add_cst (comb, -1);
307 1.1 mrg return true;
308 1.1 mrg
309 1.1 mrg CASE_CONVERT:
310 1.1 mrg {
311 1.1 mrg tree otype = type;
312 1.1 mrg tree inner = op0;
313 1.1 mrg tree itype = TREE_TYPE (inner);
314 1.1 mrg enum tree_code icode = TREE_CODE (inner);
315 1.1 mrg
316 1.1 mrg /* STRIP_NOPS */
317 1.1 mrg if (tree_nop_conversion_p (otype, itype))
318 1.1 mrg {
319 1.1 mrg tree_to_aff_combination (op0, type, comb);
320 1.1 mrg return true;
321 1.1 mrg }
322 1.1 mrg
323 1.1 mrg /* In principle this is a valid folding, but it isn't necessarily
324 1.1 mrg an optimization, so do it here and not in fold_unary. */
325 1.1 mrg if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
326 1.1 mrg && TREE_CODE (itype) == INTEGER_TYPE
327 1.1 mrg && TREE_CODE (otype) == INTEGER_TYPE
328 1.1 mrg && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
329 1.1 mrg {
330 1.1 mrg tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);
331 1.1 mrg
332 1.1 mrg /* If inner type has undefined overflow behavior, fold conversion
333 1.1 mrg for below two cases:
334 1.1 mrg (T1)(X *+- CST) -> (T1)X *+- (T1)CST
335 1.1 mrg (T1)(X + X) -> (T1)X + (T1)X. */
336 1.1 mrg if (TYPE_OVERFLOW_UNDEFINED (itype)
337 1.1 mrg && (TREE_CODE (op1) == INTEGER_CST
338 1.1 mrg || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
339 1.1 mrg {
340 1.1 mrg op0 = fold_convert (otype, op0);
341 1.1 mrg op1 = fold_convert (otype, op1);
342 1.1 mrg return expr_to_aff_combination (comb, icode, otype, op0, op1);
343 1.1 mrg }
344 1.1 mrg wide_int minv, maxv;
345 1.1 mrg /* If inner type has wrapping overflow behavior, fold conversion
346 1.1 mrg for below case:
347 1.1 mrg (T1)(X *+- CST) -> (T1)X *+- (T1)CST
348 1.1 mrg if X *+- CST doesn't overflow by range information. */
349 1.1 mrg value_range vr;
350 1.1 mrg if (TYPE_UNSIGNED (itype)
351 1.1 mrg && TYPE_OVERFLOW_WRAPS (itype)
352 1.1 mrg && TREE_CODE (op1) == INTEGER_CST
353 1.1 mrg && get_range_query (cfun)->range_of_expr (vr, op0)
354 1.1 mrg && vr.kind () == VR_RANGE)
355 1.1 mrg {
356 1.1 mrg wide_int minv = vr.lower_bound ();
357 1.1 mrg wide_int maxv = vr.upper_bound ();
358 1.1 mrg wi::overflow_type overflow = wi::OVF_NONE;
359 1.1 mrg signop sign = UNSIGNED;
360 1.1 mrg if (icode == PLUS_EXPR)
361 1.1 mrg wi::add (maxv, wi::to_wide (op1), sign, &overflow);
362 1.1 mrg else if (icode == MULT_EXPR)
363 1.1 mrg wi::mul (maxv, wi::to_wide (op1), sign, &overflow);
364 1.1 mrg else
365 1.1 mrg wi::sub (minv, wi::to_wide (op1), sign, &overflow);
366 1.1 mrg
367 1.1 mrg if (overflow == wi::OVF_NONE)
368 1.1 mrg {
369 1.1 mrg op0 = fold_convert (otype, op0);
370 1.1 mrg op1 = fold_convert (otype, op1);
371 1.1 mrg return expr_to_aff_combination (comb, icode, otype, op0,
372 1.1 mrg op1);
373 1.1 mrg }
374 1.1 mrg }
375 1.1 mrg }
376 1.1 mrg }
377 1.1 mrg break;
378 1.1 mrg
379 1.1 mrg default:;
380 1.1 mrg }
381 1.1 mrg
382 1.1 mrg return false;
383 1.1 mrg }
384 1.1 mrg
/* Splits EXPR into an affine combination of parts, storing the result
   in COMB with all arithmetic interpreted in TYPE.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Delegate binary arithmetic to expr_to_aff_combination; fall
	 through to the opaque-element fallback if it declines.  */
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
				   TREE_OPERAND (expr, 1)))
	return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
	return;
      break;

    CASE_CONVERT:
      /* ??? TREE_TYPE (expr) should be equal to type here, but IVOPTS
	 calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
				   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
	{
	  aff_combination_convert (comb, type);
	  return;
	}
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      /* Otherwise split the address into inner object + bit position +
	 variable offset.  */
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      /* Give up on bit positions that are not a whole number of bytes.  */
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  /* Fold the constant MEM_REF offset into the combination and
	     continue from its pointer operand.  */
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  /* The base is itself something decomposable (e.g. an SSA
	     pointer); recurse on it.  */
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  /* Fallback: treat EXPR as a single opaque element with coefficient 1.  */
  aff_combination_elt (comb, type, expr);
}
480 1.1 mrg
481 1.1 mrg /* Creates EXPR + ELT * SCALE in TYPE. EXPR is taken from affine
482 1.1 mrg combination COMB. */
483 1.1 mrg
484 1.1 mrg static tree
485 1.1 mrg add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
486 1.1 mrg {
487 1.1 mrg enum tree_code code;
488 1.1 mrg
489 1.1 mrg widest_int scale = wide_int_ext_for_comb (scale_in, type);
490 1.1 mrg
491 1.1 mrg elt = fold_convert (type, elt);
492 1.1 mrg if (scale == 1)
493 1.1 mrg {
494 1.1 mrg if (!expr)
495 1.1 mrg return elt;
496 1.1 mrg
497 1.1 mrg return fold_build2 (PLUS_EXPR, type, expr, elt);
498 1.1 mrg }
499 1.1 mrg
500 1.1 mrg if (scale == -1)
501 1.1 mrg {
502 1.1 mrg if (!expr)
503 1.1 mrg return fold_build1 (NEGATE_EXPR, type, elt);
504 1.1 mrg
505 1.1 mrg return fold_build2 (MINUS_EXPR, type, expr, elt);
506 1.1 mrg }
507 1.1 mrg
508 1.1 mrg if (!expr)
509 1.1 mrg return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
510 1.1 mrg
511 1.1 mrg if (wi::neg_p (scale))
512 1.1 mrg {
513 1.1 mrg code = MINUS_EXPR;
514 1.1 mrg scale = -scale;
515 1.1 mrg }
516 1.1 mrg else
517 1.1 mrg code = PLUS_EXPR;
518 1.1 mrg
519 1.1 mrg elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
520 1.1 mrg return fold_build2 (code, type, expr, elt);
521 1.1 mrg }
522 1.1 mrg
/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  /* REST may only be in use when the element array is full.  */
  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      /* For pointer results build BASE p+ OFFSET, doing the offset
	 arithmetic in sizetype; a first element that is itself a
	 pointer with coefficient one becomes the base.  */
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}
572 1.1 mrg
573 1.1 mrg /* Copies the tree elements of COMB to ensure that they are not shared. */
574 1.1 mrg
575 1.1 mrg void
576 1.1 mrg unshare_aff_combination (aff_tree *comb)
577 1.1 mrg {
578 1.1 mrg unsigned i;
579 1.1 mrg
580 1.1 mrg for (i = 0; i < comb->n; i++)
581 1.1 mrg comb->elts[i].val = unshare_expr (comb->elts[i].val);
582 1.1 mrg if (comb->rest)
583 1.1 mrg comb->rest = unshare_expr (comb->rest);
584 1.1 mrg }
585 1.1 mrg
/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  /* Fill the hole with the former last element (unless M was last).  */
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  /* The freed slot lets REST become an ordinary element with
     coefficient one.  */
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
602 1.1 mrg
/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */


static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  /* Each element of C contributes (elt * VAL) with coefficient
     COEF * elt's coefficient.  */
  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  /* REST has an implicit coefficient of one, so it contributes
     COEF * (REST * VAL).  */
  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}
657 1.1 mrg
658 1.1 mrg /* Multiplies C1 by C2, storing the result to R */
659 1.1 mrg
660 1.1 mrg void
661 1.1 mrg aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
662 1.1 mrg {
663 1.1 mrg unsigned i;
664 1.1 mrg gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));
665 1.1 mrg
666 1.1 mrg aff_combination_zero (r, c1->type);
667 1.1 mrg
668 1.1 mrg for (i = 0; i < c2->n; i++)
669 1.1 mrg aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
670 1.1 mrg if (c2->rest)
671 1.1 mrg aff_combination_add_product (c1, 1, c2->rest, r);
672 1.1 mrg if (c2->offset.is_constant ())
673 1.1 mrg /* Access coeffs[0] directly, for efficiency. */
674 1.1 mrg aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
675 1.1 mrg else
676 1.1 mrg {
677 1.1 mrg /* c2->offset is polynomial, so do the multiplication in tree form. */
678 1.1 mrg tree offset = wide_int_to_tree (c2->type, c2->offset);
679 1.1 mrg aff_combination_add_product (c1, 1, offset, r);
680 1.1 mrg }
681 1.1 mrg }
682 1.1 mrg
683 1.1 mrg /* Returns the element of COMB whose value is VAL, or NULL if no such
684 1.1 mrg element exists. If IDX is not NULL, it is set to the index of VAL in
685 1.1 mrg COMB. */
686 1.1 mrg
687 1.1 mrg static class aff_comb_elt *
688 1.1 mrg aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
689 1.1 mrg {
690 1.1 mrg unsigned i;
691 1.1 mrg
692 1.1 mrg for (i = 0; i < comb->n; i++)
693 1.1 mrg if (operand_equal_p (comb->elts[i].val, val, 0))
694 1.1 mrg {
695 1.1 mrg if (idx)
696 1.1 mrg *idx = i;
697 1.1 mrg
698 1.1 mrg return &comb->elts[i];
699 1.1 mrg }
700 1.1 mrg
701 1.1 mrg return NULL;
702 1.1 mrg }
703 1.1 mrg
/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

class name_expansion
{
public:
  /* The affine combination the cached SSA name expands to.  */
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};
715 1.1 mrg
716 1.1 mrg /* Expands SSA names in COMB recursively. CACHE is used to cache the
717 1.1 mrg results. */
718 1.1 mrg
719 1.1 mrg void
720 1.1 mrg aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
721 1.1 mrg hash_map<tree, name_expansion *> **cache)
722 1.1 mrg {
723 1.1 mrg unsigned i;
724 1.1 mrg aff_tree to_add, current, curre;
725 1.1 mrg tree e;
726 1.1 mrg gimple *def;
727 1.1 mrg widest_int scale;
728 1.1 mrg class name_expansion *exp;
729 1.1 mrg
730 1.1 mrg aff_combination_zero (&to_add, comb->type);
731 1.1 mrg for (i = 0; i < comb->n; i++)
732 1.1 mrg {
733 1.1 mrg tree type, name;
734 1.1 mrg enum tree_code code;
735 1.1 mrg
736 1.1 mrg e = comb->elts[i].val;
737 1.1 mrg type = TREE_TYPE (e);
738 1.1 mrg name = e;
739 1.1 mrg /* Look through some conversions. */
740 1.1 mrg if (CONVERT_EXPR_P (e)
741 1.1 mrg && (TYPE_PRECISION (type)
742 1.1 mrg >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
743 1.1 mrg name = TREE_OPERAND (e, 0);
744 1.1 mrg if (TREE_CODE (name) != SSA_NAME)
745 1.1 mrg continue;
746 1.1 mrg def = SSA_NAME_DEF_STMT (name);
747 1.1 mrg if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
748 1.1 mrg continue;
749 1.1 mrg
750 1.1 mrg code = gimple_assign_rhs_code (def);
751 1.1 mrg if (code != SSA_NAME
752 1.1 mrg && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
753 1.1 mrg && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
754 1.1 mrg || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
755 1.1 mrg continue;
756 1.1 mrg
757 1.1 mrg /* We do not know whether the reference retains its value at the
758 1.1 mrg place where the expansion is used. */
759 1.1 mrg if (TREE_CODE_CLASS (code) == tcc_reference)
760 1.1 mrg continue;
761 1.1 mrg
762 1.1 mrg name_expansion **slot = NULL;
763 1.1 mrg if (*cache)
764 1.1 mrg slot = (*cache)->get (name);
765 1.1 mrg exp = slot ? *slot : NULL;
766 1.1 mrg if (!exp)
767 1.1 mrg {
768 1.1 mrg /* Only bother to handle cases tree_to_aff_combination will. */
769 1.1 mrg switch (code)
770 1.1 mrg {
771 1.1 mrg case POINTER_PLUS_EXPR:
772 1.1 mrg case PLUS_EXPR:
773 1.1 mrg case MINUS_EXPR:
774 1.1 mrg case MULT_EXPR:
775 1.1 mrg if (!expr_to_aff_combination (¤t, code, TREE_TYPE (name),
776 1.1 mrg gimple_assign_rhs1 (def),
777 1.1 mrg gimple_assign_rhs2 (def)))
778 1.1 mrg continue;
779 1.1 mrg break;
780 1.1 mrg case NEGATE_EXPR:
781 1.1 mrg case BIT_NOT_EXPR:
782 1.1 mrg if (!expr_to_aff_combination (¤t, code, TREE_TYPE (name),
783 1.1 mrg gimple_assign_rhs1 (def)))
784 1.1 mrg continue;
785 1.1 mrg break;
786 1.1 mrg CASE_CONVERT:
787 1.1 mrg if (!expr_to_aff_combination (¤t, code, TREE_TYPE (name),
788 1.1 mrg gimple_assign_rhs1 (def)))
789 1.1 mrg /* This makes us always expand conversions which we did
790 1.1 mrg in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
791 1.1 mrg PASS, eliminating one induction variable in IVOPTs.
792 1.1 mrg ??? But it is really excessive and we should try
793 1.1 mrg harder to do without it. */
794 1.1 mrg aff_combination_elt (¤t, TREE_TYPE (name),
795 1.1 mrg fold_convert (TREE_TYPE (name),
796 1.1 mrg gimple_assign_rhs1 (def)));
797 1.1 mrg break;
798 1.1 mrg case ADDR_EXPR:
799 1.1 mrg case INTEGER_CST:
800 1.1 mrg case POLY_INT_CST:
801 1.1 mrg tree_to_aff_combination (gimple_assign_rhs1 (def),
802 1.1 mrg TREE_TYPE (name), ¤t);
803 1.1 mrg break;
804 1.1 mrg default:
805 1.1 mrg continue;
806 1.1 mrg }
807 1.1 mrg exp = XNEW (class name_expansion);
808 1.1 mrg exp->in_progress = 1;
809 1.1 mrg if (!*cache)
810 1.1 mrg *cache = new hash_map<tree, name_expansion *>;
811 1.1 mrg (*cache)->put (name, exp);
812 1.1 mrg aff_combination_expand (¤t, cache);
813 1.1 mrg exp->expansion = current;
814 1.1 mrg exp->in_progress = 0;
815 1.1 mrg }
816 1.1 mrg else
817 1.1 mrg {
818 1.1 mrg /* Since we follow the definitions in the SSA form, we should not
819 1.1 mrg enter a cycle unless we pass through a phi node. */
820 1.1 mrg gcc_assert (!exp->in_progress);
821 1.1 mrg current = exp->expansion;
822 1.1 mrg }
823 1.1 mrg if (!useless_type_conversion_p (comb->type, current.type))
824 1.1 mrg aff_combination_convert (¤t, comb->type);
825 1.1 mrg
826 1.1 mrg /* Accumulate the new terms to TO_ADD, so that we do not modify
827 1.1 mrg COMB while traversing it; include the term -coef * E, to remove
828 1.1 mrg it from COMB. */
829 1.1 mrg scale = comb->elts[i].coef;
830 1.1 mrg aff_combination_zero (&curre, comb->type);
831 1.1 mrg aff_combination_add_elt (&curre, e, -scale);
832 1.1 mrg aff_combination_scale (¤t, scale);
833 1.1 mrg aff_combination_add (&to_add, ¤t);
834 1.1 mrg aff_combination_add (&to_add, &curre);
835 1.1 mrg }
836 1.1 mrg aff_combination_add (comb, &to_add);
837 1.1 mrg }
838 1.1 mrg
/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  /* First decompose EXPR syntactically, then chase SSA definitions.  */
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}
856 1.1 mrg
/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  Entries are allocated with XNEW in
   aff_combination_expand, so plain free is the matching release.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}
866 1.1 mrg
867 1.1 mrg /* Frees memory allocated for the CACHE used by
868 1.1 mrg tree_to_aff_combination_expand. */
869 1.1 mrg
870 1.1 mrg void
871 1.1 mrg free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
872 1.1 mrg {
873 1.1 mrg if (!*cache)
874 1.1 mrg return;
875 1.1 mrg
876 1.1 mrg (*cache)->traverse<void *, free_name_expansion> (NULL);
877 1.1 mrg delete (*cache);
878 1.1 mrg *cache = NULL;
879 1.1 mrg }
880 1.1 mrg
881 1.1 mrg /* If VAL != CST * DIV for any constant CST, returns false.
882 1.1 mrg Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
883 1.1 mrg and if they are different, returns false. Finally, if neither of these
884 1.1 mrg two cases occur, true is returned, and CST is stored to MULT and MULT_SET
885 1.1 mrg is set to true. */
886 1.1 mrg
887 1.1 mrg static bool
888 1.1 mrg wide_int_constant_multiple_p (const poly_widest_int &val,
889 1.1 mrg const poly_widest_int &div,
890 1.1 mrg bool *mult_set, poly_widest_int *mult)
891 1.1 mrg {
892 1.1 mrg poly_widest_int rem, cst;
893 1.1 mrg
894 1.1 mrg if (known_eq (val, 0))
895 1.1 mrg {
896 1.1 mrg if (*mult_set && maybe_ne (*mult, 0))
897 1.1 mrg return false;
898 1.1 mrg *mult_set = true;
899 1.1 mrg *mult = 0;
900 1.1 mrg return true;
901 1.1 mrg }
902 1.1 mrg
903 1.1 mrg if (maybe_eq (div, 0))
904 1.1 mrg return false;
905 1.1 mrg
906 1.1 mrg if (!multiple_p (val, div, &cst))
907 1.1 mrg return false;
908 1.1 mrg
909 1.1 mrg if (*mult_set && maybe_ne (*mult, cst))
910 1.1 mrg return false;
911 1.1 mrg
912 1.1 mrg *mult_set = true;
913 1.1 mrg *mult = cst;
914 1.1 mrg return true;
915 1.1 mrg }
916 1.1 mrg
/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  /* The zero combination is 0 * DIV for any DIV.  */
  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  /* Both combinations must have the same number of elements ...  */
  if (val->n != div->n)
    return false;

  /* ... and no opaque REST part, whose multiple cannot be checked.  */
  if (val->rest || div->rest)
    return false;

  /* The offsets must share a common multiplier ...  */
  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  /* ... as must the coefficient of every element of DIV, matched
     against the corresponding element of VAL.  */
  for (i = 0; i < div->n; i++)
    {
      class aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}
956 1.1 mrg
957 1.1 mrg /* Prints the affine VAL to the FILE. */
958 1.1 mrg
959 1.1 mrg static void
960 1.1 mrg print_aff (FILE *file, aff_tree *val)
961 1.1 mrg {
962 1.1 mrg unsigned i;
963 1.1 mrg signop sgn = TYPE_SIGN (val->type);
964 1.1 mrg if (POINTER_TYPE_P (val->type))
965 1.1 mrg sgn = SIGNED;
966 1.1 mrg fprintf (file, "{\n type = ");
967 1.1 mrg print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
968 1.1 mrg fprintf (file, "\n offset = ");
969 1.1 mrg print_dec (val->offset, file, sgn);
970 1.1 mrg if (val->n > 0)
971 1.1 mrg {
972 1.1 mrg fprintf (file, "\n elements = {\n");
973 1.1 mrg for (i = 0; i < val->n; i++)
974 1.1 mrg {
975 1.1 mrg fprintf (file, " [%d] = ", i);
976 1.1 mrg print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);
977 1.1 mrg
978 1.1 mrg fprintf (file, " * ");
979 1.1 mrg print_dec (val->elts[i].coef, file, sgn);
980 1.1 mrg if (i != val->n - 1)
981 1.1 mrg fprintf (file, ", \n");
982 1.1 mrg }
983 1.1 mrg fprintf (file, "\n }");
984 1.1 mrg }
985 1.1 mrg if (val->rest)
986 1.1 mrg {
987 1.1 mrg fprintf (file, "\n rest = ");
988 1.1 mrg print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
989 1.1 mrg }
990 1.1 mrg fprintf (file, "\n}");
991 1.1 mrg }
992 1.1 mrg
993 1.1 mrg /* Prints the affine VAL to the standard error, used for debugging. */
994 1.1 mrg
995 1.1 mrg DEBUG_FUNCTION void
996 1.1 mrg debug_aff (aff_tree *val)
997 1.1 mrg {
998 1.1 mrg print_aff (stderr, val);
999 1.1 mrg fprintf (stderr, "\n");
1000 1.1 mrg }
1001 1.1 mrg
1002 1.1 mrg /* Computes address of the reference REF in ADDR. The size of the accessed
1003 1.1 mrg location is stored to SIZE. Returns the ultimate containing object to
1004 1.1 mrg which REF refers. */
1005 1.1 mrg
1006 1.1 mrg tree
1007 1.1 mrg get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
1008 1.1 mrg {
1009 1.1 mrg poly_int64 bitsize, bitpos;
1010 1.1 mrg tree toff;
1011 1.1 mrg machine_mode mode;
1012 1.1 mrg int uns, rev, vol;
1013 1.1 mrg aff_tree tmp;
1014 1.1 mrg tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
1015 1.1 mrg &uns, &rev, &vol);
1016 1.1 mrg tree base_addr = build_fold_addr_expr (base);
1017 1.1 mrg
1018 1.1 mrg /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT. */
1019 1.1 mrg
1020 1.1 mrg tree_to_aff_combination (base_addr, sizetype, addr);
1021 1.1 mrg
1022 1.1 mrg if (toff)
1023 1.1 mrg {
1024 1.1 mrg tree_to_aff_combination (toff, sizetype, &tmp);
1025 1.1 mrg aff_combination_add (addr, &tmp);
1026 1.1 mrg }
1027 1.1 mrg
1028 1.1 mrg aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
1029 1.1 mrg aff_combination_add (addr, &tmp);
1030 1.1 mrg
1031 1.1 mrg *size = bits_to_bytes_round_up (bitsize);
1032 1.1 mrg
1033 1.1 mrg return base;
1034 1.1 mrg }
1035 1.1 mrg
1036 1.1 mrg /* Returns true if a region of size SIZE1 at position 0 and a region of
1037 1.1 mrg size SIZE2 at position DIFF cannot overlap. */
1038 1.1 mrg
1039 1.1 mrg bool
1040 1.1 mrg aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
1041 1.1 mrg const poly_widest_int &size2)
1042 1.1 mrg {
1043 1.1 mrg /* Unless the difference is a constant, we fail. */
1044 1.1 mrg if (diff->n != 0)
1045 1.1 mrg return false;
1046 1.1 mrg
1047 1.1 mrg if (!ordered_p (diff->offset, 0))
1048 1.1 mrg return false;
1049 1.1 mrg
1050 1.1 mrg if (maybe_lt (diff->offset, 0))
1051 1.1 mrg {
1052 1.1 mrg /* The second object is before the first one, we succeed if the last
1053 1.1 mrg element of the second object is before the start of the first one. */
1054 1.1 mrg return known_le (diff->offset + size2, 0);
1055 1.1 mrg }
1056 1.1 mrg else
1057 1.1 mrg {
1058 1.1 mrg /* We succeed if the second object starts after the first one ends. */
1059 1.1 mrg return known_le (size1, diff->offset);
1060 1.1 mrg }
1061 1.1 mrg }
1062 1.1 mrg
1063