/* C-compiler utilities for types and variables storage layout
2 Copyright (C) 1987-2022 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "target.h"
25 #include "function.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "stringpool.h"
31 #include "regs.h"
32 #include "emit-rtl.h"
33 #include "cgraph.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "print-tree.h"
39 #include "langhooks.h"
40 #include "tree-inline.h"
41 #include "dumpfile.h"
42 #include "gimplify.h"
43 #include "attribs.h"
44 #include "debug.h"
45 #include "calls.h"
46
/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  Indexed by the stk_* type
   kinds, of which stk_type_kind_last is the count.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  Zero means no limit; a nonzero value
   caps DECL_ALIGN of fields in layout_decl below.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Forward declarations for the static layout helpers defined later in
   this file.  */
static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);
62
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.

   Three cases are exempted from wrapping: constants need no deferred
   evaluation; self-referential sizes (those containing a
   PLACEHOLDER_EXPR) cannot be put in a SAVE_EXPR and are handled by
   self_referential_size instead; and sizes seen at global binding
   level are returned unchanged, since a SAVE_EXPR made here could end
   up shared across functions.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
87
/* An array of functions used for self-referential size computation.
   Entries are pushed by self_referential_size and compiled at the end
   of the translation unit by finalize_size_functions.  */
static GTY(()) vec<tree, va_gc> *size_functions;
90
91 /* Return true if T is a self-referential component reference. */
92
93 static bool
94 self_referential_component_ref_p (tree t)
95 {
96 if (TREE_CODE (t) != COMPONENT_REF)
97 return false;
98
99 while (REFERENCE_CLASS_P (t))
100 t = TREE_OPERAND (t, 0);
101
102 return (TREE_CODE (t) == PLACEHOLDER_EXPR);
103 }
104
105 /* Similar to copy_tree_r but do not copy component references involving
106 PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
107 and substituted in substitute_in_expr. */
108
109 static tree
110 copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
111 {
112 enum tree_code code = TREE_CODE (*tp);
113
114 /* Stop at types, decls, constants like copy_tree_r. */
115 if (TREE_CODE_CLASS (code) == tcc_type
116 || TREE_CODE_CLASS (code) == tcc_declaration
117 || TREE_CODE_CLASS (code) == tcc_constant)
118 {
119 *walk_subtrees = 0;
120 return NULL_TREE;
121 }
122
123 /* This is the pattern built in ada/make_aligning_type. */
124 else if (code == ADDR_EXPR
125 && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
126 {
127 *walk_subtrees = 0;
128 return NULL_TREE;
129 }
130
131 /* Default case: the component reference. */
132 else if (self_referential_component_ref_p (*tp))
133 {
134 *walk_subtrees = 0;
135 return NULL_TREE;
136 }
137
138 /* We're not supposed to have them in self-referential size trees
139 because we wouldn't properly control when they are evaluated.
140 However, not creating superfluous SAVE_EXPRs requires accurate
141 tracking of readonly-ness all the way down to here, which we
142 cannot always guarantee in practice. So punt in this case. */
143 else if (code == SAVE_EXPR)
144 return error_mark_node;
145
146 else if (code == STATEMENT_LIST)
147 gcc_unreachable ();
148
149 return copy_tree_r (tp, walk_subtrees, data);
150 }
151
/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.

   The expression is turned into an artificial function "SZn" whose
   parameters are the self-referenced values, and the returned expression
   is a call to that function passing the original references as
   arguments.  The functions are collected in SIZE_FUNCTIONS and
   compiled at the very end of compilation (see finalize_size_functions),
   so the GENERIC inliner can fold them back at each call site.  */

static tree
self_referential_size (tree size)
{
  /* Counter used to generate a unique name for each size function.  */
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression; bail out if the walk
     found something that cannot be safely copied (see
     copy_self_referential_tree_r).  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
	{
	  /* We shouldn't have true variables here.  */
	  gcc_assert (TREE_READONLY (ref));
	  subst = ref;
	}
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
	subst = ref;
      /* Default case: the component reference.  */
      else
	subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
	= build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
267
/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  /* The loop condition re-reads SIZE_FUNCTIONS each iteration, so
     entries pushed onto the vector while the loop runs are also
     processed.  */
  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
	 structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}
296
297 /* Return a machine mode of class MCLASS with SIZE bits of precision,
299 if one exists. The mode may have padding bits as well the SIZE
300 value bits. If LIMIT is nonzero, disregard modes wider than
301 MAX_FIXED_MODE_SIZE. */
302
303 opt_machine_mode
304 mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
305 {
306 machine_mode mode;
307 int i;
308
309 if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
310 return opt_machine_mode ();
311
312 /* Get the first mode which has this size, in the specified class. */
313 FOR_EACH_MODE_IN_CLASS (mode, mclass)
314 if (known_eq (GET_MODE_PRECISION (mode), size))
315 return mode;
316
317 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
318 for (i = 0; i < NUM_INT_N_ENTS; i ++)
319 if (known_eq (int_n_data[i].bitsize, size)
320 && int_n_enabled_p[i])
321 return int_n_data[i].m;
322
323 return opt_machine_mode ();
324 }
325
326 /* Similar, except passed a tree node. */
327
328 opt_machine_mode
329 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
330 {
331 unsigned HOST_WIDE_INT uhwi;
332 unsigned int ui;
333
334 if (!tree_fits_uhwi_p (size))
335 return opt_machine_mode ();
336 uhwi = tree_to_uhwi (size);
337 ui = uhwi;
338 if (uhwi != ui)
339 return opt_machine_mode ();
340 return mode_for_size (ui, mclass, limit);
341 }
342
343 /* Return the narrowest mode of class MCLASS that contains at least
344 SIZE bits. Abort if no such mode exists. */
345
346 machine_mode
347 smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
348 {
349 machine_mode mode = VOIDmode;
350 int i;
351
352 /* Get the first mode which has at least this size, in the
353 specified class. */
354 FOR_EACH_MODE_IN_CLASS (mode, mclass)
355 if (known_ge (GET_MODE_PRECISION (mode), size))
356 break;
357
358 gcc_assert (mode != VOIDmode);
359
360 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
361 for (i = 0; i < NUM_INT_N_ENTS; i ++)
362 if (known_ge (int_n_data[i].bitsize, size)
363 && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
364 && int_n_enabled_p[i])
365 mode = int_n_data[i].m;
366
367 return mode;
368 }
369
/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      /* MODE is already a (possibly partial) integer mode.  */
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      /* Look for an integer mode with the same total number of bits.  */
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_OPAQUE:
      return opt_scalar_int_mode ();

    case MODE_RANDOM:
      /* BLKmode has no integer equivalent; any other MODE_RANDOM mode
	 is unexpected here.  */
      if (mode == BLKmode)
	return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
412
/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  The comma
     expression runs them for their assertions only.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
	   || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
	  && have_regs_of_mode[GET_MODE_INNER (trial)])
	return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
	   || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
	  && have_regs_of_mode[trial]
	  && targetm.vector_mode_supported_p (trial))
	return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}
458
459 /* Find a type that can be used for efficient bitwise operations on MODE.
460 Return null if no such mode exists. */
461
462 tree
463 bitwise_type_for_mode (machine_mode mode)
464 {
465 if (!bitwise_mode_for_mode (mode).exists (&mode))
466 return NULL_TREE;
467
468 unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
469 tree inner_type = build_nonstandard_integer_type (inner_size, true);
470
471 if (VECTOR_MODE_P (mode))
472 return build_vector_type_for_mode (inner_type, mode);
473
474 if (COMPLEX_MODE_P (mode))
475 return build_complex_type (inner_type);
476
477 gcc_checking_assert (GET_MODE_INNER (mode) == mode);
478 return inner_type;
479 }
480
/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  Start the scan at the
     first vector mode whose element class matches INNERMODE.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
	&& GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
	  && have_regs_of_mode[mode])
	return mode;
    }

  return opt_machine_mode ();
}
522
/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on elements of mode ELEMENT_MODE, return the vector mode
   it should use for those elements.  If NUNITS is nonzero, ensure that
   the mode has exactly NUNITS elements, otherwise pick whichever vector
   size pairs the most naturally with VECTOR_MODE; this may mean choosing
   a mode with a different size and/or number of elements, depending on
   what the target prefers.  Return an empty opt_machine_mode if there
   is no supported vector mode with the required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_vector_mode (machine_mode vector_mode, scalar_mode element_mode,
		     poly_uint64 nunits)
{
  /* Delegate entirely to the target hook; it provides the supported-mode
     guarantee documented above.  */
  gcc_assert (VECTOR_MODE_P (vector_mode));
  return targetm.vectorize.related_mode (vector_mode, element_mode, nunits);
}
542
543 /* If a piece of code is using vector mode VECTOR_MODE and also wants
544 to operate on integer vectors with the same element size and number
545 of elements, return the vector mode it should use. Return an empty
546 opt_machine_mode if there is no supported vector mode with the
547 required properties.
548
549 Unlike mode_for_vector. any returned mode is guaranteed to satisfy
550 both VECTOR_MODE_P and targetm.vector_mode_supported_p. */
551
552 opt_machine_mode
553 related_int_vector_mode (machine_mode vector_mode)
554 {
555 gcc_assert (VECTOR_MODE_P (vector_mode));
556 scalar_int_mode int_mode;
557 if (int_mode_for_mode (GET_MODE_INNER (vector_mode)).exists (&int_mode))
558 return related_vector_mode (vector_mode, int_mode,
559 GET_MODE_NUNITS (vector_mode));
560 return opt_machine_mode ();
561 }
562
563 /* Return the alignment of MODE. This will be bounded by 1 and
564 BIGGEST_ALIGNMENT. */
565
566 unsigned int
567 get_mode_alignment (machine_mode mode)
568 {
569 return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
570 }
571
/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  /* When both sizes are known constants and divide evenly, give the
     target a chance to pick (or unconditionally allow) an array mode
     of NUM_ELEMS elements.  */
  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
	return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
	limit_p = false;
    }

  /* Otherwise fall back on an integer mode of the right size, or
     BLKmode when none exists.  */
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
603
604 /* Subroutine of layout_decl: Force alignment required for the data type.
606 But if the decl itself wants greater alignment, don't override that. */
607
608 static inline void
609 do_type_align (tree type, tree decl)
610 {
611 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
612 {
613 SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
614 if (TREE_CODE (decl) == FIELD_DECL)
615 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
616 }
617 if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
618 SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
619 }
620
/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    /* Derive the byte size from the explicit bit size, rounding up.  */
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
					  bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode,
	     occupying a complete byte or bytes on proper boundary.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
	    {
	      machine_mode xmode;
	      if (mode_for_size_tree (DECL_SIZE (decl),
				      MODE_INT, 1).exists (&xmode))
		{
		  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
		  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
		      && (known_align == 0 || known_align >= xalign))
		    {
		      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
		      SET_DECL_MODE (decl, xmode);
		      DECL_BIT_FIELD (decl) = 0;
		    }
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
	 minimum alignment.  Note that do_type_align may set
	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
	  && !old_user_align)
	SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (i.e. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
				     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
	  SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
						    DECL_ALIGN (decl)));
#endif
	}

      /* Zero-length bit-fields are capped by the initial maximum field
	 alignment rather than the current #pragma pack setting.  */
      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
	{
	  /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
	     as if PTRDIFF_MAX had been specified, with the value
	     being that on the target rather than the host.  */
	  unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
	  if (max_size == HOST_WIDE_INT_MAX)
	    max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

	  if (compare_tree_int (size, max_size) > 0)
	    warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
		     "maximum object size %wu",
		     decl, size, max_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
	set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
821
822 /* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
823 results of a previous call to layout_decl and calls it again. */
824
825 void
826 relayout_decl (tree decl)
827 {
828 DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
829 SET_DECL_MODE (decl, VOIDmode);
830 if (!DECL_USER_ALIGN (decl))
831 SET_DECL_ALIGN (decl, 0);
832 if (DECL_RTL_SET_P (decl))
833 SET_DECL_RTL (decl, 0);
834
835 layout_decl (decl, 0);
836 }
837
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
	tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  /* Start at the beginning of the record, with nothing pending.  */
  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
883
/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  Widen the polynomial coefficients directly.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
			    TYPE_PRECISION (bitsizetype),
			    TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}
905
906 /* Return the combined bit position for the byte offset OFFSET and the
907 bit position BITPOS.
908
909 These functions operate on byte and bit positions present in FIELD_DECLs
910 and assume that these expressions result in no (intermediate) overflow.
911 This assumption is necessary to fold the expressions as much as possible,
912 so as to avoid creating artificially variable-sized types in languages
913 supporting variable-sized types like Ada. */
914
915 tree
916 bit_from_pos (tree offset, tree bitpos)
917 {
918 return size_binop (PLUS_EXPR, bitpos,
919 size_binop (MULT_EXPR, bits_from_bytes (offset),
920 bitsize_unit_node));
921 }
922
923 /* Return the combined truncated byte position for the byte offset OFFSET and
924 the bit position BITPOS. */
925
926 tree
927 byte_from_pos (tree offset, tree bitpos)
928 {
929 tree bytepos;
930 if (TREE_CODE (bitpos) == MULT_EXPR
931 && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
932 bytepos = TREE_OPERAND (bitpos, 0);
933 else
934 bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
935 return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
936 }
937
/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
	      tree pos)
{
  tree toff_align = bitsize_int (off_align);

  /* Fast path: POS is literally "N * OFF_ALIGN", so the split is exact
     and the bit remainder is zero.  */
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
			     fold_convert (sizetype, TREE_OPERAND (pos, 0)),
			     size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      /* General case: divide POS by OFF_ALIGN, flooring, and keep the
	 remainder as the bit position.  */
      *poffset = size_binop (MULT_EXPR,
			     fold_convert (sizetype,
					   size_binop (FLOOR_DIV_EXPR, pos,
						       toff_align)),
			     size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}
964
965 /* Given a pointer to bit and byte offsets and an offset alignment,
966 normalize the offsets so they are within the alignment. */
967
968 void
969 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
970 {
971 /* If the bit position is now larger than it should be, adjust it
972 downwards. */
973 if (compare_tree_int (*pbitpos, off_align) >= 0)
974 {
975 tree offset, bitpos;
976 pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
977 *poffset = size_binop (PLUS_EXPR, *poffset, offset);
978 *pbitpos = bitpos;
979 }
980 }
981
982 /* Print debugging information about the information in RLI. */
983
DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  /* Dump the type being laid out and the current byte/bit position.  */
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);

  /* The ms_struct code is the only that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  /* Static members queued by place_field for layout after the record.  */
  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug (rli->pending_statics);
    }
}
1008
1009 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
1010 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
1011
void
normalize_rli (record_layout_info rli)
{
  /* Fold whole offset-alignment units of RLI->bitpos into RLI->offset
     so the bit position stays below RLI->offset_align.  */
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}
1017
1018 /* Returns the size in bytes allocated so far. */
1019
tree
rli_size_unit_so_far (record_layout_info rli)
{
  /* Truncating: a partial byte still pending in RLI->bitpos is not
     counted here.  */
  return byte_from_pos (rli->offset, rli->bitpos);
}
1025
1026 /* Returns the size in bits allocated so far. */
1027
tree
rli_size_so_far (record_layout_info rli)
{
  /* Exact bit count: combines the byte offset and the bit position.  */
  return bit_from_pos (rli->offset, rli->bitpos);
}
1033
1034 /* FIELD is about to be added to RLI->T. The alignment (in bits) of
1035 the next available location within the record is given by KNOWN_ALIGN.
1036 Update the variable alignment fields in RLI, and return the alignment
1037 to give the FIELD. */
1038
unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if (!is_bitfield
	  || ((DECL_SIZE (field) == NULL_TREE
	       || !integer_zerop (DECL_SIZE (field)))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  if (!is_bitfield && DECL_PACKED (field))
	    type_align = desired_align;
	  else
	    type_align = MAX (type_align, desired_align);
	  /* #pragma pack / -fpack-struct caps the field alignment.  */
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

	  /* Targets might chose to handle unnamed and hence possibly
	     zero-width bitfield.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
  else
    {
      /* Ordinary (non-ms, non-PCC-bitfield) case: the record simply
	 inherits the field's alignment.  */
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  /* Propagate an explicit user alignment up to the record type.  */
  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
1146
1147 /* Issue a warning if the record alignment, RECORD_ALIGN, is less than
1148 the field alignment of FIELD or FIELD isn't aligned. */
1149
static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  /* The alignment (in bits) the field/type asked to be checked against;
     zero means no warning applies.  */
  unsigned int warn_if_not_align = 0;

  /* Which -W option to attribute an eventual warning to.  */
  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      /* The decl's warn_if_not_aligned attribute takes precedence over
	 the one on its type.  */
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
	warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
	opt_w = OPT_Wif_not_aligned;
    }

  /* Otherwise, -Wpacked-not-aligned checks explicitly aligned types
     placed in packed contexts.  */
  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  /* Warn if the containing record itself is under-aligned...  */
  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
	     record_align, context, warn_if_not_align);

  /* ...and if the field's byte offset is (or may be) misaligned.  */
  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
	warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
		 field, off, context, warn_if_not_align);
      else
	warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
		 field, off, context, warn_if_not_align);
    }
}
1201
1202 /* Called from place_field to handle unions. */
1203
static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  /* Every union member starts at position zero.  */
  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    /* For an Ada variant record, the size is the selected member's size
       at runtime, expressed as a COND_EXPR on the discriminant.  */
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
			       DECL_SIZE_UNIT (field), rli->offset);
}
1232
1233 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1234 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
1235 units of alignment than the underlying TYPE. */
1236 static int
1237 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1238 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1239 {
1240 /* Note that the calculation of OFFSET might overflow; we calculate it so
1241 that we still get the right result as long as ALIGN is a power of two. */
1242 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1243
1244 offset = offset % align;
1245 return ((offset + size + align - 1) / align
1246 > tree_to_uhwi (TYPE_SIZE (type)) / align);
1247 }
1248
1249 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
1250 is a FIELD_DECL to be added after those fields already present in
1251 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1252 callers that desire that behavior must manually perform that step.) */
1253
void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (VAR_P (field))
    {
      vec_safe_push (rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      handle_warn_if_not_align (field, rli->record_align);
      return;
    }

  if (AGGREGATE_TYPE_P (type)
      && TYPE_TYPELESS_STORAGE (type))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (tree_fits_uhwi_p (rli->offset))
    known_align = (BITS_PER_UNIT
		   * least_bit_hwi (tree_to_uhwi (rli->offset)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  /* A zero KNOWN_ALIGN means we are at offset 0, which is as aligned
     as anything can be.  */
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      /* Don't warn if DECL_PACKED was set by the type.  */
	      else if (!TYPE_PACKED (rli->t))
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align
      && (! targetm.ms_bitfield_layout_p (rli->t)
	  || rli->prev_field == NULL))
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
	  && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION
	  && !TYPE_ARTIFICIAL (rli->t))
	warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
	  /* Enter for these packed fields only to issue a warning.  */
	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	{
	  if (DECL_PACKED (field))
	    {
	      /* Packed bitfields are not moved; only note the ABI change
		 introduced in GCC 4.4 when asked to.  */
	      if (warn_packed_bitfield_compat == 1)
		inform
		  (input_location,
		   "offset of packed bit-field %qD has changed in GCC 4.4",
		   field);
	    }
	  else
	    rli->bitpos = round_up (rli->bitpos, type_align);
	}

      if (! DECL_PACKED (field))
	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);

      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
				  TYPE_WARN_IF_NOT_ALIGN (type));
    }

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
				  TYPE_WARN_IF_NOT_ALIGN (type));
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if its long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields are
	packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
	{
	  bool realign_p = known_align < desired_align;

	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run". (Note: it's nonzero declared
	     size, but equal type sizes!) (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.) */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
	      && tree_fits_uhwi_p (TYPE_SIZE (type))
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!) */
	      HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));

		  /* out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		{
		  /* The field still fits in the current 'word'; no
		     conventional realignment is needed.  */
		  rli->remaining_in_alignment -= bitsize;
		  realign_p = false;
		}
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  /* Does this field automatically have alignment it needs by virtue
	     of the fields that precede it and the record's own alignment?  */
	  if (realign_p)
	    {
	      /* If the alignment is still within offset_align, just align
		 the bit position.  */
	      if (desired_align < rli->offset_align)
		rli->bitpos = round_up (rli->bitpos, desired_align);
	      else
		{
		  /* First adjust OFFSET by the partial bits, then align.  */
		  tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
				       bitsize_unit_node);
		  rli->offset = size_binop (PLUS_EXPR, rli->offset,
					    fold_convert (sizetype, d));
		  rli->bitpos = bitsize_zero_node;

		  rli->offset = round_up (rli->offset,
					  desired_align / BITS_PER_UNIT);
		}

	      if (! TREE_CONSTANT (rli->offset))
		rli->offset_align = desired_align;
	    }

	  normalize_rli (rli);
	}

      /* If we're starting a new run of same type size bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield the type sizes differ, or if there wasn't
	 a prior bitfield the size of the current field is nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
	      && tree_fits_uhwi_p (DECL_SIZE (field)))
	    {
	      unsigned HOST_WIDE_INT bitsize
		= tree_to_uhwi (DECL_SIZE (field));
	      unsigned HOST_WIDE_INT typesize
		= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  if (! DECL_PACKED (field))
	    type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
  handle_warn_if_not_align (field, rli->record_align);

  /* Evaluate nonconstant offsets only once, either now or as soon as safe.  */
  if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
    DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
    actual_align = (BITS_PER_UNIT
		    * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record* .
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      /* Variable-sized field: round the bit position up to a byte and
	 account for the size on the byte offset only.  */
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);

      if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
			  bitsize_int (rli->offset_align)))
	{
	  tree type = strip_array_types (TREE_TYPE (field));
	  /* The above adjusts offset_align just based on the start of the
	     field.  The field might not have a size that is a multiple of
	     that offset_align though.  If the field is an array of fixed
	     sized elements, assume there can be any multiple of those
	     sizes.  If it is a variable length aggregate or array of
	     variable length aggregates, assume worst that the end is
	     just BITS_PER_UNIT aligned.  */
	  if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
	    {
	      if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
		{
		  unsigned HOST_WIDE_INT sz
		    = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
		  rli->offset_align = MIN (rli->offset_align, sz);
		}
	    }
	  else
	    rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
	}
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If FIELD is the last field and doesn't end at the full length
	 of the type then pad the struct out to the full length of the
	 last type.  */
      if (DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	{
	  /* We have to scan, because non-field DECLS are also here.  */
	  tree probe = field;
	  while ((probe = DECL_CHAIN (probe)))
	    if (TREE_CODE (probe) == FIELD_DECL)
	      break;
	  if (!probe)
	    rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				      bitsize_int (rli->remaining_in_alignment));
	}

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
1747
1748 /* Assuming that all the fields have been laid out, this function uses
1749 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1750 indicated by RLI. */
1751
static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					    rli->record_align));
#else
  SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    /* Round a trailing partial byte up to a whole byte.  */
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  /* -Wpadded: note when rounding to the alignment grew the type.  */
  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION
      && !TYPE_ARTIFICIAL (rli->t))
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  /* -Wpacked: warn when packing a record made no difference to its
     size, i.e. the attribute was useless (or even harmful on strict
     alignment targets).  */
  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  if (TYPE_NAME (rli->t))
	    {
	      tree name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = TYPE_NAME (rli->t);
	      else
		name = DECL_NAME (TYPE_NAME (rli->t));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qE", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qE", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}
1833
/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).
   On entry TYPE_SIZE (TYPE) must already be computed; the mode is
   left as BLKmode unless the whole record can live in a scalar
   register mode.  */

void
compute_record_mode (tree type)
{
  tree field;
  /* Mode of a field that spans the entire record, if any; used as a
     candidate scalar mode for the record itself.  */
  machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  /* A record whose size is not a compile-time (poly-)constant can
     never be given a scalar mode; keep BLKmode.  */
  poly_uint64 type_size;
  if (!poly_int_tree_p (TYPE_SIZE (type), &type_size))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      /* Give up (keeping BLKmode) on erroneous fields, on non-empty
         BLKmode fields that are forced to be BLKmode, and on fields
         whose position or size is not a known constant.  */
      poly_uint64 field_size;
      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || !tree_fits_poly_uint64_p (bit_position (field))
          || DECL_SIZE (field) == 0
          || !poly_int_tree_p (DECL_SIZE (field), &field_size))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (known_eq (field_size, type_size)
          /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
             wider types (e.g. int32), despite precision being less.  Ensure
             that the TYPE_MODE of the struct does not get set to the partial
             int mode if there is a wider type also in the struct.  */
          && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
                       GET_MODE_PRECISION (mode)))
        mode = DECL_MODE (field);

      /* With some targets, it is sub-optimal to access an aligned
         BLKmode structure as a scalar.  */
      if (targetm.member_type_forces_blk (field, mode))
        return;
    }

  /* If we only have one real field; use its mode if that mode's size
     matches the type's size.  This generally only applies to RECORD_TYPE.
     For UNION_TYPE, if the widest field is MODE_INT then use that mode.
     If the widest field is MODE_PARTIAL_INT, and the union will be passed
     by reference, then use that mode.  */
  if ((TREE_CODE (type) == RECORD_TYPE
       || (TREE_CODE (type) == UNION_TYPE
           && (GET_MODE_CLASS (mode) == MODE_INT
               || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
                   && (targetm.calls.pass_by_reference
                       (pack_cumulative_args (0),
                        function_arg_info (type, mode, /*named=*/false)))))))
      && mode != VOIDmode
      && known_eq (GET_MODE_BITSIZE (mode), type_size))
    ;
  else
    /* Otherwise fall back to an integer mode of the record's size,
       or BLKmode if no such mode exists.  */
    mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (mode != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      mode = BLKmode;
    }

  SET_TYPE_MODE (type, mode);
}
1922
/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  Possibly raises the alignment to the mode's alignment, derives
   TYPE_SIZE_UNIT from TYPE_SIZE when missing, rounds both sizes up to
   the alignment, and copies the final layout to all variants of TYPE.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */
  /* Set when we clear TYPE_USER_ALIGN below, so the variant loop can
     clear it on variants as well.  */
  bool tua_cleared_p = false;
  if (TYPE_MODE (type) != BLKmode
      && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
         alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
        {
          SET_TYPE_ALIGN (type, mode_align);
          /* Remember that we're about to reset this flag.  */
          tua_cleared_p = TYPE_USER_ALIGN (type);
          TYPE_USER_ALIGN (type) = false;
        }
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  SET_TYPE_ALIGN (type,
                  ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
                      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                                  bitsize_unit_node));

  /* Round both sizes up to the final alignment.  */
  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type)
        = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Handle empty records as per the x86-64 psABI.  */
  TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int precision = TYPE_PRECISION (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      machine_mode mode = TYPE_MODE (type);
      bool empty_p = TYPE_EMPTY_P (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != NULL_TREE;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          unsigned valign = align;
          if (TYPE_USER_ALIGN (variant))
            {
              valign = MAX (valign, TYPE_ALIGN (variant));
              /* If we reset TYPE_USER_ALIGN on the main variant, we might
                 need to reset it on the variants too.  TYPE_MODE will be set
                 to MODE in this variant, so we can use that.  */
              if (tua_cleared_p && GET_MODE_ALIGNMENT (mode) >= valign)
                TYPE_USER_ALIGN (variant) = false;
            }
          else
            TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_ALIGN (variant, valign);
          TYPE_PRECISION (variant) = precision;
          SET_TYPE_MODE (variant, mode);
          TYPE_EMPTY_P (variant) = empty_p;
        }
    }
}
2025
2026 /* Return a new underlying object for a bitfield started with FIELD. */
2027
2028 static tree
2029 start_bitfield_representative (tree field)
2030 {
2031 tree repr = make_node (FIELD_DECL);
2032 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
2033 /* Force the representative to begin at a BITS_PER_UNIT aligned
2034 boundary - C++ may use tail-padding of a base object to
2035 continue packing bits so the bitfield region does not start
2036 at bit zero (see g++.dg/abi/bitfield5.C for example).
2037 Unallocated bits may happen for other reasons as well,
2038 for example Ada which allows explicit bit-granular structure layout. */
2039 DECL_FIELD_BIT_OFFSET (repr)
2040 = size_binop (BIT_AND_EXPR,
2041 DECL_FIELD_BIT_OFFSET (field),
2042 bitsize_int (~(BITS_PER_UNIT - 1)));
2043 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
2044 DECL_SIZE (repr) = DECL_SIZE (field);
2045 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
2046 DECL_PACKED (repr) = DECL_PACKED (field);
2047 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
2048 /* There are no indirect accesses to this field. If we introduce
2049 some then they have to use the record alias set. This makes
2050 sure to properly conflict with [indirect] accesses to addressable
2051 fields of the bitfield group. */
2052 DECL_NONADDRESSABLE_P (repr) = 1;
2053 return repr;
2054 }
2055
/* Finish up a bitfield group that was started by creating the underlying
   object REPR with the last field in the bitfield group FIELD.
   Computes the representative's size, mode and type: the smallest
   integer mode covering the group if one fits, otherwise BLKmode with
   an unsigned char array type.  */

static void
finish_bitfield_representative (tree repr, tree field)
{
  unsigned HOST_WIDE_INT bitsize, maxbitsize;
  tree nextf, size;

  /* Distance in bytes from the start of REPR to the start of FIELD;
     used below to compute the group's extent in bits.  */
  size = size_diffop (DECL_FIELD_OFFSET (field),
                      DECL_FIELD_OFFSET (repr));
  while (TREE_CODE (size) == COMPOUND_EXPR)
    size = TREE_OPERAND (size, 1);
  gcc_assert (tree_fits_uhwi_p (size));
  /* BITSIZE is the extent of the group from the start of REPR to the
     end of FIELD, in bits.  */
  bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
             + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
             - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
             + tree_to_uhwi (DECL_SIZE (field)));

  /* Round up bitsize to multiples of BITS_PER_UNIT.  */
  bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);

  /* Now nothing tells us how to pad out bitsize ...  */
  if (TREE_CODE (DECL_CONTEXT (field)) == RECORD_TYPE)
    {
      /* Find the next FIELD_DECL after FIELD; the group may not
         overlap it.  */
      nextf = DECL_CHAIN (field);
      while (nextf && TREE_CODE (nextf) != FIELD_DECL)
        nextf = DECL_CHAIN (nextf);
    }
  else
    nextf = NULL_TREE;
  if (nextf)
    {
      tree maxsize;
      /* If there was an error, the field may be not laid out
         correctly.  Don't bother to do anything.  */
      if (TREE_TYPE (nextf) == error_mark_node)
        {
          TREE_TYPE (repr) = error_mark_node;
          return;
        }
      maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
                             DECL_FIELD_OFFSET (repr));
      if (tree_fits_uhwi_p (maxsize))
        {
          maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
                        + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
                        - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
          /* If the group ends within a bitfield nextf does not need to be
             aligned to BITS_PER_UNIT.  Thus round up.  */
          maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
        }
      else
        maxbitsize = bitsize;
    }
  else
    {
      /* Note that if the C++ FE sets up tail-padding to be re-used it
         creates a as-base variant of the type with TYPE_SIZE adjusted
         accordingly.  So it is safe to include tail-padding here.  */
      tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
                        (DECL_CONTEXT (field));
      tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
      /* We cannot generally rely on maxsize to fold to an integer constant,
         so use bitsize as fallback for this case.  */
      if (tree_fits_uhwi_p (maxsize))
        maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
                      - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      else
        maxbitsize = bitsize;
    }

  /* Only if we don't artificially break up the representative in
     the middle of a large bitfield with different possibly
     overlapping representatives.  And all representatives start
     at byte offset.  */
  gcc_assert (maxbitsize % BITS_PER_UNIT == 0);

  /* Find the smallest nice mode to use.  */
  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
      break;

  scalar_int_mode mode;
  if (!mode_iter.exists (&mode)
      || GET_MODE_BITSIZE (mode) > maxbitsize
      || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
    {
      /* We really want a BLKmode representative only as a last resort,
         considering the member b in
           struct { int a : 7; int b : 17; int c; } __attribute__((packed));
         Otherwise we simply want to split the representative up
         allowing for overlaps within the bitfield region as required for
           struct { int a : 7; int b : 7;
                    int c : 10; int d; } __attribute__((packed));
         [0, 15] HImode for a and b, [8, 23] HImode for c.  */
      DECL_SIZE (repr) = bitsize_int (bitsize);
      DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
      SET_DECL_MODE (repr, BLKmode);
      TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
                                                 bitsize / BITS_PER_UNIT);
    }
  else
    {
      unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
      DECL_SIZE (repr) = bitsize_int (modesize);
      DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
      SET_DECL_MODE (repr, mode);
      TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
    }

  /* Remember whether the bitfield group is at the end of the
     structure or not.  */
  DECL_CHAIN (repr) = nextf;
}
2172
/* Compute and set FIELD_DECLs for the underlying objects we should
   use for bitfield access for the structure T.  Walks the field chain,
   grouping consecutive bitfields under a shared representative
   FIELD_DECL stored in DECL_BIT_FIELD_REPRESENTATIVE.  */

void
finish_bitfield_layout (tree t)
{
  tree field, prev;
  /* The representative currently being built, or NULL_TREE when no
     bitfield group is open.  */
  tree repr = NULL_TREE;

  /* QUAL_UNION_TYPEs get no representatives.  */
  if (TREE_CODE (t) == QUAL_UNION_TYPE)
    return;

  for (prev = NULL_TREE, field = TYPE_FIELDS (t);
       field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      /* In the C++ memory model, consecutive bit fields in a structure are
         considered one memory location and updating a memory location
         may not store into adjacent memory locations.  */
      if (!repr
          && DECL_BIT_FIELD_TYPE (field))
        {
          /* Start new representative.  */
          repr = start_bitfield_representative (field);
        }
      else if (repr
               && ! DECL_BIT_FIELD_TYPE (field))
        {
          /* Finish off new representative.  */
          finish_bitfield_representative (repr, prev);
          repr = NULL_TREE;
        }
      else if (DECL_BIT_FIELD_TYPE (field))
        {
          gcc_assert (repr != NULL_TREE);

          /* Zero-size bitfields finish off a representative and
             do not have a representative themselves.  This is
             required by the C++ memory model.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              finish_bitfield_representative (repr, prev);
              repr = NULL_TREE;
            }

          /* We assume that either DECL_FIELD_OFFSET of the representative
             and each bitfield member is a constant or they are equal.
             This is because we need to be able to compute the bit-offset
             of each field relative to the representative in get_bit_range
             during RTL expansion.
             If these constraints are not met, simply force a new
             representative to be generated.  That will at most
             generate worse code but still maintain correctness with
             respect to the C++ memory model.  */
          else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
                      && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
                     || operand_equal_p (DECL_FIELD_OFFSET (repr),
                                         DECL_FIELD_OFFSET (field), 0)))
            {
              finish_bitfield_representative (repr, prev);
              repr = start_bitfield_representative (field);
            }
        }
      else
        continue;

      if (repr)
        DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;

      /* In unions every field starts the record over, so each open
         group is finished immediately; in records remember the last
         field so the group can be closed later.  */
      if (TREE_CODE (t) == RECORD_TYPE)
        prev = field;
      else if (repr)
        {
          finish_bitfield_representative (repr, field);
          repr = NULL_TREE;
        }
    }

  /* Close a group still open at the end of the field chain.  */
  if (repr)
    finish_bitfield_representative (repr, prev);
}
2256
2257 /* Do all of the work required to layout the type indicated by RLI,
2258 once the fields have been laid out. This function will call `free'
2259 for RLI, unless FREE_P is false. Passing a value other than false
2260 for FREE_P is bad practice; this option only exists to support the
2261 G++ 3.2 ABI. */
2262
2263 void
2264 finish_record_layout (record_layout_info rli, int free_p)
2265 {
2266 tree variant;
2267
2268 /* Compute the final size. */
2269 finalize_record_size (rli);
2270
2271 /* Compute the TYPE_MODE for the record. */
2272 compute_record_mode (rli->t);
2273
2274 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2275 finalize_type_size (rli->t);
2276
2277 /* Compute bitfield representatives. */
2278 finish_bitfield_layout (rli->t);
2279
2280 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
2281 With C++ templates, it is too early to do this when the attribute
2282 is being parsed. */
2283 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2284 variant = TYPE_NEXT_VARIANT (variant))
2285 {
2286 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2287 TYPE_REVERSE_STORAGE_ORDER (variant)
2288 = TYPE_REVERSE_STORAGE_ORDER (rli->t);
2289 }
2290
2291 /* Lay out any static members. This is done now because their type
2292 may use the record's type. */
2293 while (!vec_safe_is_empty (rli->pending_statics))
2294 layout_decl (rli->pending_statics->pop (), 0);
2295
2296 /* Clean up. */
2297 if (free_p)
2298 {
2299 vec_free (rli->pending_statics);
2300 free (rli);
2301 }
2302 }
2303
2304
2306 /* Finish processing a builtin RECORD_TYPE type TYPE. It's name is
2307 NAME, its fields are chained in reverse on FIELDS.
2308
2309 If ALIGN_TYPE is non-null, it is given the same alignment as
2310 ALIGN_TYPE. */
2311
2312 void
2313 finish_builtin_struct (tree type, const char *name, tree fields,
2314 tree align_type)
2315 {
2316 tree tail, next;
2317
2318 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2319 {
2320 DECL_FIELD_CONTEXT (fields) = type;
2321 next = DECL_CHAIN (fields);
2322 DECL_CHAIN (fields) = tail;
2323 }
2324 TYPE_FIELDS (type) = tail;
2325
2326 if (align_type)
2327 {
2328 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
2329 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2330 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2331 TYPE_WARN_IF_NOT_ALIGN (align_type));
2332 }
2333
2334 layout_type (type);
2335 #if 0 /* not yet, should get fixed properly later */
2336 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2337 #else
2338 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2339 TYPE_DECL, get_identifier (name), type);
2340 #endif
2341 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2342 layout_decl (TYPE_NAME (type), 0);
2343 }
2344
/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* We don't want finalize_type_size to copy an alignment attribute to
     variants that don't have it.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      {
        /* Pick the narrowest integer mode that holds the precision.  */
        scalar_int_mode mode
          = smallest_int_mode_for_size (TYPE_PRECISION (type));
        SET_TYPE_MODE (type, mode);
        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        /* Don't set TYPE_PRECISION here, as it may be set by a bitfield.  */
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        break;
      }

    case REAL_TYPE:
      {
        /* Allow the caller to choose the type mode, which is how decimal
           floats are distinguished from binary ones.  */
        if (TYPE_MODE (type) == VOIDmode)
          SET_TYPE_MODE
            (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
        scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        break;
      }

    case FIXED_POINT_TYPE:
      {
        /* TYPE_MODE (type) has been set already.  */
        scalar_mode mode = SCALAR_TYPE_MODE (type);
        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        break;
      }

    case COMPLEX_TYPE:
      /* Derive the mode from the component type's mode.  */
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
                     GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));

      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
        tree innertype = TREE_TYPE (type);

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          SET_TYPE_MODE (type,
                         mode_for_vector (SCALAR_TYPE_MODE (innertype),
                                          nunits).else_blk ());

        TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        /* Several boolean vector elements may fit in a single unit.  */
        if (VECTOR_BOOLEAN_TYPE_P (type)
            && type->type_common.mode != BLKmode)
          TYPE_SIZE_UNIT (type)
            = size_int (GET_MODE_SIZE (type->type_common.mode));
        else
          TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                   TYPE_SIZE_UNIT (innertype),
                                                   size_int (nunits));
        TYPE_SIZE (type) = int_const_binop
          (MULT_EXPR,
           bits_from_bytes (TYPE_SIZE_UNIT (type)),
           bitsize_int (BITS_PER_UNIT));

        /* For vector types, we do not default to the mode's alignment.
           Instead, query a target hook, defaulting to natural alignment.
           This prevents ABI changes depending on whether or not native
           vector modes are supported.  */
        SET_TYPE_ALIGN (type, targetm.vector_alignment (type));

        /* However, if the underlying mode requires a bigger alignment than
           what the target hook provides, we cannot use the mode.  For now,
           simply reject that case.  */
        gcc_assert (TYPE_ALIGN (type)
                    >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      SET_TYPE_ALIGN (type, 1);
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
      /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
         integral, which may be an __intN.  */
      SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      SET_TYPE_MODE (type,
                     int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
      }
      break;

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree element_size = TYPE_SIZE (element);
            tree length;

            /* Make sure that an array of zero-sized element is zero-sized
               regardless of its extent.  */
            if (integer_zerop (element_size))
              length = size_zero_node;

            /* The computation should happen in the original signedness so
               that (possible) negative values are handled appropriately
               when determining overflow.  */
            else
              {
                /* ??? When it is obvious that the range is signed
                   represent it using ssizetype.  */
                if (TREE_CODE (lb) == INTEGER_CST
                    && TREE_CODE (ub) == INTEGER_CST
                    && TYPE_UNSIGNED (TREE_TYPE (lb))
                    && tree_int_cst_lt (ub, lb))
                  {
                    lb = wide_int_to_tree (ssizetype,
                                           offset_int::from (wi::to_wide (lb),
                                                             SIGNED));
                    ub = wide_int_to_tree (ssizetype,
                                           offset_int::from (wi::to_wide (ub),
                                                             SIGNED));
                  }
                /* Number of elements is ub - lb + 1.  */
                length
                  = fold_convert (sizetype,
                                  size_binop (PLUS_EXPR,
                                              build_int_cst (TREE_TYPE (lb), 1),
                                              size_binop (MINUS_EXPR, ub, lb)));
              }

            /* ??? We have no way to distinguish a null-sized array from an
               array spanning the whole sizetype range, so we arbitrarily
               decide that [0, -1] is the only valid representation.  */
            if (integer_zerop (length)
                && TREE_OVERFLOW (length)
                && integer_zerop (lb))
              length = size_zero_node;

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           bits_from_bytes (length));

            /* If we know the size of the element, calculate the total size
               directly, rather than do some division thing below.  This
               optimization helps Fortran assumed-size arrays (where the
               size of the array is determined at runtime) substantially.  */
            if (TYPE_SIZE_UNIT (element))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

        unsigned align = TYPE_ALIGN (element);
        if (TYPE_USER_ALIGN (type))
          align = MAX (align, TYPE_ALIGN (type));
        else
          TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        if (!TYPE_WARN_IF_NOT_ALIGN (type))
          SET_TYPE_WARN_IF_NOT_ALIGN (type,
                                      TYPE_WARN_IF_NOT_ALIGN (element));
#ifdef ROUND_TYPE_ALIGN
        align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
#else
        align = MAX (align, BITS_PER_UNIT);
#endif
        SET_TYPE_ALIGN (type, align);
        SET_TYPE_MODE (type, BLKmode);
        if (TYPE_SIZE (type) != 0
            && ! targetm.member_type_forces_blk (type, VOIDmode)
            /* BLKmode elements force BLKmode aggregate;
               else extract/store fields may lose.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
                                                 TYPE_SIZE (type)));
            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                /* The chosen mode would need more alignment than the
                   array has; fall back to BLKmode but don't force
                   containing types to be BLKmode.  */
                TYPE_NO_FORCE_BLK (type) = 1;
                SET_TYPE_MODE (type, BLKmode);
              }
          }
        if (AGGREGATE_TYPE_P (element))
          TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element)))
          {
            if (compare_tree_int (TYPE_SIZE_UNIT (element),
                                  TYPE_ALIGN_UNIT (element)) < 0)
              error ("alignment of array elements is greater than "
                     "element size");
            else if (TYPE_ALIGN_UNIT (element) > 1
                     && (wi::zext (wi::to_wide (TYPE_SIZE_UNIT (element)),
                                   ffs_hwi (TYPE_ALIGN_UNIT (element)) - 1)
                         != 0))
              error ("size of array element is not a multiple of its "
                     "alignment");
          }
        break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in the reverse order in building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (!RECORD_OR_UNION_TYPE_P (type))
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates.  And we
     should not call layout_type on not incomplete aggregates.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}
2669
2670 /* Return the least alignment required for type TYPE. */
2671
2672 unsigned int
2673 min_align_of_type (tree type)
2674 {
2675 unsigned int align = TYPE_ALIGN (type);
2676 if (!TYPE_USER_ALIGN (type))
2677 {
2678 align = MIN (align, BIGGEST_ALIGNMENT);
2679 #ifdef BIGGEST_FIELD_ALIGNMENT
2680 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2681 #endif
2682 unsigned int field_align = align;
2683 #ifdef ADJUST_FIELD_ALIGN
2684 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
2685 #endif
2686 align = MIN (align, field_align);
2687 }
2688 return align / BITS_PER_UNIT;
2689 }
2690
2691 /* Create and return a type for signed integers of PRECISION bits. */
2693
2694 tree
2695 make_signed_type (int precision)
2696 {
2697 tree type = make_node (INTEGER_TYPE);
2698
2699 TYPE_PRECISION (type) = precision;
2700
2701 fixup_signed_type (type);
2702 return type;
2703 }
2704
2705 /* Create and return a type for unsigned integers of PRECISION bits. */
2706
2707 tree
2708 make_unsigned_type (int precision)
2709 {
2710 tree type = make_node (INTEGER_TYPE);
2711
2712 TYPE_PRECISION (type) = precision;
2713
2714 fixup_unsigned_type (type);
2715 return type;
2716 }
2717
2718 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2720 and SATP. */
2721
2722 tree
2723 make_fract_type (int precision, int unsignedp, int satp)
2724 {
2725 tree type = make_node (FIXED_POINT_TYPE);
2726
2727 TYPE_PRECISION (type) = precision;
2728
2729 if (satp)
2730 TYPE_SATURATING (type) = 1;
2731
2732 /* Lay out the type: set its alignment, size, etc. */
2733 TYPE_UNSIGNED (type) = unsignedp;
2734 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
2735 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2736 layout_type (type);
2737
2738 return type;
2739 }
2740
2741 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2742 and SATP. */
2743
2744 tree
2745 make_accum_type (int precision, int unsignedp, int satp)
2746 {
2747 tree type = make_node (FIXED_POINT_TYPE);
2748
2749 TYPE_PRECISION (type) = precision;
2750
2751 if (satp)
2752 TYPE_SATURATING (type) = 1;
2753
2754 /* Lay out the type: set its alignment, size, etc. */
2755 TYPE_UNSIGNED (type) = unsignedp;
2756 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
2757 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2758 layout_type (type);
2759
2760 return type;
2761 }
2762
2763 /* Initialize sizetypes so layout_type can use them. */
2764
2765 void
2766 initialize_sizetypes (void)
2767 {
2768 int precision, bprecision;
2769
2770 /* Get sizetypes precision from the SIZE_TYPE target macro. */
2771 if (strcmp (SIZETYPE, "unsigned int") == 0)
2772 precision = INT_TYPE_SIZE;
2773 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2774 precision = LONG_TYPE_SIZE;
2775 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2776 precision = LONG_LONG_TYPE_SIZE;
2777 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2778 precision = SHORT_TYPE_SIZE;
2779 else
2780 {
2781 int i;
2782
2783 precision = -1;
2784 for (i = 0; i < NUM_INT_N_ENTS; i++)
2785 if (int_n_enabled_p[i])
2786 {
2787 char name[50], altname[50];
2788 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2789 sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);
2790
2791 if (strcmp (name, SIZETYPE) == 0
2792 || strcmp (altname, SIZETYPE) == 0)
2793 {
2794 precision = int_n_data[i].bitsize;
2795 }
2796 }
2797 if (precision == -1)
2798 gcc_unreachable ();
2799 }
2800
2801 bprecision
2802 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
2803 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
2804 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2805 bprecision = HOST_BITS_PER_DOUBLE_INT;
2806
2807 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2808 sizetype = make_node (INTEGER_TYPE);
2809 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2810 TYPE_PRECISION (sizetype) = precision;
2811 TYPE_UNSIGNED (sizetype) = 1;
2812 bitsizetype = make_node (INTEGER_TYPE);
2813 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2814 TYPE_PRECISION (bitsizetype) = bprecision;
2815 TYPE_UNSIGNED (bitsizetype) = 1;
2816
2817 /* Now layout both types manually. */
2818 scalar_int_mode mode = smallest_int_mode_for_size (precision);
2819 SET_TYPE_MODE (sizetype, mode);
2820 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
2821 TYPE_SIZE (sizetype) = bitsize_int (precision);
2822 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
2823 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2824
2825 mode = smallest_int_mode_for_size (bprecision);
2826 SET_TYPE_MODE (bitsizetype, mode);
2827 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
2828 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2829 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
2830 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2831
2832 /* Create the signed variants of *sizetype. */
2833 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2834 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2835 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2836 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2837 }
2838
2839 /* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
2841 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2842 for TYPE, based on the PRECISION and whether or not the TYPE
2843 IS_UNSIGNED. PRECISION need not correspond to a width supported
2844 natively by the hardware; for example, on a machine with 8-bit,
2845 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2846 61. */
2847
2848 void
2849 set_min_and_max_values_for_integral_type (tree type,
2850 int precision,
2851 signop sgn)
2852 {
2853 /* For bitfields with zero width we end up creating integer types
2854 with zero precision. Don't assign any minimum/maximum values
2855 to those types, they don't have any valid value. */
2856 if (precision < 1)
2857 return;
2858
2859 gcc_assert (precision <= WIDE_INT_MAX_PRECISION);
2860
2861 TYPE_MIN_VALUE (type)
2862 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2863 TYPE_MAX_VALUE (type)
2864 = wide_int_to_tree (type, wi::max_value (precision, sgn));
2865 }
2866
2867 /* Set the extreme values of TYPE based on its precision in bits,
2868 then lay it out. Used when make_signed_type won't do
2869 because the tree code is not INTEGER_TYPE. */
2870
2871 void
2872 fixup_signed_type (tree type)
2873 {
2874 int precision = TYPE_PRECISION (type);
2875
2876 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2877
2878 /* Lay out the type: set its alignment, size, etc. */
2879 layout_type (type);
2880 }
2881
2882 /* Set the extreme values of TYPE based on its precision in bits,
2883 then lay it out. This is used both in `make_unsigned_type'
2884 and for enumeral types. */
2885
2886 void
2887 fixup_unsigned_type (tree type)
2888 {
2889 int precision = TYPE_PRECISION (type);
2890
2891 TYPE_UNSIGNED (type) = 1;
2892
2893 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2894
2895 /* Lay out the type: set its alignment, size, etc. */
2896 layout_type (type);
2897 }
2898
/* Construct an iterator for a bitfield that spans BITSIZE bits,
   starting at BITPOS.

   BITREGION_START is the bit position of the first bit in this
   sequence of bit fields.  BITREGION_END is the last bit in this
   sequence.  If these two fields are non-zero, we should restrict the
   memory access to that range.  Otherwise, we are allowed to touch
   any adjacent non bit-fields.

   ALIGN is the alignment of the underlying object in bits.
   VOLATILEP says whether the bitfield is volatile.

   Iteration starts from the narrowest integer mode and widens on each
   call to next_mode.  */

bit_field_mode_iterator
::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
			   poly_int64 bitregion_start,
			   poly_int64 bitregion_end,
			   unsigned int align, bool volatilep)
: m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
  m_bitpos (bitpos), m_bitregion_start (bitregion_start),
  m_bitregion_end (bitregion_end), m_align (align),
  m_volatilep (volatilep), m_count (0)
{
  /* No explicit bitregion was given: synthesize one from ALIGN so
     next_mode still has an upper bound on what it may touch.  */
  if (known_eq (m_bitregion_end, 0))
    {
      /* We can assume that any aligned chunk of ALIGN bits that overlaps
	 the bitfield is mapped and won't trap, provided that ALIGN isn't
	 too large.  The cap is the biggest required alignment for data,
	 or at least the word size.  And force one such chunk at least.  */
      unsigned HOST_WIDE_INT units
	= MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
      if (bitsize <= 0)
	bitsize = 1;
      /* End of the UNITS-aligned chunk containing the last bit of the
	 (at-least-one-bit) field.  */
      HOST_WIDE_INT end = bitpos + bitsize + units - 1;
      m_bitregion_end = end - end % units - 1;
    }
}
2936
/* Calls to this function return successively larger modes that can be used
   to represent the bitfield.  Return true if another bitfield mode is
   available, storing it in *OUT_MODE if so.  Returns false once no wider
   acceptable mode exists; the iterator is then exhausted.  */

bool
bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
{
  scalar_int_mode mode;
  for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);

      /* Skip modes that don't have full precision.  */
      if (unit != GET_MODE_PRECISION (mode))
	continue;

      /* Stop if the mode is too wide to handle efficiently.  */
      if (unit > MAX_FIXED_MODE_SIZE)
	break;

      /* Don't deliver more than one multiword mode; the smallest one
	 should be used.  */
      if (m_count > 0 && unit > BITS_PER_WORD)
	break;

      /* Skip modes that are too small: the field must fit entirely
	 within one UNIT-aligned chunk of this mode.  SUBSTART is the
	 field's bit offset within that chunk.  */
      unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
      unsigned HOST_WIDE_INT subend = substart + m_bitsize;
      if (subend > unit)
	continue;

      /* Stop if the mode goes outside the bitregion.  */
      HOST_WIDE_INT start = m_bitpos - substart;
      if (maybe_ne (m_bitregion_start, 0)
	  && maybe_lt (start, m_bitregion_start))
	break;
      HOST_WIDE_INT end = start + unit;
      if (maybe_gt (end, m_bitregion_end + 1))
	break;

      /* Stop if the mode requires too much alignment.  */
      if (GET_MODE_ALIGNMENT (mode) > m_align
	  && targetm.slow_unaligned_access (mode, m_align))
	break;

      /* This mode works; remember where to resume on the next call.  */
      *out_mode = mode;
      m_mode = GET_MODE_WIDER_MODE (mode);
      m_count++;
      return true;
    }
  return false;
}
2989
2990 /* Return true if smaller modes are generally preferred for this kind
2991 of bitfield. */
2992
2993 bool
2994 bit_field_mode_iterator::prefer_smaller_modes ()
2995 {
2996 return (m_volatilep
2997 ? targetm.narrow_volatile_bitfield ()
2998 : !SLOW_BYTE_ACCESS);
2999 }
3000
/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.  Return true and store the mode in
   *BEST_MODE on success; return false if no mode qualifies.

   BITREGION_START is the bit position of the first bit in this
   sequence of bit fields.  BITREGION_END is the last bit in this
   sequence.  If these two fields are non-zero, we should restrict the
   memory access to that range.  Otherwise, we are allowed to touch
   any adjacent non bit-fields.

   The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
   INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
   doesn't want to apply a specific limit.

   If no mode meets all these conditions, we return VOIDmode.

   The underlying object is known to be aligned to a boundary of ALIGN bits.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
   decide which of the above modes should be used.  */

bool
get_best_mode (int bitsize, int bitpos,
	       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
	       unsigned int align,
	       unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
	       scalar_int_mode *best_mode)
{
  bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
				bitregion_end, align, volatilep);
  scalar_int_mode mode;
  bool found = false;
  /* Walk candidate modes narrowest-first; remember the last acceptable
     one, stopping early when small modes are preferred.  */
  while (iter.next_mode (&mode)
	 /* ??? For historical reasons, reject modes that would normally
	    receive greater alignment, even if unaligned accesses are
	    acceptable.  This has both advantages and disadvantages.
	    Removing this check means that something like:

	       struct s { unsigned int x; unsigned int y; };
	       int f (struct s *s) { return s->x == 0 && s->y == 0; }

	    can be implemented using a single load and compare on
	    64-bit machines that have no alignment restrictions.
	    For example, on powerpc64-linux-gnu, we would generate:

		    ld 3,0(3)
		    cntlzd 3,3
		    srdi 3,3,6
		    blr

	    rather than:

		    lwz 9,0(3)
		    cmpwi 7,9,0
		    bne 7,.L3
		    lwz 3,4(3)
		    cntlzw 3,3
		    srwi 3,3,5
		    extsw 3,3
		    blr
		    .p2align 4,,15
	    .L3:
		    li 3,0
		    blr

	    However, accessing more than one field can make life harder
	    for the gimple optimizers.  For example, gcc.dg/vect/bb-slp-5.c
	    has a series of unsigned short copies followed by a series of
	    unsigned short comparisons.  With this check, both the copies
	    and comparisons remain 16-bit accesses and FRE is able
	    to eliminate the latter.  Without the check, the comparisons
	    can be done using 2 64-bit operations, which FRE isn't able
	    to handle in the same way.

	    Either way, it would probably be worth disabling this check
	    during expand.  One particular example where removing the
	    check would help is the get_best_mode call in store_bit_field.
	    If we are given a memory bitregion of 128 bits that is aligned
	    to a 64-bit boundary, and the bitfield we want to modify is
	    in the second half of the bitregion, this check causes
	    store_bitfield to turn the memory into a 64-bit reference
	    to the _first_ half of the region.  We later use
	    adjust_bitfield_address to get a reference to the correct half,
	    but doing so looks to adjust_bitfield_address as though we are
	    moving past the end of the original object, so it drops the
	    associated MEM_EXPR and MEM_OFFSET.  Removing the check
	    causes store_bit_field to keep a 128-bit memory reference,
	    so that the final bitfield reference still has a MEM_EXPR
	    and MEM_OFFSET.  */
	 && GET_MODE_ALIGNMENT (mode) <= align
	 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
    {
      *best_mode = mode;
      found = true;
      if (iter.prefer_smaller_modes ())
	break;
    }

  return found;
}
3107
3108 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
3109 SIGN). The returned constants are made to be usable in TARGET_MODE. */
3110
3111 void
3112 get_mode_bounds (scalar_int_mode mode, int sign,
3113 scalar_int_mode target_mode,
3114 rtx *mmin, rtx *mmax)
3115 {
3116 unsigned size = GET_MODE_PRECISION (mode);
3117 unsigned HOST_WIDE_INT min_val, max_val;
3118
3119 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
3120
3121 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
3122 if (mode == BImode)
3123 {
3124 if (STORE_FLAG_VALUE < 0)
3125 {
3126 min_val = STORE_FLAG_VALUE;
3127 max_val = 0;
3128 }
3129 else
3130 {
3131 min_val = 0;
3132 max_val = STORE_FLAG_VALUE;
3133 }
3134 }
3135 else if (sign)
3136 {
3137 min_val = -(HOST_WIDE_INT_1U << (size - 1));
3138 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
3139 }
3140 else
3141 {
3142 min_val = 0;
3143 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
3144 }
3145
3146 *mmin = gen_int_mode (min_val, target_mode);
3147 *mmax = gen_int_mode (max_val, target_mode);
3148 }
3149
3150 #include "gt-stor-layout.h"
3151