/* stor-layout.cc — imported source, upstream revision 1.1.1.1.
   (Viewer navigation header removed; annotation prefixes are from the
   CVS annotate export, not part of the code.)  */
      1  1.1  mrg /* C-compiler utilities for types and variables storage layout
      2  1.1  mrg    Copyright (C) 1987-2022 Free Software Foundation, Inc.
      3  1.1  mrg 
      4  1.1  mrg This file is part of GCC.
      5  1.1  mrg 
      6  1.1  mrg GCC is free software; you can redistribute it and/or modify it under
      7  1.1  mrg the terms of the GNU General Public License as published by the Free
      8  1.1  mrg Software Foundation; either version 3, or (at your option) any later
      9  1.1  mrg version.
     10  1.1  mrg 
     11  1.1  mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
     12  1.1  mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
     13  1.1  mrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     14  1.1  mrg for more details.
     15  1.1  mrg 
     16  1.1  mrg You should have received a copy of the GNU General Public License
     17  1.1  mrg along with GCC; see the file COPYING3.  If not see
     18  1.1  mrg <http://www.gnu.org/licenses/>.  */
     19  1.1  mrg 
     20  1.1  mrg 
     21  1.1  mrg #include "config.h"
     22  1.1  mrg #include "system.h"
     23  1.1  mrg #include "coretypes.h"
     24  1.1  mrg #include "target.h"
     25  1.1  mrg #include "function.h"
     26  1.1  mrg #include "rtl.h"
     27  1.1  mrg #include "tree.h"
     28  1.1  mrg #include "memmodel.h"
     29  1.1  mrg #include "tm_p.h"
     30  1.1  mrg #include "stringpool.h"
     31  1.1  mrg #include "regs.h"
     32  1.1  mrg #include "emit-rtl.h"
     33  1.1  mrg #include "cgraph.h"
     34  1.1  mrg #include "diagnostic-core.h"
     35  1.1  mrg #include "fold-const.h"
     36  1.1  mrg #include "stor-layout.h"
     37  1.1  mrg #include "varasm.h"
     38  1.1  mrg #include "print-tree.h"
     39  1.1  mrg #include "langhooks.h"
     40  1.1  mrg #include "tree-inline.h"
     41  1.1  mrg #include "dumpfile.h"
     42  1.1  mrg #include "gimplify.h"
     43  1.1  mrg #include "attribs.h"
     44  1.1  mrg #include "debug.h"
     45  1.1  mrg #include "calls.h"
     46  1.1  mrg 
/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  Indexed by the stk_* enumerators;
   stk_type_kind_last gives the table size.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  Derived from the target's default
   -fpack-struct setting.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Forward declarations for the static layout helpers defined below.  */
static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
/* Debugging entry point; extern so it can be called from a debugger.  */
extern void debug_rli (record_layout_info);
     62  1.1  mrg 
     63  1.1  mrg /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
     65  1.1  mrg    to serve as the actual size-expression for a type or decl.  */
     66  1.1  mrg 
     67  1.1  mrg tree
     68  1.1  mrg variable_size (tree size)
     69  1.1  mrg {
     70  1.1  mrg   /* Obviously.  */
     71  1.1  mrg   if (TREE_CONSTANT (size))
     72  1.1  mrg     return size;
     73  1.1  mrg 
     74  1.1  mrg   /* If the size is self-referential, we can't make a SAVE_EXPR (see
     75  1.1  mrg      save_expr for the rationale).  But we can do something else.  */
     76  1.1  mrg   if (CONTAINS_PLACEHOLDER_P (size))
     77  1.1  mrg     return self_referential_size (size);
     78  1.1  mrg 
     79  1.1  mrg   /* If we are in the global binding level, we can't make a SAVE_EXPR
     80  1.1  mrg      since it may end up being shared across functions, so it is up
     81  1.1  mrg      to the front-end to deal with this case.  */
     82  1.1  mrg   if (lang_hooks.decls.global_bindings_p ())
     83  1.1  mrg     return size;
     84  1.1  mrg 
     85  1.1  mrg   return save_expr (size);
     86  1.1  mrg }
     87  1.1  mrg 
/* An array of functions used for self-referential size computation.
   Populated by self_referential_size and compiled/released by
   finalize_size_functions.  */
static GTY(()) vec<tree, va_gc> *size_functions;
     90  1.1  mrg 
     91  1.1  mrg /* Return true if T is a self-referential component reference.  */
     92  1.1  mrg 
     93  1.1  mrg static bool
     94  1.1  mrg self_referential_component_ref_p (tree t)
     95  1.1  mrg {
     96  1.1  mrg   if (TREE_CODE (t) != COMPONENT_REF)
     97  1.1  mrg     return false;
     98  1.1  mrg 
     99  1.1  mrg   while (REFERENCE_CLASS_P (t))
    100  1.1  mrg     t = TREE_OPERAND (t, 0);
    101  1.1  mrg 
    102  1.1  mrg   return (TREE_CODE (t) == PLACEHOLDER_EXPR);
    103  1.1  mrg }
    104  1.1  mrg 
    105  1.1  mrg /* Similar to copy_tree_r but do not copy component references involving
    106  1.1  mrg    PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
    107  1.1  mrg    and substituted in substitute_in_expr.  */
    108  1.1  mrg 
    109  1.1  mrg static tree
    110  1.1  mrg copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
    111  1.1  mrg {
    112  1.1  mrg   enum tree_code code = TREE_CODE (*tp);
    113  1.1  mrg 
    114  1.1  mrg   /* Stop at types, decls, constants like copy_tree_r.  */
    115  1.1  mrg   if (TREE_CODE_CLASS (code) == tcc_type
    116  1.1  mrg       || TREE_CODE_CLASS (code) == tcc_declaration
    117  1.1  mrg       || TREE_CODE_CLASS (code) == tcc_constant)
    118  1.1  mrg     {
    119  1.1  mrg       *walk_subtrees = 0;
    120  1.1  mrg       return NULL_TREE;
    121  1.1  mrg     }
    122  1.1  mrg 
    123  1.1  mrg   /* This is the pattern built in ada/make_aligning_type.  */
    124  1.1  mrg   else if (code == ADDR_EXPR
    125  1.1  mrg 	   && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    126  1.1  mrg     {
    127  1.1  mrg       *walk_subtrees = 0;
    128  1.1  mrg       return NULL_TREE;
    129  1.1  mrg     }
    130  1.1  mrg 
    131  1.1  mrg   /* Default case: the component reference.  */
    132  1.1  mrg   else if (self_referential_component_ref_p (*tp))
    133  1.1  mrg     {
    134  1.1  mrg       *walk_subtrees = 0;
    135  1.1  mrg       return NULL_TREE;
    136  1.1  mrg     }
    137  1.1  mrg 
    138  1.1  mrg   /* We're not supposed to have them in self-referential size trees
    139  1.1  mrg      because we wouldn't properly control when they are evaluated.
    140  1.1  mrg      However, not creating superfluous SAVE_EXPRs requires accurate
    141  1.1  mrg      tracking of readonly-ness all the way down to here, which we
    142  1.1  mrg      cannot always guarantee in practice.  So punt in this case.  */
    143  1.1  mrg   else if (code == SAVE_EXPR)
    144  1.1  mrg     return error_mark_node;
    145  1.1  mrg 
    146  1.1  mrg   else if (code == STATEMENT_LIST)
    147  1.1  mrg     gcc_unreachable ();
    148  1.1  mrg 
    149  1.1  mrg   return copy_tree_r (tp, walk_subtrees, data);
    150  1.1  mrg }
    151  1.1  mrg 
/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.

   The self-references (PLACEHOLDER_EXPR-based sub-expressions) are
   abstracted into parameters of a compiler-generated "size function",
   and a CALL_EXPR to that function replaces the original expression.
   The generated functions are queued in SIZE_FUNCTIONS and compiled at
   the very end of compilation by finalize_size_functions.  */

static tree
self_referential_size (tree size)
{
  /* Counter used to give each generated size function a unique name.  */
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  If the copy walk hits a
     SAVE_EXPR, copy_self_referential_tree_r returns error_mark_node and
     we punt by returning the original expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
	{
	  /* We shouldn't have true variables here.  */
	  gcc_assert (TREE_READONLY (ref));
	  subst = ref;
	}
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  Substitute on the FIELD_DECL
	 (operand 1) so substitute_in_expr finds it.  */
      else
	subst = TREE_OPERAND (ref, 1);

      /* Create an artificial, read-only PARM_DECL named p0, p1, ...  */
      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
	= build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration, named SZ0, SZ1, ...  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
    267  1.1  mrg 
    268  1.1  mrg /* Take, queue and compile all the size functions.  It is essential that
    269  1.1  mrg    the size functions be gimplified at the very end of the compilation
    270  1.1  mrg    in order to guarantee transparent handling of self-referential sizes.
    271  1.1  mrg    Otherwise the GENERIC inliner would not be able to inline them back
    272  1.1  mrg    at each of their call sites, thus creating artificial non-constant
    273  1.1  mrg    size expressions which would trigger nasty problems later on.  */
    274  1.1  mrg 
    275  1.1  mrg void
    276  1.1  mrg finalize_size_functions (void)
    277  1.1  mrg {
    278  1.1  mrg   unsigned int i;
    279  1.1  mrg   tree fndecl;
    280  1.1  mrg 
    281  1.1  mrg   for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    282  1.1  mrg     {
    283  1.1  mrg       allocate_struct_function (fndecl, false);
    284  1.1  mrg       set_cfun (NULL);
    285  1.1  mrg       dump_function (TDI_original, fndecl);
    286  1.1  mrg 
    287  1.1  mrg       /* As these functions are used to describe the layout of variable-length
    288  1.1  mrg          structures, debug info generation needs their implementation.  */
    289  1.1  mrg       debug_hooks->size_function (fndecl);
    290  1.1  mrg       gimplify_function_tree (fndecl);
    291  1.1  mrg       cgraph_node::finalize_function (fndecl, false);
    292  1.1  mrg     }
    293  1.1  mrg 
    294  1.1  mrg   vec_free (size_functions);
    295  1.1  mrg }
    296  1.1  mrg 
    297  1.1  mrg /* Return a machine mode of class MCLASS with SIZE bits of precision,
    299  1.1  mrg    if one exists.  The mode may have padding bits as well the SIZE
    300  1.1  mrg    value bits.  If LIMIT is nonzero, disregard modes wider than
    301  1.1  mrg    MAX_FIXED_MODE_SIZE.  */
    302  1.1  mrg 
    303  1.1  mrg opt_machine_mode
    304  1.1  mrg mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
    305  1.1  mrg {
    306  1.1  mrg   machine_mode mode;
    307  1.1  mrg   int i;
    308  1.1  mrg 
    309  1.1  mrg   if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    310  1.1  mrg     return opt_machine_mode ();
    311  1.1  mrg 
    312  1.1  mrg   /* Get the first mode which has this size, in the specified class.  */
    313  1.1  mrg   FOR_EACH_MODE_IN_CLASS (mode, mclass)
    314  1.1  mrg     if (known_eq (GET_MODE_PRECISION (mode), size))
    315  1.1  mrg       return mode;
    316  1.1  mrg 
    317  1.1  mrg   if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    318  1.1  mrg     for (i = 0; i < NUM_INT_N_ENTS; i ++)
    319  1.1  mrg       if (known_eq (int_n_data[i].bitsize, size)
    320  1.1  mrg 	  && int_n_enabled_p[i])
    321  1.1  mrg 	return int_n_data[i].m;
    322  1.1  mrg 
    323  1.1  mrg   return opt_machine_mode ();
    324  1.1  mrg }
    325  1.1  mrg 
    326  1.1  mrg /* Similar, except passed a tree node.  */
    327  1.1  mrg 
    328  1.1  mrg opt_machine_mode
    329  1.1  mrg mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
    330  1.1  mrg {
    331  1.1  mrg   unsigned HOST_WIDE_INT uhwi;
    332  1.1  mrg   unsigned int ui;
    333  1.1  mrg 
    334  1.1  mrg   if (!tree_fits_uhwi_p (size))
    335  1.1  mrg     return opt_machine_mode ();
    336  1.1  mrg   uhwi = tree_to_uhwi (size);
    337  1.1  mrg   ui = uhwi;
    338  1.1  mrg   if (uhwi != ui)
    339  1.1  mrg     return opt_machine_mode ();
    340  1.1  mrg   return mode_for_size (ui, mclass, limit);
    341  1.1  mrg }
    342  1.1  mrg 
    343  1.1  mrg /* Return the narrowest mode of class MCLASS that contains at least
    344  1.1  mrg    SIZE bits.  Abort if no such mode exists.  */
    345  1.1  mrg 
    346  1.1  mrg machine_mode
    347  1.1  mrg smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
    348  1.1  mrg {
    349  1.1  mrg   machine_mode mode = VOIDmode;
    350  1.1  mrg   int i;
    351  1.1  mrg 
    352  1.1  mrg   /* Get the first mode which has at least this size, in the
    353  1.1  mrg      specified class.  */
    354  1.1  mrg   FOR_EACH_MODE_IN_CLASS (mode, mclass)
    355  1.1  mrg     if (known_ge (GET_MODE_PRECISION (mode), size))
    356  1.1  mrg       break;
    357  1.1  mrg 
    358  1.1  mrg   gcc_assert (mode != VOIDmode);
    359  1.1  mrg 
    360  1.1  mrg   if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    361  1.1  mrg     for (i = 0; i < NUM_INT_N_ENTS; i ++)
    362  1.1  mrg       if (known_ge (int_n_data[i].bitsize, size)
    363  1.1  mrg 	  && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
    364  1.1  mrg 	  && int_n_enabled_p[i])
    365  1.1  mrg 	mode = int_n_data[i].m;
    366  1.1  mrg 
    367  1.1  mrg   return mode;
    368  1.1  mrg }
    369  1.1  mrg 
/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      /* MODE is already a scalar integer mode; return it unchanged.  */
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      /* Look for an integer mode of the same total bit size; the zero
	 LIMIT argument means MAX_FIXED_MODE_SIZE is not enforced here.  */
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_OPAQUE:
	return opt_scalar_int_mode ();

    case MODE_RANDOM:
      /* BLKmode has no integer equivalent; any other MODE_RANDOM mode
	 falls through to the unreachable default.  */
      if (mode == BLKmode)
	return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
    412  1.1  mrg 
/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  The result may be MODE itself, a complex or vector
   integer mode of the same size, or an integer mode not wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  The comma expression
     discards the result; only the internal assertions matter.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
	   || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
	  && have_regs_of_mode[GET_MODE_INNER (trial)])
	return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
	   || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
	  && have_regs_of_mode[trial]
	  && targetm.vector_mode_supported_p (trial))
	return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}
    458  1.1  mrg 
    459  1.1  mrg /* Find a type that can be used for efficient bitwise operations on MODE.
    460  1.1  mrg    Return null if no such mode exists.  */
    461  1.1  mrg 
    462  1.1  mrg tree
    463  1.1  mrg bitwise_type_for_mode (machine_mode mode)
    464  1.1  mrg {
    465  1.1  mrg   if (!bitwise_mode_for_mode (mode).exists (&mode))
    466  1.1  mrg     return NULL_TREE;
    467  1.1  mrg 
    468  1.1  mrg   unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
    469  1.1  mrg   tree inner_type = build_nonstandard_integer_type (inner_size, true);
    470  1.1  mrg 
    471  1.1  mrg   if (VECTOR_MODE_P (mode))
    472  1.1  mrg     return build_vector_type_for_mode (inner_type, mode);
    473  1.1  mrg 
    474  1.1  mrg   if (COMPLEX_MODE_P (mode))
    475  1.1  mrg     return build_complex_type (inner_type);
    476  1.1  mrg 
    477  1.1  mrg   gcc_checking_assert (GET_MODE_INNER (mode) == mode);
    478  1.1  mrg   return inner_type;
    479  1.1  mrg }
    480  1.1  mrg 
/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  Pick the start of the
     search range from the element mode's class.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
	&& GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  The zero
     LIMIT argument means MAX_FIXED_MODE_SIZE is not enforced here.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
	  && have_regs_of_mode[mode])
	return mode;
    }

  return opt_machine_mode ();
}
    522  1.1  mrg 
/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on elements of mode ELEMENT_MODE, return the vector mode
   it should use for those elements.  If NUNITS is nonzero, ensure that
   the mode has exactly NUNITS elements, otherwise pick whichever vector
   size pairs the most naturally with VECTOR_MODE; this may mean choosing
   a mode with a different size and/or number of elements, depending on
   what the target prefers.  Return an empty opt_machine_mode if there
   is no supported vector mode with the required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_vector_mode (machine_mode vector_mode, scalar_mode element_mode,
		     poly_uint64 nunits)
{
  /* Delegate the choice to the target's vectorizer hook.  */
  gcc_assert (VECTOR_MODE_P (vector_mode));
  return targetm.vectorize.related_mode (vector_mode, element_mode, nunits);
}
    542  1.1  mrg 
/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on integer vectors with the same element size and number
   of elements, return the vector mode it should use.  Return an empty
   opt_machine_mode if there is no supported vector mode with the
   required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_int_vector_mode (machine_mode vector_mode)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  /* Map the element mode to a same-sized integer mode, then ask the
     target for the matching integer vector mode.  */
  scalar_int_mode int_mode;
  if (int_mode_for_mode (GET_MODE_INNER (vector_mode)).exists (&int_mode))
    return related_vector_mode (vector_mode, int_mode,
				GET_MODE_NUNITS (vector_mode));
  return opt_machine_mode ();
}
    562  1.1  mrg 
    563  1.1  mrg /* Return the alignment of MODE. This will be bounded by 1 and
    564  1.1  mrg    BIGGEST_ALIGNMENT.  */
    565  1.1  mrg 
    566  1.1  mrg unsigned int
    567  1.1  mrg get_mode_alignment (machine_mode mode)
    568  1.1  mrg {
    569  1.1  mrg   return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
    570  1.1  mrg }
    571  1.1  mrg 
/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.
   NOTE(review): SIZE is compared against TYPE_SIZE, which is a bit
   count, so "bytes" above looks suspect — confirm against callers.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  /* If both sizes are constant and divide evenly, let the target pick
     an array mode (or lift the MAX_FIXED_MODE_SIZE limit if it says
     such arrays are supported).  */
  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
	return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
	limit_p = false;
    }
  /* Fall back on an integer mode of the right size, else BLKmode.  */
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
    603  1.1  mrg 
/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  /* Raise the decl's alignment to the type's, never lower it.  */
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      /* Only fields inherit the user-specified flag along with the value;
	 other decl kinds keep their own DECL_USER_ALIGN.  */
      if (TREE_CODE (decl) == FIELD_DECL)
	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  /* Likewise propagate the -Wif-not-aligned warning threshold.  */
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}
    620  1.1  mrg 
/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  /* CONST_DECLs take their size and mode from their type; nothing to do.  */
  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  /* Remember any already-assigned RTL so its mode and attributes can be
     refreshed at the end, once DECL_MODE is final.  */
  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      /* No explicit size: inherit both bit and byte sizes from the type.  */
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    /* Explicit bit size but no byte size yet: derive it, rounding any
       partial byte up (CEIL_DIV).  */
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
					  bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode,
	     occupying a complete byte or bytes on proper boundary.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
	    {
	      machine_mode xmode;
	      if (mode_for_size_tree (DECL_SIZE (decl),
				      MODE_INT, 1).exists (&xmode))
		{
		  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
		  /* Only promote to the integer mode when the field isn't
		     packed below the mode's alignment and the position is
		     known to be suitably aligned.  */
		  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
		      && (known_align == 0 || known_align >= xalign))
		    {
		      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
		      SET_DECL_MODE (decl, xmode);
		      /* The field is now an ordinary integer, not a
			 bit-field.  */
		      DECL_BIT_FIELD (decl) = 0;
		    }
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
	 minimum alignment.  Note that do_type_align may set
	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
	  && !old_user_align)
	SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (i.e. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
				     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
	  SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
						    DECL_ALIGN (decl)));
#endif
	}

      /* Cap the alignment by the applicable maximum-field-alignment:
	 zero-width bit-fields use the initial (command-line) maximum,
	 other fields the current #pragma pack value.  */
      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
	{
	  /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
	     as if PTRDIFF_MAX had been specified, with the value
	     being that on the target rather than the host.  */
	  unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
	  if (max_size == HOST_WIDE_INT_MAX)
	    max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

	  if (compare_tree_int (size, max_size) > 0)
	    warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
		     "maximum object size %wu",
		     decl, size, max_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      /* Clear DECL_RTL before set_mem_attributes so the attributes are
	 recomputed from the decl, then restore it.  */
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
	set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
    821  1.1  mrg 
    822  1.1  mrg /* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
    823  1.1  mrg    results of a previous call to layout_decl and calls it again.  */
    824  1.1  mrg 
    825  1.1  mrg void
    826  1.1  mrg relayout_decl (tree decl)
    827  1.1  mrg {
    828  1.1  mrg   DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
    829  1.1  mrg   SET_DECL_MODE (decl, VOIDmode);
    830  1.1  mrg   if (!DECL_USER_ALIGN (decl))
    831  1.1  mrg     SET_DECL_ALIGN (decl, 0);
    832  1.1  mrg   if (DECL_RTL_SET_P (decl))
    833  1.1  mrg     SET_DECL_RTL (decl, 0);
    834  1.1  mrg 
    835  1.1  mrg   layout_decl (decl, 0);
    836  1.1  mrg }
    837  1.1  mrg 
    838  1.1  mrg /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
    840  1.1  mrg    QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
    841  1.1  mrg    is to be passed to all other layout functions for this record.  It is the
    842  1.1  mrg    responsibility of the caller to call `free' for the storage returned.
    843  1.1  mrg    Note that garbage collection is not permitted until we finish laying
    844  1.1  mrg    out the record.  */
    845  1.1  mrg 
    846  1.1  mrg record_layout_info
    847  1.1  mrg start_record_layout (tree t)
    848  1.1  mrg {
    849  1.1  mrg   record_layout_info rli = XNEW (struct record_layout_info_s);
    850  1.1  mrg 
    851  1.1  mrg   rli->t = t;
    852  1.1  mrg 
    853  1.1  mrg   /* If the type has a minimum specified alignment (via an attribute
    854  1.1  mrg      declaration, for example) use it -- otherwise, start with a
    855  1.1  mrg      one-byte alignment.  */
    856  1.1  mrg   rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
    857  1.1  mrg   rli->unpacked_align = rli->record_align;
    858  1.1  mrg   rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
    859  1.1  mrg 
    860  1.1  mrg #ifdef STRUCTURE_SIZE_BOUNDARY
    861  1.1  mrg   /* Packed structures don't need to have minimum size.  */
    862  1.1  mrg   if (! TYPE_PACKED (t))
    863  1.1  mrg     {
    864  1.1  mrg       unsigned tmp;
    865  1.1  mrg 
    866  1.1  mrg       /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
    867  1.1  mrg       tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
    868  1.1  mrg       if (maximum_field_alignment != 0)
    869  1.1  mrg 	tmp = MIN (tmp, maximum_field_alignment);
    870  1.1  mrg       rli->record_align = MAX (rli->record_align, tmp);
    871  1.1  mrg     }
    872  1.1  mrg #endif
    873  1.1  mrg 
    874  1.1  mrg   rli->offset = size_zero_node;
    875  1.1  mrg   rli->bitpos = bitsize_zero_node;
    876  1.1  mrg   rli->prev_field = 0;
    877  1.1  mrg   rli->pending_statics = 0;
    878  1.1  mrg   rli->packed_maybe_necessary = 0;
    879  1.1  mrg   rli->remaining_in_alignment = 0;
    880  1.1  mrg 
    881  1.1  mrg   return rli;
    882  1.1  mrg }
    883  1.1  mrg 
    884  1.1  mrg /* Fold sizetype value X to bitsizetype, given that X represents a type
    885  1.1  mrg    size or offset.  */
    886  1.1  mrg 
    887  1.1  mrg static tree
    888  1.1  mrg bits_from_bytes (tree x)
    889  1.1  mrg {
    890  1.1  mrg   if (POLY_INT_CST_P (x))
    891  1.1  mrg     /* The runtime calculation isn't allowed to overflow sizetype;
    892  1.1  mrg        increasing the runtime values must always increase the size
    893  1.1  mrg        or offset of the object.  This means that the object imposes
    894  1.1  mrg        a maximum value on the runtime parameters, but we don't record
    895  1.1  mrg        what that is.  */
    896  1.1  mrg     return build_poly_int_cst
    897  1.1  mrg       (bitsizetype,
    898  1.1  mrg        poly_wide_int::from (poly_int_cst_value (x),
    899  1.1  mrg 			    TYPE_PRECISION (bitsizetype),
    900  1.1  mrg 			    TYPE_SIGN (TREE_TYPE (x))));
    901  1.1  mrg   x = fold_convert (bitsizetype, x);
    902  1.1  mrg   gcc_checking_assert (x);
    903  1.1  mrg   return x;
    904  1.1  mrg }
    905  1.1  mrg 
    906  1.1  mrg /* Return the combined bit position for the byte offset OFFSET and the
    907  1.1  mrg    bit position BITPOS.
    908  1.1  mrg 
    909  1.1  mrg    These functions operate on byte and bit positions present in FIELD_DECLs
    910  1.1  mrg    and assume that these expressions result in no (intermediate) overflow.
    911  1.1  mrg    This assumption is necessary to fold the expressions as much as possible,
    912  1.1  mrg    so as to avoid creating artificially variable-sized types in languages
    913  1.1  mrg    supporting variable-sized types like Ada.  */
    914  1.1  mrg 
    915  1.1  mrg tree
    916  1.1  mrg bit_from_pos (tree offset, tree bitpos)
    917  1.1  mrg {
    918  1.1  mrg   return size_binop (PLUS_EXPR, bitpos,
    919  1.1  mrg 		     size_binop (MULT_EXPR, bits_from_bytes (offset),
    920  1.1  mrg 				 bitsize_unit_node));
    921  1.1  mrg }
    922  1.1  mrg 
    923  1.1  mrg /* Return the combined truncated byte position for the byte offset OFFSET and
    924  1.1  mrg    the bit position BITPOS.  */
    925  1.1  mrg 
    926  1.1  mrg tree
    927  1.1  mrg byte_from_pos (tree offset, tree bitpos)
    928  1.1  mrg {
    929  1.1  mrg   tree bytepos;
    930  1.1  mrg   if (TREE_CODE (bitpos) == MULT_EXPR
    931  1.1  mrg       && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    932  1.1  mrg     bytepos = TREE_OPERAND (bitpos, 0);
    933  1.1  mrg   else
    934  1.1  mrg     bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
    935  1.1  mrg   return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
    936  1.1  mrg }
    937  1.1  mrg 
    938  1.1  mrg /* Split the bit position POS into a byte offset *POFFSET and a bit
    939  1.1  mrg    position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */
    940  1.1  mrg 
    941  1.1  mrg void
    942  1.1  mrg pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
    943  1.1  mrg 	      tree pos)
    944  1.1  mrg {
    945  1.1  mrg   tree toff_align = bitsize_int (off_align);
    946  1.1  mrg   if (TREE_CODE (pos) == MULT_EXPR
    947  1.1  mrg       && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    948  1.1  mrg     {
    949  1.1  mrg       *poffset = size_binop (MULT_EXPR,
    950  1.1  mrg 			     fold_convert (sizetype, TREE_OPERAND (pos, 0)),
    951  1.1  mrg 			     size_int (off_align / BITS_PER_UNIT));
    952  1.1  mrg       *pbitpos = bitsize_zero_node;
    953  1.1  mrg     }
    954  1.1  mrg   else
    955  1.1  mrg     {
    956  1.1  mrg       *poffset = size_binop (MULT_EXPR,
    957  1.1  mrg 			     fold_convert (sizetype,
    958  1.1  mrg 					   size_binop (FLOOR_DIV_EXPR, pos,
    959  1.1  mrg 						       toff_align)),
    960  1.1  mrg 			     size_int (off_align / BITS_PER_UNIT));
    961  1.1  mrg       *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    962  1.1  mrg     }
    963  1.1  mrg }
    964  1.1  mrg 
    965  1.1  mrg /* Given a pointer to bit and byte offsets and an offset alignment,
    966  1.1  mrg    normalize the offsets so they are within the alignment.  */
    967  1.1  mrg 
    968  1.1  mrg void
    969  1.1  mrg normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
    970  1.1  mrg {
    971  1.1  mrg   /* If the bit position is now larger than it should be, adjust it
    972  1.1  mrg      downwards.  */
    973  1.1  mrg   if (compare_tree_int (*pbitpos, off_align) >= 0)
    974  1.1  mrg     {
    975  1.1  mrg       tree offset, bitpos;
    976  1.1  mrg       pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
    977  1.1  mrg       *poffset = size_binop (PLUS_EXPR, *poffset, offset);
    978  1.1  mrg       *pbitpos = bitpos;
    979  1.1  mrg     }
    980  1.1  mrg }
    981  1.1  mrg 
    982  1.1  mrg /* Print debugging information about the information in RLI.  */
    983  1.1  mrg 
    984  1.1  mrg DEBUG_FUNCTION void
    985  1.1  mrg debug_rli (record_layout_info rli)
    986  1.1  mrg {
    987  1.1  mrg   print_node_brief (stderr, "type", rli->t, 0);
    988  1.1  mrg   print_node_brief (stderr, "\noffset", rli->offset, 0);
    989  1.1  mrg   print_node_brief (stderr, " bitpos", rli->bitpos, 0);
    990  1.1  mrg 
    991  1.1  mrg   fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
    992  1.1  mrg 	   rli->record_align, rli->unpacked_align,
    993  1.1  mrg 	   rli->offset_align);
    994  1.1  mrg 
    995  1.1  mrg   /* The ms_struct code is the only that uses this.  */
    996  1.1  mrg   if (targetm.ms_bitfield_layout_p (rli->t))
    997  1.1  mrg     fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
    998  1.1  mrg 
    999  1.1  mrg   if (rli->packed_maybe_necessary)
   1000  1.1  mrg     fprintf (stderr, "packed may be necessary\n");
   1001  1.1  mrg 
   1002  1.1  mrg   if (!vec_safe_is_empty (rli->pending_statics))
   1003  1.1  mrg     {
   1004  1.1  mrg       fprintf (stderr, "pending statics:\n");
   1005  1.1  mrg       debug (rli->pending_statics);
   1006  1.1  mrg     }
   1007  1.1  mrg }
   1008  1.1  mrg 
/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  /* Delegate to normalize_offset on the RLI's own position fields.  */
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}
   1017  1.1  mrg 
/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  /* Combine the byte offset and bit position, truncating to bytes.  */
  return byte_from_pos (rli->offset, rli->bitpos);
}
   1025  1.1  mrg 
/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  /* Combine the byte offset (scaled to bits) and the bit position.  */
  return bit_from_pos (rli->offset, rli->bitpos);
}
   1033  1.1  mrg 
/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  /* A bit-field for these purposes is a nonzero-width field with a
     bit-field type recorded by layout_decl.  */
  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if (!is_bitfield
	  || ((DECL_SIZE (field) == NULL_TREE
	       || !integer_zerop (DECL_SIZE (field)))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  if (!is_bitfield && DECL_PACKED (field))
	    type_align = desired_align;
	  else
	    type_align = MAX (type_align, desired_align);
	  /* #pragma pack caps the contribution of this field.  */
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

	  /* Targets might chose to handle unnamed and hence possibly
	     zero-width bitfield.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
	        type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  /* Track the unpacked alignment only when -Wpacked might
	     need it for diagnostics.  */
	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
  else
    {
      /* Ordinary field: the record simply needs at least this field's
	 alignment.  */
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  /* Any user-specified alignment on a field makes the whole record
     user-aligned.  */
  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
   1146  1.1  mrg 
/* Issue a warning if the record alignment, RECORD_ALIGN, is less than
   the field alignment of FIELD or FIELD isn't aligned. */

static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  /* Required alignment in bits; zero means no warning applies.  */
  unsigned int warn_if_not_align = 0;

  /* Which warning option triggered, for the diagnostic.  */
  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      /* -Wif-not-aligned: the decl's setting takes precedence over the
	 type's.  */
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
	warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
	opt_w = OPT_Wif_not_aligned;
    }

  /* -Wpacked-not-aligned: only for types with an explicit aligned
     attribute, and only if -Wif-not-aligned didn't already apply.  */
  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  /* Convert both alignments from bits to bytes for the diagnostics.  */
  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
	     record_align, context, warn_if_not_align);

  /* Also warn if the field's byte offset isn't (provably) a multiple of
     the required alignment; a non-constant offset gets the weaker "may
     not be aligned" wording.  */
  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
	warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
		 field, off, context, warn_if_not_align);
      else
	warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
		 field, off, context, warn_if_not_align);
    }
}
   1201  1.1  mrg 
/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  /* Lay out FIELD and fold its alignment into the union's; KNOWN_ALIGN
     is 0 since every union member starts at offset zero.  */
  update_alignment_for_field (rli, field, /*known_align=*/0);

  /* All union members live at position zero.  */
  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union. This helps when parsing
     invalid fields. */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* A typeless-storage member makes the whole union typeless storage.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    /* QUAL_UNION_TYPE: size depends on which member's qualifier holds,
       so build a conditional expression instead of a plain maximum.  */
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
			       DECL_SIZE_UNIT (field), rli->offset);
}
   1232  1.1  mrg 
   1233  1.1  mrg /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   1234  1.1  mrg    at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   1235  1.1  mrg    units of alignment than the underlying TYPE.  */
   1236  1.1  mrg static int
   1237  1.1  mrg excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
   1238  1.1  mrg 		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
   1239  1.1  mrg {
   1240  1.1  mrg   /* Note that the calculation of OFFSET might overflow; we calculate it so
   1241  1.1  mrg      that we still get the right result as long as ALIGN is a power of two.  */
   1242  1.1  mrg   unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
   1243  1.1  mrg 
   1244  1.1  mrg   offset = offset % align;
   1245  1.1  mrg   return ((offset + size + align - 1) / align
   1246  1.1  mrg 	  > tree_to_uhwi (TYPE_SIZE (type)) / align);
   1247  1.1  mrg }
   1248  1.1  mrg 
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)

   Advances RLI's running (offset, bitpos) position, and records the
   field's placement in DECL_FIELD_OFFSET, DECL_FIELD_BIT_OFFSET and
   DECL_OFFSET_ALIGN.  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  /* The alignment FIELD actually ends up with once placed; compared
     against KNOWN_ALIGN at the end to decide whether to re-lay out
     the decl (perhaps with an integral mode).  */
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (VAR_P (field))
    {
      vec_safe_push (rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      handle_warn_if_not_align (field, rli->record_align);
      return;
    }

  /* Typeless storage (e.g. from may_alias aggregates) is contagious:
     the containing record inherits the property.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_TYPELESS_STORAGE (type))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (tree_fits_uhwi_p (rli->offset))
    known_align = (BITS_PER_UNIT
		   * least_bit_hwi (tree_to_uhwi (rli->offset)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    /* Position zero is as aligned as anything can be: take the most
       generous bound available.  */
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
	      /* Don't warn if DECL_PACKED was set by the type.  */
	      else if (!TYPE_PACKED (rli->t))
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align
      && (! targetm.ms_bitfield_layout_p (rli->t)
	  || rli->prev_field == NULL))
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
	  && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION
	  && !TYPE_ARTIFICIAL (rli->t))
	warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      /* A variable offset can only be promised to be a multiple of the
	 alignment we just forced.  */
      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
	  /* Enter for these packed fields only to issue a warning.  */
	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	{
	  if (DECL_PACKED (field))
	    {
	      /* Packed fields were historically (pre-4.4) advanced here
		 too; now we only note the layout change.  */
	      if (warn_packed_bitfield_compat == 1)
		inform
		  (input_location,
		   "offset of packed bit-field %qD has changed in GCC 4.4",
		   field);
	    }
	  else
	    rli->bitpos = round_up (rli->bitpos, type_align);
	}

      if (! DECL_PACKED (field))
	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);

      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
				  TYPE_WARN_IF_NOT_ALIGN (type));
    }

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
				  TYPE_WARN_IF_NOT_ALIGN (type));
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if its long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields are
	packed into the same chunk of 32 bits. However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
	{
	  bool realign_p = known_align < desired_align;

	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run". (Note: it's nonzero declared
	     size, but equal type sizes!) (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.) */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
	      && tree_fits_uhwi_p (TYPE_SIZE (type))
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!) */
	      HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));

		  /* out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		{
		  /* Still fits in the current run; consume the bits and
		     suppress any realignment.  */
		  rli->remaining_in_alignment -= bitsize;
		  realign_p = false;
		}
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  /* Does this field automatically have alignment it needs by virtue
	     of the fields that precede it and the record's own alignment?  */
	  if (realign_p)
	    {
	      /* If the alignment is still within offset_align, just align
		 the bit position.  */
	      if (desired_align < rli->offset_align)
		rli->bitpos = round_up (rli->bitpos, desired_align);
	      else
		{
		  /* First adjust OFFSET by the partial bits, then align.  */
		  tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
				       bitsize_unit_node);
		  rli->offset = size_binop (PLUS_EXPR, rli->offset,
					    fold_convert (sizetype, d));
		  rli->bitpos = bitsize_zero_node;

		  rli->offset = round_up (rli->offset,
					  desired_align / BITS_PER_UNIT);
		}

	      if (! TREE_CONSTANT (rli->offset))
		rli->offset_align = desired_align;
	    }

	  normalize_rli (rli);
        }

      /* If we're starting a new run of same type size bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield the type sizes differ, or if there wasn't
	 a prior bitfield the size of the current field is nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
	      && tree_fits_uhwi_p (DECL_SIZE (field)))
	    {
	      unsigned HOST_WIDE_INT bitsize
		= tree_to_uhwi (DECL_SIZE (field));
	      unsigned HOST_WIDE_INT typesize
		= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  if (! DECL_PACKED (field))
	    type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
  handle_warn_if_not_align (field, rli->record_align);

  /* Evaluate nonconstant offsets only once, either now or as soon as safe.  */
  if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
    DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
    actual_align = (BITS_PER_UNIT
		    * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record* .
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      /* Variable-sized field: fold any pending bits into the byte
	 offset, then advance by the field's byte size.  */
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);

      if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
			  bitsize_int (rli->offset_align)))
	{
	  tree type = strip_array_types (TREE_TYPE (field));
	  /* The above adjusts offset_align just based on the start of the
	     field.  The field might not have a size that is a multiple of
	     that offset_align though.  If the field is an array of fixed
	     sized elements, assume there can be any multiple of those
	     sizes.  If it is a variable length aggregate or array of
	     variable length aggregates, assume worst that the end is
	     just BITS_PER_UNIT aligned.  */
	  if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
	    {
	      if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
		{
		  unsigned HOST_WIDE_INT sz
		    = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
		  rli->offset_align = MIN (rli->offset_align, sz);
		}
	    }
	  else
	    rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
	}
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If FIELD is the last field and doesn't end at the full length
	 of the type then pad the struct out to the full length of the
	 last type.  */
      if (DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	{
	  /* We have to scan, because non-field DECLS are also here.  */
	  tree probe = field;
	  while ((probe = DECL_CHAIN (probe)))
	    if (TREE_CODE (probe) == FIELD_DECL)
	      break;
	  if (!probe)
	    rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				      bitsize_int (rli->remaining_in_alignment));
	}

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
   1747  1.1  mrg 
/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  Also emits -Wpadded / -Wpacked diagnostics when
   the final size or alignment reveals wasted or unnecessary packing.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					    rli->record_align));
#else
  SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    /* Trailing partial byte: round the byte count up by one.  */
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  /* Warn if rounding to alignment actually grew the structure
     (tail padding), unless the type is compiler-generated.  */
  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION
      && !TYPE_ARTIFICIAL (rli->t))
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

      /* Compute what the alignment would have been without packing, so
	 we can tell whether packing actually changed the layout.  */
#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  /* Packing made no difference to the size: the attribute is
	     either useless or (on strict-alignment targets) harmful.  */
	  if (TYPE_NAME (rli->t))
	    {
	      tree name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = TYPE_NAME (rli->t);
	      else
		name = DECL_NAME (TYPE_NAME (rli->t));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qE", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qE", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}
   1833  1.1  mrg 
/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  /* Mode of the field that covers the whole record, if any; stays
     VOIDmode until such a candidate is found in the loop below.  */
  machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  /* A record whose total size is not a compile-time (poly-)constant
     can never be given a scalar mode.  */
  poly_uint64 type_size;
  if (!poly_int_tree_p (TYPE_SIZE (type), &type_size))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      /* Bail out (leaving BLKmode) on erroneous fields, BLKmode
	 members that are not merely misaligned and not zero-sized,
	 and fields whose position or size is not a known constant.  */
      poly_uint64 field_size;
      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
	  || !tree_fits_poly_uint64_p (bit_position (field))
	  || DECL_SIZE (field) == 0
	  || !poly_int_tree_p (DECL_SIZE (field), &field_size))
	return;

      /* If this field is the whole struct, remember its mode so
	 that, say, we can put a double in a class into a DF
	 register instead of forcing it to live in the stack.  */
      if (known_eq (field_size, type_size)
	  /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
	     wider types (e.g. int32), despite precision being less.  Ensure
	     that the TYPE_MODE of the struct does not get set to the partial
	     int mode if there is a wider type also in the struct.  */
	  && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
		       GET_MODE_PRECISION (mode)))
	mode = DECL_MODE (field);

      /* With some targets, it is sub-optimal to access an aligned
	 BLKmode structure as a scalar.  */
      if (targetm.member_type_forces_blk (field, mode))
	return;
    }

  /* If we only have one real field; use its mode if that mode's size
     matches the type's size.  This generally only applies to RECORD_TYPE.
     For UNION_TYPE, if the widest field is MODE_INT then use that mode.
     If the widest field is MODE_PARTIAL_INT, and the union will be passed
     by reference, then use that mode.  */
  if ((TREE_CODE (type) == RECORD_TYPE
       || (TREE_CODE (type) == UNION_TYPE
	   && (GET_MODE_CLASS (mode) == MODE_INT
	       || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
		   && (targetm.calls.pass_by_reference
		       (pack_cumulative_args (0),
			function_arg_info (type, mode, /*named=*/false)))))))
      && mode != VOIDmode
      && known_eq (GET_MODE_BITSIZE (mode), type_size))
    ;
  else
    /* Otherwise fall back to an integer mode of the record's exact
       size, or BLKmode if none exists.  */
    mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (mode != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
    {
      /* If this is the only reason this type is BLKmode, then
	 don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      mode = BLKmode;
    }

  SET_TYPE_MODE (type, mode);
}
   1922  1.1  mrg 
/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */
  bool tua_cleared_p = false;
  if (TYPE_MODE (type) != BLKmode
      && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
	 alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
	{
	  SET_TYPE_ALIGN (type, mode_align);
	  /* Remember that we're about to reset this flag.  */
	  tua_cleared_p = TYPE_USER_ALIGN (type);
	  TYPE_USER_ALIGN (type) = false;
	}
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  SET_TYPE_ALIGN (type,
                  ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
				  bitsize_unit_node));

  /* Round both the bit size and the unit size up to a whole multiple
     of the final alignment.  */
  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type)
	= round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Handle empty records as per the x86-64 psABI.  */
  TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int precision = TYPE_PRECISION (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      machine_mode mode = TYPE_MODE (type);
      bool empty_p = TYPE_EMPTY_P (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
	   variant != NULL_TREE;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  TYPE_SIZE (variant) = size;
	  TYPE_SIZE_UNIT (variant) = size_unit;
	  unsigned valign = align;
	  /* A variant may carry its own, stricter user alignment;
	     never reduce it below what it already requested.  */
	  if (TYPE_USER_ALIGN (variant))
	    {
	      valign = MAX (valign, TYPE_ALIGN (variant));
	      /* If we reset TYPE_USER_ALIGN on the main variant, we might
		 need to reset it on the variants too.  TYPE_MODE will be set
		 to MODE in this variant, so we can use that.  */
	      if (tua_cleared_p && GET_MODE_ALIGNMENT (mode) >= valign)
		TYPE_USER_ALIGN (variant) = false;
	    }
	  else
	    TYPE_USER_ALIGN (variant) = user_align;
	  SET_TYPE_ALIGN (variant, valign);
	  TYPE_PRECISION (variant) = precision;
	  SET_TYPE_MODE (variant, mode);
	  TYPE_EMPTY_P (variant) = empty_p;
	}
    }
}
   2025  1.1  mrg 
/* Return a new underlying object for a bitfield started with FIELD.  */

static tree
start_bitfield_representative (tree field)
{
  tree repr = make_node (FIELD_DECL);
  DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
  /* Force the representative to begin at a BITS_PER_UNIT aligned
     boundary - C++ may use tail-padding of a base object to
     continue packing bits so the bitfield region does not start
     at bit zero (see g++.dg/abi/bitfield5.C for example).
     Unallocated bits may happen for other reasons as well,
     for example Ada which allows explicit bit-granular structure layout.  */
  DECL_FIELD_BIT_OFFSET (repr)
    = size_binop (BIT_AND_EXPR,
		  DECL_FIELD_BIT_OFFSET (field),
		  bitsize_int (~(BITS_PER_UNIT - 1)));
  SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
  /* The size copied here is provisional; finish_bitfield_representative
     overwrites DECL_SIZE/DECL_SIZE_UNIT once the whole group is known.  */
  DECL_SIZE (repr) = DECL_SIZE (field);
  DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
  DECL_PACKED (repr) = DECL_PACKED (field);
  DECL_CONTEXT (repr) = DECL_CONTEXT (field);
  /* There are no indirect accesses to this field.  If we introduce
     some then they have to use the record alias set.  This makes
     sure to properly conflict with [indirect] accesses to addressable
     fields of the bitfield group.  */
  DECL_NONADDRESSABLE_P (repr) = 1;
  return repr;
}
   2055  1.1  mrg 
/* Finish up a bitfield group that was started by creating the underlying
   object REPR with the last field in the bitfield group FIELD.  */

static void
finish_bitfield_representative (tree repr, tree field)
{
  unsigned HOST_WIDE_INT bitsize, maxbitsize;
  tree nextf, size;

  /* BITSIZE: distance in bits from the start of the representative
     to the end of FIELD, the last member of the group: byte distance
     between the two field offsets, plus the bit-offset delta, plus
     FIELD's own size.  */
  size = size_diffop (DECL_FIELD_OFFSET (field),
		      DECL_FIELD_OFFSET (repr));
  while (TREE_CODE (size) == COMPOUND_EXPR)
    size = TREE_OPERAND (size, 1);
  gcc_assert (tree_fits_uhwi_p (size));
  bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
	     + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
	     - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
	     + tree_to_uhwi (DECL_SIZE (field)));

  /* Round up bitsize to multiples of BITS_PER_UNIT.  */
  bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);

  /* Now nothing tells us how to pad out bitsize ...  */
  if (TREE_CODE (DECL_CONTEXT (field)) == RECORD_TYPE)
    {
      /* Find the next FIELD_DECL after FIELD; the representative may
	 extend up to (but not into) it.  */
      nextf = DECL_CHAIN (field);
      while (nextf && TREE_CODE (nextf) != FIELD_DECL)
	nextf = DECL_CHAIN (nextf);
    }
  else
    nextf = NULL_TREE;
  if (nextf)
    {
      tree maxsize;
      /* If there was an error, the field may be not laid out
         correctly.  Don't bother to do anything.  */
      if (TREE_TYPE (nextf) == error_mark_node)
	{
	  TREE_TYPE (repr) = error_mark_node;
	  return;
	}
      /* MAXBITSIZE: largest extent the representative may occupy,
	 bounded by where the next field begins.  */
      maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
			     DECL_FIELD_OFFSET (repr));
      if (tree_fits_uhwi_p (maxsize))
	{
	  maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
			+ tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
			- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
	  /* If the group ends within a bitfield nextf does not need to be
	     aligned to BITS_PER_UNIT.  Thus round up.  */
	  maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
	}
      else
	maxbitsize = bitsize;
    }
  else
    {
      /* Note that if the C++ FE sets up tail-padding to be re-used it
         creates a as-base variant of the type with TYPE_SIZE adjusted
	 accordingly.  So it is safe to include tail-padding here.  */
      tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
							(DECL_CONTEXT (field));
      tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
      /* We cannot generally rely on maxsize to fold to an integer constant,
	 so use bitsize as fallback for this case.  */
      if (tree_fits_uhwi_p (maxsize))
	maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
		      - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      else
	maxbitsize = bitsize;
    }

  /* Only if we don't artificially break up the representative in
     the middle of a large bitfield with different possibly
     overlapping representatives.  And all representatives start
     at byte offset.  */
  gcc_assert (maxbitsize % BITS_PER_UNIT == 0);

  /* Find the smallest nice mode to use.  */
  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
      break;

  scalar_int_mode mode;
  if (!mode_iter.exists (&mode)
      || GET_MODE_BITSIZE (mode) > maxbitsize
      || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
    {
      /* We really want a BLKmode representative only as a last resort,
         considering the member b in
	   struct { int a : 7; int b : 17; int c; } __attribute__((packed));
	 Otherwise we simply want to split the representative up
	 allowing for overlaps within the bitfield region as required for
	   struct { int a : 7; int b : 7;
		    int c : 10; int d; } __attribute__((packed));
	 [0, 15] HImode for a and b, [8, 23] HImode for c.  */
      DECL_SIZE (repr) = bitsize_int (bitsize);
      DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
      SET_DECL_MODE (repr, BLKmode);
      TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
						 bitsize / BITS_PER_UNIT);
    }
  else
    {
      /* A suitable integer mode exists: size the representative to the
	 full mode width.  */
      unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
      DECL_SIZE (repr) = bitsize_int (modesize);
      DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
      SET_DECL_MODE (repr, mode);
      TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
    }

  /* Remember whether the bitfield group is at the end of the
     structure or not.  */
  DECL_CHAIN (repr) = nextf;
}
   2172  1.1  mrg 
/* Compute and set FIELD_DECLs for the underlying objects we should
   use for bitfield access for the structure T.  */

void
finish_bitfield_layout (tree t)
{
  tree field, prev;
  /* Representative of the bitfield group currently being accumulated,
     or NULL_TREE when no group is open.  */
  tree repr = NULL_TREE;

  if (TREE_CODE (t) == QUAL_UNION_TYPE)
    return;

  for (prev = NULL_TREE, field = TYPE_FIELDS (t);
       field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      /* In the C++ memory model, consecutive bit fields in a structure are
	 considered one memory location and updating a memory location
	 may not store into adjacent memory locations.  */
      if (!repr
	  && DECL_BIT_FIELD_TYPE (field))
	{
	  /* Start new representative.  */
	  repr = start_bitfield_representative (field);
	}
      else if (repr
	       && ! DECL_BIT_FIELD_TYPE (field))
	{
	  /* Finish off new representative.  */
	  finish_bitfield_representative (repr, prev);
	  repr = NULL_TREE;
	}
      else if (DECL_BIT_FIELD_TYPE (field))
	{
	  gcc_assert (repr != NULL_TREE);

	  /* Zero-size bitfields finish off a representative and
	     do not have a representative themselves.  This is
	     required by the C++ memory model.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      finish_bitfield_representative (repr, prev);
	      repr = NULL_TREE;
	    }

	  /* We assume that either DECL_FIELD_OFFSET of the representative
	     and each bitfield member is a constant or they are equal.
	     This is because we need to be able to compute the bit-offset
	     of each field relative to the representative in get_bit_range
	     during RTL expansion.
	     If these constraints are not met, simply force a new
	     representative to be generated.  That will at most
	     generate worse code but still maintain correctness with
	     respect to the C++ memory model.  */
	  else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
		      && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
		     || operand_equal_p (DECL_FIELD_OFFSET (repr),
					 DECL_FIELD_OFFSET (field), 0)))
	    {
	      finish_bitfield_representative (repr, prev);
	      repr = start_bitfield_representative (field);
	    }
	}
      else
	continue;

      if (repr)
	DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;

      /* In a RECORD_TYPE, bitfield groups can span several fields, so
	 track the last one seen; in a UNION_TYPE each field stands
	 alone, so finish its representative immediately.  */
      if (TREE_CODE (t) == RECORD_TYPE)
	prev = field;
      else if (repr)
	{
	  finish_bitfield_representative (repr, field);
	  repr = NULL_TREE;
	}
    }

  /* Close a group that runs to the end of the structure.  */
  if (repr)
    finish_bitfield_representative (repr, prev);
}
   2256  1.1  mrg 
/* Do all of the work required to layout the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than false
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Compute bitfield representatives.  */
  finish_bitfield_layout (rli->t);

  /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
     With C++ templates, it is too early to do this when the attribute
     is being parsed.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    {
      TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
      TYPE_REVERSE_STORAGE_ORDER (variant)
	= TYPE_REVERSE_STORAGE_ORDER (rli->t);
    }

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (!vec_safe_is_empty (rli->pending_statics))
    layout_decl (rli->pending_statics->pop (), 0);

  /* Clean up.  RLI and its pending-statics vector are owned by this
     function when FREE_P is true.  */
  if (free_p)
    {
      vec_free (rli->pending_statics);
      free (rli);
    }
}
   2303  1.1  mrg 
   2304  1.1  mrg 
/* Finish processing a builtin RECORD_TYPE type TYPE.  It's name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
		       tree align_type)
{
  tree tail, next;

  /* FIELDS arrives chained in reverse order; reverse the chain in
     place while setting each field's context to TYPE.  */
  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = DECL_CHAIN (fields);
      DECL_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  /* Optionally force the struct's alignment to match ALIGN_TYPE
     before layout computes it from the fields.  */
  if (align_type)
    {
      SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
      SET_TYPE_WARN_IF_NOT_ALIGN (type,
				  TYPE_WARN_IF_NOT_ALIGN (align_type));
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
				 TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
   2344  1.1  mrg 
   2345  1.1  mrg /* Calculate the mode, size, and alignment for TYPE.
   2346  1.1  mrg    For an array type, calculate the element separation as well.
   2347  1.1  mrg    Record TYPE on the chain of permanent or temporary types
   2348  1.1  mrg    so that dbxout will find out about it.
   2349  1.1  mrg 
   2350  1.1  mrg    TYPE_SIZE of a type is nonzero if the type has been laid out already.
   2351  1.1  mrg    layout_type does nothing on such a type.
   2352  1.1  mrg 
   2353  1.1  mrg    If the type is incomplete, its TYPE_SIZE remains zero.  */
   2354  1.1  mrg 
   2355  1.1  mrg void
   2356  1.1  mrg layout_type (tree type)
   2357  1.1  mrg {
   2358  1.1  mrg   gcc_assert (type);
   2359  1.1  mrg 
   2360  1.1  mrg   if (type == error_mark_node)
   2361  1.1  mrg     return;
   2362  1.1  mrg 
   2363  1.1  mrg   /* We don't want finalize_type_size to copy an alignment attribute to
   2364  1.1  mrg      variants that don't have it.  */
   2365  1.1  mrg   type = TYPE_MAIN_VARIANT (type);
   2366  1.1  mrg 
   2367  1.1  mrg   /* Do nothing if type has been laid out before.  */
   2368  1.1  mrg   if (TYPE_SIZE (type))
   2369  1.1  mrg     return;
   2370  1.1  mrg 
   2371  1.1  mrg   switch (TREE_CODE (type))
   2372  1.1  mrg     {
   2373  1.1  mrg     case LANG_TYPE:
   2374  1.1  mrg       /* This kind of type is the responsibility
   2375  1.1  mrg 	 of the language-specific code.  */
   2376  1.1  mrg       gcc_unreachable ();
   2377  1.1  mrg 
   2378  1.1  mrg     case BOOLEAN_TYPE:
   2379  1.1  mrg     case INTEGER_TYPE:
   2380  1.1  mrg     case ENUMERAL_TYPE:
   2381  1.1  mrg       {
   2382  1.1  mrg 	scalar_int_mode mode
   2383  1.1  mrg 	  = smallest_int_mode_for_size (TYPE_PRECISION (type));
   2384  1.1  mrg 	SET_TYPE_MODE (type, mode);
   2385  1.1  mrg 	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
   2386  1.1  mrg 	/* Don't set TYPE_PRECISION here, as it may be set by a bitfield.  */
   2387  1.1  mrg 	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
   2388  1.1  mrg 	break;
   2389  1.1  mrg       }
   2390  1.1  mrg 
   2391  1.1  mrg     case REAL_TYPE:
   2392  1.1  mrg       {
   2393  1.1  mrg 	/* Allow the caller to choose the type mode, which is how decimal
   2394  1.1  mrg 	   floats are distinguished from binary ones.  */
   2395  1.1  mrg 	if (TYPE_MODE (type) == VOIDmode)
   2396  1.1  mrg 	  SET_TYPE_MODE
   2397  1.1  mrg 	    (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
   2398  1.1  mrg 	scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
   2399  1.1  mrg 	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
   2400  1.1  mrg 	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
   2401  1.1  mrg 	break;
   2402  1.1  mrg       }
   2403  1.1  mrg 
   2404  1.1  mrg    case FIXED_POINT_TYPE:
   2405  1.1  mrg      {
   2406  1.1  mrg        /* TYPE_MODE (type) has been set already.  */
   2407  1.1  mrg        scalar_mode mode = SCALAR_TYPE_MODE (type);
   2408  1.1  mrg        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
   2409  1.1  mrg        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
   2410  1.1  mrg        break;
   2411  1.1  mrg      }
   2412  1.1  mrg 
   2413  1.1  mrg     case COMPLEX_TYPE:
   2414  1.1  mrg       TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
   2415  1.1  mrg       SET_TYPE_MODE (type,
   2416  1.1  mrg 		     GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));
   2417  1.1  mrg 
   2418  1.1  mrg       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
   2419  1.1  mrg       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
   2420  1.1  mrg       break;
   2421  1.1  mrg 
   2422  1.1  mrg     case VECTOR_TYPE:
   2423  1.1  mrg       {
   2424  1.1  mrg 	poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
   2425  1.1  mrg 	tree innertype = TREE_TYPE (type);
   2426  1.1  mrg 
   2427  1.1  mrg 	/* Find an appropriate mode for the vector type.  */
   2428  1.1  mrg 	if (TYPE_MODE (type) == VOIDmode)
   2429  1.1  mrg 	  SET_TYPE_MODE (type,
   2430  1.1  mrg 			 mode_for_vector (SCALAR_TYPE_MODE (innertype),
   2431  1.1  mrg 					  nunits).else_blk ());
   2432  1.1  mrg 
   2433  1.1  mrg 	TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
   2434  1.1  mrg         TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
   2435  1.1  mrg 	/* Several boolean vector elements may fit in a single unit.  */
   2436  1.1  mrg 	if (VECTOR_BOOLEAN_TYPE_P (type)
   2437  1.1  mrg 	    && type->type_common.mode != BLKmode)
   2438  1.1  mrg 	  TYPE_SIZE_UNIT (type)
   2439  1.1  mrg 	    = size_int (GET_MODE_SIZE (type->type_common.mode));
   2440  1.1  mrg 	else
   2441  1.1  mrg 	  TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
   2442  1.1  mrg 						   TYPE_SIZE_UNIT (innertype),
   2443  1.1  mrg 						   size_int (nunits));
   2444  1.1  mrg 	TYPE_SIZE (type) = int_const_binop
   2445  1.1  mrg 	  (MULT_EXPR,
   2446  1.1  mrg 	   bits_from_bytes (TYPE_SIZE_UNIT (type)),
   2447  1.1  mrg 	   bitsize_int (BITS_PER_UNIT));
   2448  1.1  mrg 
   2449  1.1  mrg 	/* For vector types, we do not default to the mode's alignment.
   2450  1.1  mrg 	   Instead, query a target hook, defaulting to natural alignment.
   2451  1.1  mrg 	   This prevents ABI changes depending on whether or not native
   2452  1.1  mrg 	   vector modes are supported.  */
   2453  1.1  mrg 	SET_TYPE_ALIGN (type, targetm.vector_alignment (type));
   2454  1.1  mrg 
   2455  1.1  mrg 	/* However, if the underlying mode requires a bigger alignment than
   2456  1.1  mrg 	   what the target hook provides, we cannot use the mode.  For now,
   2457  1.1  mrg 	   simply reject that case.  */
   2458  1.1  mrg 	gcc_assert (TYPE_ALIGN (type)
   2459  1.1  mrg 		    >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
   2460  1.1  mrg         break;
   2461  1.1  mrg       }
   2462  1.1  mrg 
   2463  1.1  mrg     case VOID_TYPE:
   2464  1.1  mrg       /* This is an incomplete type and so doesn't have a size.  */
   2465  1.1  mrg       SET_TYPE_ALIGN (type, 1);
   2466  1.1  mrg       TYPE_USER_ALIGN (type) = 0;
   2467  1.1  mrg       SET_TYPE_MODE (type, VOIDmode);
   2468  1.1  mrg       break;
   2469  1.1  mrg 
   2470  1.1  mrg     case OFFSET_TYPE:
   2471  1.1  mrg       TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
   2472  1.1  mrg       TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
   2473  1.1  mrg       /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
   2474  1.1  mrg 	 integral, which may be an __intN.  */
   2475  1.1  mrg       SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
   2476  1.1  mrg       TYPE_PRECISION (type) = POINTER_SIZE;
   2477  1.1  mrg       break;
   2478  1.1  mrg 
   2479  1.1  mrg     case FUNCTION_TYPE:
   2480  1.1  mrg     case METHOD_TYPE:
   2481  1.1  mrg       /* It's hard to see what the mode and size of a function ought to
   2482  1.1  mrg 	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
   2483  1.1  mrg 	 make it consistent with that.  */
   2484  1.1  mrg       SET_TYPE_MODE (type,
   2485  1.1  mrg 		     int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
   2486  1.1  mrg       TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
   2487  1.1  mrg       TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
   2488  1.1  mrg       break;
   2489  1.1  mrg 
   2490  1.1  mrg     case POINTER_TYPE:
   2491  1.1  mrg     case REFERENCE_TYPE:
   2492  1.1  mrg       {
   2493  1.1  mrg 	scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
   2494  1.1  mrg 	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
   2495  1.1  mrg 	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
   2496  1.1  mrg 	TYPE_UNSIGNED (type) = 1;
   2497  1.1  mrg 	TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
   2498  1.1  mrg       }
   2499  1.1  mrg       break;
   2500  1.1  mrg 
   2501  1.1  mrg     case ARRAY_TYPE:
   2502  1.1  mrg       {
   2503  1.1  mrg 	tree index = TYPE_DOMAIN (type);
   2504  1.1  mrg 	tree element = TREE_TYPE (type);
   2505  1.1  mrg 
   2506  1.1  mrg 	/* We need to know both bounds in order to compute the size.  */
   2507  1.1  mrg 	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
   2508  1.1  mrg 	    && TYPE_SIZE (element))
   2509  1.1  mrg 	  {
   2510  1.1  mrg 	    tree ub = TYPE_MAX_VALUE (index);
   2511  1.1  mrg 	    tree lb = TYPE_MIN_VALUE (index);
   2512  1.1  mrg 	    tree element_size = TYPE_SIZE (element);
   2513  1.1  mrg 	    tree length;
   2514  1.1  mrg 
   2515  1.1  mrg 	    /* Make sure that an array of zero-sized element is zero-sized
   2516  1.1  mrg 	       regardless of its extent.  */
   2517  1.1  mrg 	    if (integer_zerop (element_size))
   2518  1.1  mrg 	      length = size_zero_node;
   2519  1.1  mrg 
   2520  1.1  mrg 	    /* The computation should happen in the original signedness so
   2521  1.1  mrg 	       that (possible) negative values are handled appropriately
   2522  1.1  mrg 	       when determining overflow.  */
   2523  1.1  mrg 	    else
   2524  1.1  mrg 	      {
   2525  1.1  mrg 		/* ???  When it is obvious that the range is signed
   2526  1.1  mrg 		   represent it using ssizetype.  */
   2527  1.1  mrg 		if (TREE_CODE (lb) == INTEGER_CST
   2528  1.1  mrg 		    && TREE_CODE (ub) == INTEGER_CST
   2529  1.1  mrg 		    && TYPE_UNSIGNED (TREE_TYPE (lb))
   2530  1.1  mrg 		    && tree_int_cst_lt (ub, lb))
   2531  1.1  mrg 		  {
   2532  1.1  mrg 		    lb = wide_int_to_tree (ssizetype,
   2533  1.1  mrg 					   offset_int::from (wi::to_wide (lb),
   2534  1.1  mrg 							     SIGNED));
   2535  1.1  mrg 		    ub = wide_int_to_tree (ssizetype,
   2536  1.1  mrg 					   offset_int::from (wi::to_wide (ub),
   2537  1.1  mrg 							     SIGNED));
   2538  1.1  mrg 		  }
   2539  1.1  mrg 		length
   2540  1.1  mrg 		  = fold_convert (sizetype,
   2541  1.1  mrg 				  size_binop (PLUS_EXPR,
   2542  1.1  mrg 					      build_int_cst (TREE_TYPE (lb), 1),
   2543  1.1  mrg 					      size_binop (MINUS_EXPR, ub, lb)));
   2544  1.1  mrg 	      }
   2545  1.1  mrg 
   2546  1.1  mrg 	    /* ??? We have no way to distinguish a null-sized array from an
   2547  1.1  mrg 	       array spanning the whole sizetype range, so we arbitrarily
   2548  1.1  mrg 	       decide that [0, -1] is the only valid representation.  */
   2549  1.1  mrg 	    if (integer_zerop (length)
   2550  1.1  mrg 	        && TREE_OVERFLOW (length)
   2551  1.1  mrg 		&& integer_zerop (lb))
   2552  1.1  mrg 	      length = size_zero_node;
   2553  1.1  mrg 
   2554  1.1  mrg 	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
   2555  1.1  mrg 					   bits_from_bytes (length));
   2556  1.1  mrg 
   2557  1.1  mrg 	    /* If we know the size of the element, calculate the total size
   2558  1.1  mrg 	       directly, rather than do some division thing below.  This
   2559  1.1  mrg 	       optimization helps Fortran assumed-size arrays (where the
   2560  1.1  mrg 	       size of the array is determined at runtime) substantially.  */
   2561  1.1  mrg 	    if (TYPE_SIZE_UNIT (element))
   2562  1.1  mrg 	      TYPE_SIZE_UNIT (type)
   2563  1.1  mrg 		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
   2564  1.1  mrg 	  }
   2565  1.1  mrg 
   2566  1.1  mrg 	/* Now round the alignment and size,
   2567  1.1  mrg 	   using machine-dependent criteria if any.  */
   2568  1.1  mrg 
   2569  1.1  mrg 	unsigned align = TYPE_ALIGN (element);
   2570  1.1  mrg 	if (TYPE_USER_ALIGN (type))
   2571  1.1  mrg 	  align = MAX (align, TYPE_ALIGN (type));
   2572  1.1  mrg 	else
   2573  1.1  mrg 	  TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
   2574  1.1  mrg 	if (!TYPE_WARN_IF_NOT_ALIGN (type))
   2575  1.1  mrg 	  SET_TYPE_WARN_IF_NOT_ALIGN (type,
   2576  1.1  mrg 				      TYPE_WARN_IF_NOT_ALIGN (element));
   2577  1.1  mrg #ifdef ROUND_TYPE_ALIGN
   2578  1.1  mrg 	align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
   2579  1.1  mrg #else
   2580  1.1  mrg 	align = MAX (align, BITS_PER_UNIT);
   2581  1.1  mrg #endif
   2582  1.1  mrg 	SET_TYPE_ALIGN (type, align);
   2583  1.1  mrg 	SET_TYPE_MODE (type, BLKmode);
   2584  1.1  mrg 	if (TYPE_SIZE (type) != 0
   2585  1.1  mrg 	    && ! targetm.member_type_forces_blk (type, VOIDmode)
   2586  1.1  mrg 	    /* BLKmode elements force BLKmode aggregate;
   2587  1.1  mrg 	       else extract/store fields may lose.  */
   2588  1.1  mrg 	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
   2589  1.1  mrg 		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
   2590  1.1  mrg 	  {
   2591  1.1  mrg 	    SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
   2592  1.1  mrg 						 TYPE_SIZE (type)));
   2593  1.1  mrg 	    if (TYPE_MODE (type) != BLKmode
   2594  1.1  mrg 		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
   2595  1.1  mrg 		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
   2596  1.1  mrg 	      {
   2597  1.1  mrg 		TYPE_NO_FORCE_BLK (type) = 1;
   2598  1.1  mrg 		SET_TYPE_MODE (type, BLKmode);
   2599  1.1  mrg 	      }
   2600  1.1  mrg 	  }
   2601  1.1  mrg 	if (AGGREGATE_TYPE_P (element))
   2602  1.1  mrg 	  TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
   2603  1.1  mrg 	/* When the element size is constant, check that it is at least as
   2604  1.1  mrg 	   large as the element alignment.  */
   2605  1.1  mrg 	if (TYPE_SIZE_UNIT (element)
   2606  1.1  mrg 	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
   2607  1.1  mrg 	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
   2608  1.1  mrg 	       TYPE_ALIGN_UNIT.  */
   2609  1.1  mrg 	    && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
   2610  1.1  mrg 	    && !integer_zerop (TYPE_SIZE_UNIT (element)))
   2611  1.1  mrg 	  {
   2612  1.1  mrg 	    if (compare_tree_int (TYPE_SIZE_UNIT (element),
   2613  1.1  mrg 				  TYPE_ALIGN_UNIT (element)) < 0)
   2614  1.1  mrg 	      error ("alignment of array elements is greater than "
   2615  1.1  mrg 		     "element size");
   2616  1.1  mrg 	    else if (TYPE_ALIGN_UNIT (element) > 1
   2617  1.1  mrg 		     && (wi::zext (wi::to_wide (TYPE_SIZE_UNIT (element)),
   2618  1.1  mrg 				  ffs_hwi (TYPE_ALIGN_UNIT (element)) - 1)
   2619  1.1  mrg 			 != 0))
   2620  1.1  mrg 	      error ("size of array element is not a multiple of its "
   2621  1.1  mrg 		     "alignment");
   2622  1.1  mrg 	  }
   2623  1.1  mrg 	break;
   2624  1.1  mrg       }
   2625  1.1  mrg 
   2626  1.1  mrg     case RECORD_TYPE:
   2627  1.1  mrg     case UNION_TYPE:
   2628  1.1  mrg     case QUAL_UNION_TYPE:
   2629  1.1  mrg       {
   2630  1.1  mrg 	tree field;
   2631  1.1  mrg 	record_layout_info rli;
   2632  1.1  mrg 
   2633  1.1  mrg 	/* Initialize the layout information.  */
   2634  1.1  mrg 	rli = start_record_layout (type);
   2635  1.1  mrg 
   2636  1.1  mrg 	/* If this is a QUAL_UNION_TYPE, we want to process the fields
   2637  1.1  mrg 	   in the reverse order in building the COND_EXPR that denotes
   2638  1.1  mrg 	   its size.  We reverse them again later.  */
   2639  1.1  mrg 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
   2640  1.1  mrg 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
   2641  1.1  mrg 
   2642  1.1  mrg 	/* Place all the fields.  */
   2643  1.1  mrg 	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
   2644  1.1  mrg 	  place_field (rli, field);
   2645  1.1  mrg 
   2646  1.1  mrg 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
   2647  1.1  mrg 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
   2648  1.1  mrg 
   2649  1.1  mrg 	/* Finish laying out the record.  */
   2650  1.1  mrg 	finish_record_layout (rli, /*free_p=*/true);
   2651  1.1  mrg       }
   2652  1.1  mrg       break;
   2653  1.1  mrg 
   2654  1.1  mrg     default:
   2655  1.1  mrg       gcc_unreachable ();
   2656  1.1  mrg     }
   2657  1.1  mrg 
   2658  1.1  mrg   /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
   2659  1.1  mrg      records and unions, finish_record_layout already called this
   2660  1.1  mrg      function.  */
   2661  1.1  mrg   if (!RECORD_OR_UNION_TYPE_P (type))
   2662  1.1  mrg     finalize_type_size (type);
   2663  1.1  mrg 
   2664  1.1  mrg   /* We should never see alias sets on incomplete aggregates.  And we
   2665  1.1  mrg      should not call layout_type on not incomplete aggregates.  */
   2666  1.1  mrg   if (AGGREGATE_TYPE_P (type))
   2667  1.1  mrg     gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
   2668  1.1  mrg }
   2669  1.1  mrg 
   2670  1.1  mrg /* Return the least alignment required for type TYPE.  */
   2671  1.1  mrg 
   2672  1.1  mrg unsigned int
   2673  1.1  mrg min_align_of_type (tree type)
   2674  1.1  mrg {
   2675  1.1  mrg   unsigned int align = TYPE_ALIGN (type);
   2676  1.1  mrg   if (!TYPE_USER_ALIGN (type))
   2677  1.1  mrg     {
   2678  1.1  mrg       align = MIN (align, BIGGEST_ALIGNMENT);
   2679  1.1  mrg #ifdef BIGGEST_FIELD_ALIGNMENT
   2680  1.1  mrg       align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
   2681  1.1  mrg #endif
   2682  1.1  mrg       unsigned int field_align = align;
   2683  1.1  mrg #ifdef ADJUST_FIELD_ALIGN
   2684  1.1  mrg       field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
   2685  1.1  mrg #endif
   2686  1.1  mrg       align = MIN (align, field_align);
   2687  1.1  mrg     }
   2688  1.1  mrg   return align / BITS_PER_UNIT;
   2689  1.1  mrg }
   2690  1.1  mrg 
   2691  1.1  mrg /* Create and return a type for signed integers of PRECISION bits.  */
   2693  1.1  mrg 
   2694  1.1  mrg tree
   2695  1.1  mrg make_signed_type (int precision)
   2696  1.1  mrg {
   2697  1.1  mrg   tree type = make_node (INTEGER_TYPE);
   2698  1.1  mrg 
   2699  1.1  mrg   TYPE_PRECISION (type) = precision;
   2700  1.1  mrg 
   2701  1.1  mrg   fixup_signed_type (type);
   2702  1.1  mrg   return type;
   2703  1.1  mrg }
   2704  1.1  mrg 
   2705  1.1  mrg /* Create and return a type for unsigned integers of PRECISION bits.  */
   2706  1.1  mrg 
   2707  1.1  mrg tree
   2708  1.1  mrg make_unsigned_type (int precision)
   2709  1.1  mrg {
   2710  1.1  mrg   tree type = make_node (INTEGER_TYPE);
   2711  1.1  mrg 
   2712  1.1  mrg   TYPE_PRECISION (type) = precision;
   2713  1.1  mrg 
   2714  1.1  mrg   fixup_unsigned_type (type);
   2715  1.1  mrg   return type;
   2716  1.1  mrg }
   2717  1.1  mrg 
   2718  1.1  mrg /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
   2720  1.1  mrg    and SATP.  */
   2721  1.1  mrg 
   2722  1.1  mrg tree
   2723  1.1  mrg make_fract_type (int precision, int unsignedp, int satp)
   2724  1.1  mrg {
   2725  1.1  mrg   tree type = make_node (FIXED_POINT_TYPE);
   2726  1.1  mrg 
   2727  1.1  mrg   TYPE_PRECISION (type) = precision;
   2728  1.1  mrg 
   2729  1.1  mrg   if (satp)
   2730  1.1  mrg     TYPE_SATURATING (type) = 1;
   2731  1.1  mrg 
   2732  1.1  mrg   /* Lay out the type: set its alignment, size, etc.  */
   2733  1.1  mrg   TYPE_UNSIGNED (type) = unsignedp;
   2734  1.1  mrg   enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
   2735  1.1  mrg   SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
   2736  1.1  mrg   layout_type (type);
   2737  1.1  mrg 
   2738  1.1  mrg   return type;
   2739  1.1  mrg }
   2740  1.1  mrg 
   2741  1.1  mrg /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
   2742  1.1  mrg    and SATP.  */
   2743  1.1  mrg 
   2744  1.1  mrg tree
   2745  1.1  mrg make_accum_type (int precision, int unsignedp, int satp)
   2746  1.1  mrg {
   2747  1.1  mrg   tree type = make_node (FIXED_POINT_TYPE);
   2748  1.1  mrg 
   2749  1.1  mrg   TYPE_PRECISION (type) = precision;
   2750  1.1  mrg 
   2751  1.1  mrg   if (satp)
   2752  1.1  mrg     TYPE_SATURATING (type) = 1;
   2753  1.1  mrg 
   2754  1.1  mrg   /* Lay out the type: set its alignment, size, etc.  */
   2755  1.1  mrg   TYPE_UNSIGNED (type) = unsignedp;
   2756  1.1  mrg   enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
   2757  1.1  mrg   SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
   2758  1.1  mrg   layout_type (type);
   2759  1.1  mrg 
   2760  1.1  mrg   return type;
   2761  1.1  mrg }
   2762  1.1  mrg 
   2763  1.1  mrg /* Initialize sizetypes so layout_type can use them.  */
   2764  1.1  mrg 
   2765  1.1  mrg void
   2766  1.1  mrg initialize_sizetypes (void)
   2767  1.1  mrg {
   2768  1.1  mrg   int precision, bprecision;
   2769  1.1  mrg 
   2770  1.1  mrg   /* Get sizetypes precision from the SIZE_TYPE target macro.  */
   2771  1.1  mrg   if (strcmp (SIZETYPE, "unsigned int") == 0)
   2772  1.1  mrg     precision = INT_TYPE_SIZE;
   2773  1.1  mrg   else if (strcmp (SIZETYPE, "long unsigned int") == 0)
   2774  1.1  mrg     precision = LONG_TYPE_SIZE;
   2775  1.1  mrg   else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
   2776  1.1  mrg     precision = LONG_LONG_TYPE_SIZE;
   2777  1.1  mrg   else if (strcmp (SIZETYPE, "short unsigned int") == 0)
   2778  1.1  mrg     precision = SHORT_TYPE_SIZE;
   2779  1.1  mrg   else
   2780  1.1  mrg     {
   2781  1.1  mrg       int i;
   2782  1.1  mrg 
   2783  1.1  mrg       precision = -1;
   2784  1.1  mrg       for (i = 0; i < NUM_INT_N_ENTS; i++)
   2785  1.1  mrg 	if (int_n_enabled_p[i])
   2786  1.1  mrg 	  {
   2787  1.1  mrg 	    char name[50], altname[50];
   2788  1.1  mrg 	    sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
   2789  1.1  mrg 	    sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);
   2790  1.1  mrg 
   2791  1.1  mrg 	    if (strcmp (name, SIZETYPE) == 0
   2792  1.1  mrg 		|| strcmp (altname, SIZETYPE) == 0)
   2793  1.1  mrg 	      {
   2794  1.1  mrg 		precision = int_n_data[i].bitsize;
   2795  1.1  mrg 	      }
   2796  1.1  mrg 	  }
   2797  1.1  mrg       if (precision == -1)
   2798  1.1  mrg 	gcc_unreachable ();
   2799  1.1  mrg     }
   2800  1.1  mrg 
   2801  1.1  mrg   bprecision
   2802  1.1  mrg     = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
   2803  1.1  mrg   bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
   2804  1.1  mrg   if (bprecision > HOST_BITS_PER_DOUBLE_INT)
   2805  1.1  mrg     bprecision = HOST_BITS_PER_DOUBLE_INT;
   2806  1.1  mrg 
   2807  1.1  mrg   /* Create stubs for sizetype and bitsizetype so we can create constants.  */
   2808  1.1  mrg   sizetype = make_node (INTEGER_TYPE);
   2809  1.1  mrg   TYPE_NAME (sizetype) = get_identifier ("sizetype");
   2810  1.1  mrg   TYPE_PRECISION (sizetype) = precision;
   2811  1.1  mrg   TYPE_UNSIGNED (sizetype) = 1;
   2812  1.1  mrg   bitsizetype = make_node (INTEGER_TYPE);
   2813  1.1  mrg   TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
   2814  1.1  mrg   TYPE_PRECISION (bitsizetype) = bprecision;
   2815  1.1  mrg   TYPE_UNSIGNED (bitsizetype) = 1;
   2816  1.1  mrg 
   2817  1.1  mrg   /* Now layout both types manually.  */
   2818  1.1  mrg   scalar_int_mode mode = smallest_int_mode_for_size (precision);
   2819  1.1  mrg   SET_TYPE_MODE (sizetype, mode);
   2820  1.1  mrg   SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
   2821  1.1  mrg   TYPE_SIZE (sizetype) = bitsize_int (precision);
   2822  1.1  mrg   TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
   2823  1.1  mrg   set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
   2824  1.1  mrg 
   2825  1.1  mrg   mode = smallest_int_mode_for_size (bprecision);
   2826  1.1  mrg   SET_TYPE_MODE (bitsizetype, mode);
   2827  1.1  mrg   SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
   2828  1.1  mrg   TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
   2829  1.1  mrg   TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
   2830  1.1  mrg   set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
   2831  1.1  mrg 
   2832  1.1  mrg   /* Create the signed variants of *sizetype.  */
   2833  1.1  mrg   ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
   2834  1.1  mrg   TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
   2835  1.1  mrg   sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
   2836  1.1  mrg   TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
   2837  1.1  mrg }
   2838  1.1  mrg 
   2839  1.1  mrg /* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
   2841  1.1  mrg    or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   2842  1.1  mrg    for TYPE, based on the PRECISION and whether or not the TYPE
   2843  1.1  mrg    IS_UNSIGNED.  PRECISION need not correspond to a width supported
   2844  1.1  mrg    natively by the hardware; for example, on a machine with 8-bit,
   2845  1.1  mrg    16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   2846  1.1  mrg    61.  */
   2847  1.1  mrg 
   2848  1.1  mrg void
   2849  1.1  mrg set_min_and_max_values_for_integral_type (tree type,
   2850  1.1  mrg 					  int precision,
   2851  1.1  mrg 					  signop sgn)
   2852  1.1  mrg {
   2853  1.1  mrg   /* For bitfields with zero width we end up creating integer types
   2854  1.1  mrg      with zero precision.  Don't assign any minimum/maximum values
   2855  1.1  mrg      to those types, they don't have any valid value.  */
   2856  1.1  mrg   if (precision < 1)
   2857  1.1  mrg     return;
   2858  1.1  mrg 
   2859  1.1  mrg   gcc_assert (precision <= WIDE_INT_MAX_PRECISION);
   2860  1.1  mrg 
   2861  1.1  mrg   TYPE_MIN_VALUE (type)
   2862  1.1  mrg     = wide_int_to_tree (type, wi::min_value (precision, sgn));
   2863  1.1  mrg   TYPE_MAX_VALUE (type)
   2864  1.1  mrg     = wide_int_to_tree (type, wi::max_value (precision, sgn));
   2865  1.1  mrg }
   2866  1.1  mrg 
   2867  1.1  mrg /* Set the extreme values of TYPE based on its precision in bits,
   2868  1.1  mrg    then lay it out.  Used when make_signed_type won't do
   2869  1.1  mrg    because the tree code is not INTEGER_TYPE.  */
   2870  1.1  mrg 
   2871  1.1  mrg void
   2872  1.1  mrg fixup_signed_type (tree type)
   2873  1.1  mrg {
   2874  1.1  mrg   int precision = TYPE_PRECISION (type);
   2875  1.1  mrg 
   2876  1.1  mrg   set_min_and_max_values_for_integral_type (type, precision, SIGNED);
   2877  1.1  mrg 
   2878  1.1  mrg   /* Lay out the type: set its alignment, size, etc.  */
   2879  1.1  mrg   layout_type (type);
   2880  1.1  mrg }
   2881  1.1  mrg 
   2882  1.1  mrg /* Set the extreme values of TYPE based on its precision in bits,
   2883  1.1  mrg    then lay it out.  This is used both in `make_unsigned_type'
   2884  1.1  mrg    and for enumeral types.  */
   2885  1.1  mrg 
   2886  1.1  mrg void
   2887  1.1  mrg fixup_unsigned_type (tree type)
   2888  1.1  mrg {
   2889  1.1  mrg   int precision = TYPE_PRECISION (type);
   2890  1.1  mrg 
   2891  1.1  mrg   TYPE_UNSIGNED (type) = 1;
   2892  1.1  mrg 
   2893  1.1  mrg   set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
   2894  1.1  mrg 
   2895  1.1  mrg   /* Lay out the type: set its alignment, size, etc.  */
   2896  1.1  mrg   layout_type (type);
   2897  1.1  mrg }
   2898  1.1  mrg 
   2899  1.1  mrg /* Construct an iterator for a bitfield that spans BITSIZE bits,
   2901  1.1  mrg    starting at BITPOS.
   2902  1.1  mrg 
   2903  1.1  mrg    BITREGION_START is the bit position of the first bit in this
   2904  1.1  mrg    sequence of bit fields.  BITREGION_END is the last bit in this
   2905  1.1  mrg    sequence.  If these two fields are non-zero, we should restrict the
   2906  1.1  mrg    memory access to that range.  Otherwise, we are allowed to touch
   2907  1.1  mrg    any adjacent non bit-fields.
   2908  1.1  mrg 
   2909  1.1  mrg    ALIGN is the alignment of the underlying object in bits.
   2910  1.1  mrg    VOLATILEP says whether the bitfield is volatile.  */
   2911  1.1  mrg 
   2912  1.1  mrg bit_field_mode_iterator
   2913  1.1  mrg ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
   2914  1.1  mrg 			   poly_int64 bitregion_start,
   2915  1.1  mrg 			   poly_int64 bitregion_end,
   2916  1.1  mrg 			   unsigned int align, bool volatilep)
   2917  1.1  mrg : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
   2918  1.1  mrg   m_bitpos (bitpos), m_bitregion_start (bitregion_start),
   2919  1.1  mrg   m_bitregion_end (bitregion_end), m_align (align),
   2920  1.1  mrg   m_volatilep (volatilep), m_count (0)
   2921  1.1  mrg {
   2922  1.1  mrg   if (known_eq (m_bitregion_end, 0))
   2923  1.1  mrg     {
   2924  1.1  mrg       /* We can assume that any aligned chunk of ALIGN bits that overlaps
   2925  1.1  mrg 	 the bitfield is mapped and won't trap, provided that ALIGN isn't
   2926  1.1  mrg 	 too large.  The cap is the biggest required alignment for data,
   2927  1.1  mrg 	 or at least the word size.  And force one such chunk at least.  */
   2928  1.1  mrg       unsigned HOST_WIDE_INT units
   2929  1.1  mrg 	= MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
   2930  1.1  mrg       if (bitsize <= 0)
   2931  1.1  mrg 	bitsize = 1;
   2932  1.1  mrg       HOST_WIDE_INT end = bitpos + bitsize + units - 1;
   2933  1.1  mrg       m_bitregion_end = end - end % units - 1;
   2934  1.1  mrg     }
   2935  1.1  mrg }
   2936  1.1  mrg 
   2937  1.1  mrg /* Calls to this function return successively larger modes that can be used
   2938  1.1  mrg    to represent the bitfield.  Return true if another bitfield mode is
   2939  1.1  mrg    available, storing it in *OUT_MODE if so.  */
   2940  1.1  mrg 
   2941  1.1  mrg bool
   2942  1.1  mrg bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
   2943  1.1  mrg {
   2944  1.1  mrg   scalar_int_mode mode;
   2945  1.1  mrg   for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
   2946  1.1  mrg     {
   2947  1.1  mrg       unsigned int unit = GET_MODE_BITSIZE (mode);
   2948  1.1  mrg 
   2949  1.1  mrg       /* Skip modes that don't have full precision.  */
   2950  1.1  mrg       if (unit != GET_MODE_PRECISION (mode))
   2951  1.1  mrg 	continue;
   2952  1.1  mrg 
   2953  1.1  mrg       /* Stop if the mode is too wide to handle efficiently.  */
   2954  1.1  mrg       if (unit > MAX_FIXED_MODE_SIZE)
   2955  1.1  mrg 	break;
   2956  1.1  mrg 
   2957  1.1  mrg       /* Don't deliver more than one multiword mode; the smallest one
   2958  1.1  mrg 	 should be used.  */
   2959  1.1  mrg       if (m_count > 0 && unit > BITS_PER_WORD)
   2960  1.1  mrg 	break;
   2961  1.1  mrg 
   2962  1.1  mrg       /* Skip modes that are too small.  */
   2963  1.1  mrg       unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
   2964  1.1  mrg       unsigned HOST_WIDE_INT subend = substart + m_bitsize;
   2965  1.1  mrg       if (subend > unit)
   2966  1.1  mrg 	continue;
   2967  1.1  mrg 
   2968  1.1  mrg       /* Stop if the mode goes outside the bitregion.  */
   2969  1.1  mrg       HOST_WIDE_INT start = m_bitpos - substart;
   2970  1.1  mrg       if (maybe_ne (m_bitregion_start, 0)
   2971  1.1  mrg 	  && maybe_lt (start, m_bitregion_start))
   2972  1.1  mrg 	break;
   2973  1.1  mrg       HOST_WIDE_INT end = start + unit;
   2974  1.1  mrg       if (maybe_gt (end, m_bitregion_end + 1))
   2975  1.1  mrg 	break;
   2976  1.1  mrg 
   2977  1.1  mrg       /* Stop if the mode requires too much alignment.  */
   2978  1.1  mrg       if (GET_MODE_ALIGNMENT (mode) > m_align
   2979  1.1  mrg 	  && targetm.slow_unaligned_access (mode, m_align))
   2980  1.1  mrg 	break;
   2981  1.1  mrg 
   2982  1.1  mrg       *out_mode = mode;
   2983  1.1  mrg       m_mode = GET_MODE_WIDER_MODE (mode);
   2984  1.1  mrg       m_count++;
   2985  1.1  mrg       return true;
   2986  1.1  mrg     }
   2987  1.1  mrg   return false;
   2988  1.1  mrg }
   2989  1.1  mrg 
   2990  1.1  mrg /* Return true if smaller modes are generally preferred for this kind
   2991  1.1  mrg    of bitfield.  */
   2992  1.1  mrg 
   2993  1.1  mrg bool
   2994  1.1  mrg bit_field_mode_iterator::prefer_smaller_modes ()
   2995  1.1  mrg {
   2996  1.1  mrg   return (m_volatilep
   2997  1.1  mrg 	  ? targetm.narrow_volatile_bitfield ()
   2998  1.1  mrg 	  : !SLOW_BYTE_ACCESS);
   2999  1.1  mrg }
   3000  1.1  mrg 
   3001  1.1  mrg /* Find the best machine mode to use when referencing a bit field of length
   3002  1.1  mrg    BITSIZE bits starting at BITPOS.
   3003  1.1  mrg 
   3004  1.1  mrg    BITREGION_START is the bit position of the first bit in this
   3005  1.1  mrg    sequence of bit fields.  BITREGION_END is the last bit in this
   3006  1.1  mrg    sequence.  If these two fields are non-zero, we should restrict the
   3007  1.1  mrg    memory access to that range.  Otherwise, we are allowed to touch
   3008  1.1  mrg    any adjacent non bit-fields.
   3009  1.1  mrg 
   3010  1.1  mrg    The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
   3011  1.1  mrg    INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
   3012  1.1  mrg    doesn't want to apply a specific limit.
   3013  1.1  mrg 
   3014  1.1  mrg    If no mode meets all these conditions, we return VOIDmode.
   3015  1.1  mrg 
   3016  1.1  mrg    The underlying object is known to be aligned to a boundary of ALIGN bits.
   3017  1.1  mrg 
   3018  1.1  mrg    If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   3019  1.1  mrg    smallest mode meeting these conditions.
   3020  1.1  mrg 
   3021  1.1  mrg    If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   3022  1.1  mrg    largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   3023  1.1  mrg    all the conditions.
   3024  1.1  mrg 
   3025  1.1  mrg    If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
   3026  1.1  mrg    decide which of the above modes should be used.  */
   3027  1.1  mrg 
   3028  1.1  mrg bool
   3029  1.1  mrg get_best_mode (int bitsize, int bitpos,
   3030  1.1  mrg 	       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
   3031  1.1  mrg 	       unsigned int align,
   3032  1.1  mrg 	       unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
   3033  1.1  mrg 	       scalar_int_mode *best_mode)
   3034  1.1  mrg {
   3035  1.1  mrg   bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
   3036  1.1  mrg 				bitregion_end, align, volatilep);
   3037  1.1  mrg   scalar_int_mode mode;
   3038  1.1  mrg   bool found = false;
   3039  1.1  mrg   while (iter.next_mode (&mode)
   3040  1.1  mrg 	 /* ??? For historical reasons, reject modes that would normally
   3041  1.1  mrg 	    receive greater alignment, even if unaligned accesses are
   3042  1.1  mrg 	    acceptable.  This has both advantages and disadvantages.
   3043  1.1  mrg 	    Removing this check means that something like:
   3044  1.1  mrg 
   3045  1.1  mrg 	       struct s { unsigned int x; unsigned int y; };
   3046  1.1  mrg 	       int f (struct s *s) { return s->x == 0 && s->y == 0; }
   3047  1.1  mrg 
   3048  1.1  mrg 	    can be implemented using a single load and compare on
   3049  1.1  mrg 	    64-bit machines that have no alignment restrictions.
   3050  1.1  mrg 	    For example, on powerpc64-linux-gnu, we would generate:
   3051  1.1  mrg 
   3052  1.1  mrg 		    ld 3,0(3)
   3053  1.1  mrg 		    cntlzd 3,3
   3054  1.1  mrg 		    srdi 3,3,6
   3055  1.1  mrg 		    blr
   3056  1.1  mrg 
   3057  1.1  mrg 	    rather than:
   3058  1.1  mrg 
   3059  1.1  mrg 		    lwz 9,0(3)
   3060  1.1  mrg 		    cmpwi 7,9,0
   3061  1.1  mrg 		    bne 7,.L3
   3062  1.1  mrg 		    lwz 3,4(3)
   3063  1.1  mrg 		    cntlzw 3,3
   3064  1.1  mrg 		    srwi 3,3,5
   3065  1.1  mrg 		    extsw 3,3
   3066  1.1  mrg 		    blr
   3067  1.1  mrg 		    .p2align 4,,15
   3068  1.1  mrg 	    .L3:
   3069  1.1  mrg 		    li 3,0
   3070  1.1  mrg 		    blr
   3071  1.1  mrg 
   3072  1.1  mrg 	    However, accessing more than one field can make life harder
   3073  1.1  mrg 	    for the gimple optimizers.  For example, gcc.dg/vect/bb-slp-5.c
   3074  1.1  mrg 	    has a series of unsigned short copies followed by a series of
   3075  1.1  mrg 	    unsigned short comparisons.  With this check, both the copies
   3076  1.1  mrg 	    and comparisons remain 16-bit accesses and FRE is able
   3077  1.1  mrg 	    to eliminate the latter.  Without the check, the comparisons
   3078  1.1  mrg 	    can be done using 2 64-bit operations, which FRE isn't able
   3079  1.1  mrg 	    to handle in the same way.
   3080  1.1  mrg 
   3081  1.1  mrg 	    Either way, it would probably be worth disabling this check
   3082  1.1  mrg 	    during expand.  One particular example where removing the
   3083  1.1  mrg 	    check would help is the get_best_mode call in store_bit_field.
   3084  1.1  mrg 	    If we are given a memory bitregion of 128 bits that is aligned
   3085  1.1  mrg 	    to a 64-bit boundary, and the bitfield we want to modify is
   3086  1.1  mrg 	    in the second half of the bitregion, this check causes
   3087  1.1  mrg 	    store_bitfield to turn the memory into a 64-bit reference
   3088  1.1  mrg 	    to the _first_ half of the region.  We later use
   3089  1.1  mrg 	    adjust_bitfield_address to get a reference to the correct half,
   3090  1.1  mrg 	    but doing so looks to adjust_bitfield_address as though we are
   3091  1.1  mrg 	    moving past the end of the original object, so it drops the
   3092  1.1  mrg 	    associated MEM_EXPR and MEM_OFFSET.  Removing the check
   3093  1.1  mrg 	    causes store_bit_field to keep a 128-bit memory reference,
   3094  1.1  mrg 	    so that the final bitfield reference still has a MEM_EXPR
   3095  1.1  mrg 	    and MEM_OFFSET.  */
   3096  1.1  mrg 	 && GET_MODE_ALIGNMENT (mode) <= align
   3097  1.1  mrg 	 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
   3098  1.1  mrg     {
   3099  1.1  mrg       *best_mode = mode;
   3100  1.1  mrg       found = true;
   3101  1.1  mrg       if (iter.prefer_smaller_modes ())
   3102  1.1  mrg 	break;
   3103  1.1  mrg     }
   3104  1.1  mrg 
   3105  1.1  mrg   return found;
   3106  1.1  mrg }
   3107  1.1  mrg 
   3108  1.1  mrg /* Gets minimal and maximal values for MODE (signed or unsigned depending on
   3109  1.1  mrg    SIGN).  The returned constants are made to be usable in TARGET_MODE.  */
   3110  1.1  mrg 
   3111  1.1  mrg void
   3112  1.1  mrg get_mode_bounds (scalar_int_mode mode, int sign,
   3113  1.1  mrg 		 scalar_int_mode target_mode,
   3114  1.1  mrg 		 rtx *mmin, rtx *mmax)
   3115  1.1  mrg {
   3116  1.1  mrg   unsigned size = GET_MODE_PRECISION (mode);
   3117  1.1  mrg   unsigned HOST_WIDE_INT min_val, max_val;
   3118  1.1  mrg 
   3119  1.1  mrg   gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
   3120  1.1  mrg 
   3121  1.1  mrg   /* Special case BImode, which has values 0 and STORE_FLAG_VALUE.  */
   3122  1.1  mrg   if (mode == BImode)
   3123  1.1  mrg     {
   3124  1.1  mrg       if (STORE_FLAG_VALUE < 0)
   3125  1.1  mrg 	{
   3126  1.1  mrg 	  min_val = STORE_FLAG_VALUE;
   3127  1.1  mrg 	  max_val = 0;
   3128  1.1  mrg 	}
   3129  1.1  mrg       else
   3130  1.1  mrg 	{
   3131  1.1  mrg 	  min_val = 0;
   3132  1.1  mrg 	  max_val = STORE_FLAG_VALUE;
   3133  1.1  mrg 	}
   3134  1.1  mrg     }
   3135  1.1  mrg   else if (sign)
   3136  1.1  mrg     {
   3137  1.1  mrg       min_val = -(HOST_WIDE_INT_1U << (size - 1));
   3138  1.1  mrg       max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
   3139  1.1  mrg     }
   3140  1.1  mrg   else
   3141  1.1  mrg     {
   3142                 min_val = 0;
   3143                 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
   3144               }
   3145           
   3146             *mmin = gen_int_mode (min_val, target_mode);
   3147             *mmax = gen_int_mode (max_val, target_mode);
   3148           }
   3149           
   3150           #include "gt-stor-layout.h"
   3151