xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/stor-layout.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* C-compiler utilities for types and variables storage layout
2    Copyright (C) 1987-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hash-set.h"
26 #include "machmode.h"
27 #include "vec.h"
28 #include "double-int.h"
29 #include "input.h"
30 #include "alias.h"
31 #include "symtab.h"
32 #include "wide-int.h"
33 #include "inchash.h"
34 #include "tree.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "stringpool.h"
38 #include "varasm.h"
39 #include "print-tree.h"
40 #include "rtl.h"
41 #include "tm_p.h"
42 #include "flags.h"
43 #include "hard-reg-set.h"
44 #include "function.h"
45 #include "hashtab.h"
46 #include "statistics.h"
47 #include "real.h"
48 #include "fixed-value.h"
49 #include "insn-config.h"
50 #include "expmed.h"
51 #include "dojump.h"
52 #include "explow.h"
53 #include "calls.h"
54 #include "emit-rtl.h"
55 #include "stmt.h"
56 #include "expr.h"
57 #include "diagnostic-core.h"
58 #include "target.h"
59 #include "langhooks.h"
60 #include "regs.h"
61 #include "params.h"
62 #include "hash-map.h"
63 #include "is-a.h"
64 #include "plugin-api.h"
65 #include "ipa-ref.h"
66 #include "cgraph.h"
67 #include "tree-inline.h"
68 #include "tree-dump.h"
69 #include "gimplify.h"
70 
71 /* Data type for the expressions representing sizes of data types.
72    It is the first integer type laid out.  */
73 tree sizetype_tab[(int) stk_type_kind_last];
74 
75 /* If nonzero, this is an upper limit on alignment of structure fields.
76    The value is measured in bits.  */
77 unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
78 
79 /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
80    in the address spaces' address_mode, not pointer_mode.  Set only by
81    internal_reference_types, which is called only by a front end.  */
82 static int reference_types_internal = 0;
83 
84 static tree self_referential_size (tree);
85 static void finalize_record_size (record_layout_info);
86 static void finalize_type_size (tree);
87 static void place_union_field (record_layout_info, tree);
88 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
89 static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
90 			     HOST_WIDE_INT, tree);
91 #endif
92 extern void debug_rli (record_layout_info);
93 
94 /* Show that REFERENCE_TYPEs are internal and should use address_mode.
95    Called only by a front end.  */
96 
97 void
98 internal_reference_types (void)
99 {
100   reference_types_internal = 1;
101 }
102 
103 /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
104    to serve as the actual size-expression for a type or decl.  */
105 
106 tree
107 variable_size (tree size)
108 {
109   /* Obviously.  */
110   if (TREE_CONSTANT (size))
111     return size;
112 
113   /* If the size is self-referential, we can't make a SAVE_EXPR (see
114      save_expr for the rationale).  But we can do something else.  */
115   if (CONTAINS_PLACEHOLDER_P (size))
116     return self_referential_size (size);
117 
118   /* If we are in the global binding level, we can't make a SAVE_EXPR
119      since it may end up being shared across functions, so it is up
120      to the front-end to deal with this case.  */
121   if (lang_hooks.decls.global_bindings_p ())
122     return size;
123 
124   return save_expr (size);
125 }
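
/* Usage sketch (hypothetical front-end code; N_EXPR and ATYPE are
   made-up names): when laying out a variable-length array, the byte
   size is wrapped so that it is evaluated exactly once:

     tree nelts = fold_convert (sizetype, n_expr);
     tree size = size_binop (MULT_EXPR, nelts,
			     TYPE_SIZE_UNIT (TREE_TYPE (atype)));
     TYPE_SIZE_UNIT (atype) = variable_size (size);

   At global binding level the expression is returned unwrapped and the
   front end must arrange for single evaluation itself.  */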
126 
127 /* An array of functions used for self-referential size computation.  */
128 static GTY(()) vec<tree, va_gc> *size_functions;
129 
130 /* Similar to copy_tree_r but do not copy component references involving
131    PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
132    and substituted in substitute_in_expr.  */
133 
134 static tree
135 copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
136 {
137   enum tree_code code = TREE_CODE (*tp);
138 
139   /* Stop at types, decls, constants like copy_tree_r.  */
140   if (TREE_CODE_CLASS (code) == tcc_type
141       || TREE_CODE_CLASS (code) == tcc_declaration
142       || TREE_CODE_CLASS (code) == tcc_constant)
143     {
144       *walk_subtrees = 0;
145       return NULL_TREE;
146     }
147 
148   /* This is the pattern built in ada/make_aligning_type.  */
149   else if (code == ADDR_EXPR
150 	   && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
151     {
152       *walk_subtrees = 0;
153       return NULL_TREE;
154     }
155 
156   /* Default case: the component reference.  */
157   else if (code == COMPONENT_REF)
158     {
159       tree inner;
160       for (inner = TREE_OPERAND (*tp, 0);
161 	   REFERENCE_CLASS_P (inner);
162 	   inner = TREE_OPERAND (inner, 0))
163 	;
164 
165       if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
166 	{
167 	  *walk_subtrees = 0;
168 	  return NULL_TREE;
169 	}
170     }
171 
172   /* We're not supposed to have them in self-referential size trees
173      because we wouldn't properly control when they are evaluated.
174      However, not creating superfluous SAVE_EXPRs requires accurate
175      tracking of readonly-ness all the way down to here, which we
176      cannot always guarantee in practice.  So punt in this case.  */
177   else if (code == SAVE_EXPR)
178     return error_mark_node;
179 
180   else if (code == STATEMENT_LIST)
181     gcc_unreachable ();
182 
183   return copy_tree_r (tp, walk_subtrees, data);
184 }
185 
186 /* Given a SIZE expression that is self-referential, return an equivalent
187    expression to serve as the actual size expression for a type.  */
188 
189 static tree
190 self_referential_size (tree size)
191 {
192   static unsigned HOST_WIDE_INT fnno = 0;
193   vec<tree> self_refs = vNULL;
194   tree param_type_list = NULL, param_decl_list = NULL;
195   tree t, ref, return_type, fntype, fnname, fndecl;
196   unsigned int i;
197   char buf[128];
198   vec<tree, va_gc> *args = NULL;
199 
200   /* Do not factor out simple operations.  */
201   t = skip_simple_constant_arithmetic (size);
202   if (TREE_CODE (t) == CALL_EXPR)
203     return size;
204 
205   /* Collect the list of self-references in the expression.  */
206   find_placeholder_in_expr (size, &self_refs);
207   gcc_assert (self_refs.length () > 0);
208 
209   /* Obtain a private copy of the expression.  */
210   t = size;
211   if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
212     return size;
213   size = t;
214 
215   /* Build the parameter and argument lists in parallel; also
216      substitute the former for the latter in the expression.  */
217   vec_alloc (args, self_refs.length ());
218   FOR_EACH_VEC_ELT (self_refs, i, ref)
219     {
220       tree subst, param_name, param_type, param_decl;
221 
222       if (DECL_P (ref))
223 	{
224 	  /* We shouldn't have true variables here.  */
225 	  gcc_assert (TREE_READONLY (ref));
226 	  subst = ref;
227 	}
228       /* This is the pattern built in ada/make_aligning_type.  */
229       else if (TREE_CODE (ref) == ADDR_EXPR)
230         subst = ref;
231       /* Default case: the component reference.  */
232       else
233 	subst = TREE_OPERAND (ref, 1);
234 
235       sprintf (buf, "p%d", i);
236       param_name = get_identifier (buf);
237       param_type = TREE_TYPE (ref);
238       param_decl
239 	= build_decl (input_location, PARM_DECL, param_name, param_type);
240       DECL_ARG_TYPE (param_decl) = param_type;
241       DECL_ARTIFICIAL (param_decl) = 1;
242       TREE_READONLY (param_decl) = 1;
243 
244       size = substitute_in_expr (size, subst, param_decl);
245 
246       param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
247       param_decl_list = chainon (param_decl, param_decl_list);
248       args->quick_push (ref);
249     }
250 
251   self_refs.release ();
252 
253   /* Append 'void' to indicate that the number of parameters is fixed.  */
254   param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
255 
256   /* The 3 lists have been created in reverse order.  */
257   param_type_list = nreverse (param_type_list);
258   param_decl_list = nreverse (param_decl_list);
259 
260   /* Build the function type.  */
261   return_type = TREE_TYPE (size);
262   fntype = build_function_type (return_type, param_type_list);
263 
264   /* Build the function declaration.  */
265   sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
266   fnname = get_file_function_name (buf);
267   fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
268   for (t = param_decl_list; t; t = DECL_CHAIN (t))
269     DECL_CONTEXT (t) = fndecl;
270   DECL_ARGUMENTS (fndecl) = param_decl_list;
271   DECL_RESULT (fndecl)
272     = build_decl (input_location, RESULT_DECL, 0, return_type);
273   DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
274 
275   /* The function has been created by the compiler and we don't
276      want to emit debug info for it.  */
277   DECL_ARTIFICIAL (fndecl) = 1;
278   DECL_IGNORED_P (fndecl) = 1;
279 
280   /* It is supposed to be "const" and never throw.  */
281   TREE_READONLY (fndecl) = 1;
282   TREE_NOTHROW (fndecl) = 1;
283 
284   /* We want it to be inlined when this is deemed profitable, as
285      well as discarded if every call has been integrated.  */
286   DECL_DECLARED_INLINE_P (fndecl) = 1;
287 
288   /* It is made up of a unique return statement.  */
289   DECL_INITIAL (fndecl) = make_node (BLOCK);
290   BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
291   t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
292   DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
293   TREE_STATIC (fndecl) = 1;
294 
295   /* Put it onto the list of size functions.  */
296   vec_safe_push (size_functions, fndecl);
297 
298   /* Replace the original expression with a call to the size function.  */
299   return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
300 }
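
/* For illustration (made-up names, types elided): given a record whose
   size is self-referential, e.g. PLACEHOLDER_EXPR.len * 4, the code
   above synthesizes roughly

     SZ0 (p0) { return p0 * 4; }

   and returns SZ0 applied to the original component reference, which
   still contains the PLACEHOLDER_EXPR and is substituted with the
   actual object later; the GENERIC inliner then inlines the body back
   at each call site.  */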
301 
302 /* Take, queue and compile all the size functions.  It is essential that
303    the size functions be gimplified at the very end of the compilation
304    in order to guarantee transparent handling of self-referential sizes.
305    Otherwise the GENERIC inliner would not be able to inline them back
306    at each of their call sites, thus creating artificial non-constant
307    size expressions which would trigger nasty problems later on.  */
308 
309 void
310 finalize_size_functions (void)
311 {
312   unsigned int i;
313   tree fndecl;
314 
315   for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
316     {
317       allocate_struct_function (fndecl, false);
318       set_cfun (NULL);
319       dump_function (TDI_original, fndecl);
320       gimplify_function_tree (fndecl);
321       dump_function (TDI_generic, fndecl);
322       cgraph_node::finalize_function (fndecl, false);
323     }
324 
325   vec_free (size_functions);
326 }
327 
328 /* Return the machine mode to use for a nonscalar of SIZE bits.  The
329    mode must be in class MCLASS, and have exactly that many value bits;
330    it may have padding as well.  If LIMIT is nonzero, modes wider
331    than MAX_FIXED_MODE_SIZE will not be used.  */
332 
333 machine_mode
334 mode_for_size (unsigned int size, enum mode_class mclass, int limit)
335 {
336   machine_mode mode;
337   int i;
338 
339   if (limit && size > MAX_FIXED_MODE_SIZE)
340     return BLKmode;
341 
342   /* Get the first mode which has this size, in the specified class.  */
343   for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
344        mode = GET_MODE_WIDER_MODE (mode))
345     if (GET_MODE_PRECISION (mode) == size)
346       return mode;
347 
348   if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
349     for (i = 0; i < NUM_INT_N_ENTS; i ++)
350       if (int_n_data[i].bitsize == size
351 	  && int_n_enabled_p[i])
352 	return int_n_data[i].m;
353 
354   return BLKmode;
355 }
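
/* Example (target-dependent): on a typical 32-bit port,
   mode_for_size (32, MODE_INT, 1) returns SImode, whereas
   mode_for_size (24, MODE_INT, 1) returns BLKmode unless the target
   provides a 24-bit integer mode.  */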
356 
357 /* Similar, except passed a tree node.  */
358 
359 machine_mode
360 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
361 {
362   unsigned HOST_WIDE_INT uhwi;
363   unsigned int ui;
364 
365   if (!tree_fits_uhwi_p (size))
366     return BLKmode;
367   uhwi = tree_to_uhwi (size);
368   ui = uhwi;
369   if (uhwi != ui)
370     return BLKmode;
371   return mode_for_size (ui, mclass, limit);
372 }
373 
374 /* Similar, but never return BLKmode; return the narrowest mode that
375    contains at least the requested number of value bits.  */
376 
377 machine_mode
378 smallest_mode_for_size (unsigned int size, enum mode_class mclass)
379 {
380   machine_mode mode = VOIDmode;
381   int i;
382 
383   /* Get the first mode which has at least this size, in the
384      specified class.  */
385   for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
386        mode = GET_MODE_WIDER_MODE (mode))
387     if (GET_MODE_PRECISION (mode) >= size)
388       break;
389 
390   if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
391     for (i = 0; i < NUM_INT_N_ENTS; i ++)
392       if (int_n_data[i].bitsize >= size
393 	  && int_n_data[i].bitsize < GET_MODE_PRECISION (mode)
394 	  && int_n_enabled_p[i])
395 	mode = int_n_data[i].m;
396 
397   if (mode == VOIDmode)
398     gcc_unreachable ();
399 
400   return mode;
401 }
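
/* Example (target-dependent): with the usual QImode/HImode/SImode
   progression, smallest_mode_for_size (17, MODE_INT) returns SImode,
   the narrowest mode with at least 17 value bits.  */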
402 
403 /* Find an integer mode of the exact same size, or BLKmode on failure.  */
404 
405 machine_mode
406 int_mode_for_mode (machine_mode mode)
407 {
408   switch (GET_MODE_CLASS (mode))
409     {
410     case MODE_INT:
411     case MODE_PARTIAL_INT:
412       break;
413 
414     case MODE_COMPLEX_INT:
415     case MODE_COMPLEX_FLOAT:
416     case MODE_FLOAT:
417     case MODE_DECIMAL_FLOAT:
418     case MODE_VECTOR_INT:
419     case MODE_VECTOR_FLOAT:
420     case MODE_FRACT:
421     case MODE_ACCUM:
422     case MODE_UFRACT:
423     case MODE_UACCUM:
424     case MODE_VECTOR_FRACT:
425     case MODE_VECTOR_ACCUM:
426     case MODE_VECTOR_UFRACT:
427     case MODE_VECTOR_UACCUM:
428     case MODE_POINTER_BOUNDS:
429       mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
430       break;
431 
432     case MODE_RANDOM:
433       if (mode == BLKmode)
434 	break;
435 
436       /* ... fall through ...  */
437 
438     case MODE_CC:
439     default:
440       gcc_unreachable ();
441     }
442 
443   return mode;
444 }
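
/* Example (target-dependent): on a target where SFmode occupies 32
   bits, int_mode_for_mode (SFmode) returns SImode, allowing callers to
   copy or inspect floating-point payloads with integer operations.  */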
445 
446 /* Find a mode that can be used for efficient bitwise operations on MODE.
447    Return BLKmode if no such mode exists.  */
448 
449 machine_mode
450 bitwise_mode_for_mode (machine_mode mode)
451 {
452   /* Quick exit if we already have a suitable mode.  */
453   unsigned int bitsize = GET_MODE_BITSIZE (mode);
454   if (SCALAR_INT_MODE_P (mode) && bitsize <= MAX_FIXED_MODE_SIZE)
455     return mode;
456 
457   /* Reuse the sanity checks from int_mode_for_mode.  */
458   gcc_checking_assert ((int_mode_for_mode (mode), true));
459 
460   /* Try to replace complex modes with complex modes.  In general we
461      expect both components to be processed independently, so we only
462      care whether there is a register for the inner mode.  */
463   if (COMPLEX_MODE_P (mode))
464     {
465       machine_mode trial = mode;
466       if (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT)
467 	trial = mode_for_size (bitsize, MODE_COMPLEX_INT, false);
468       if (trial != BLKmode
469 	  && have_regs_of_mode[GET_MODE_INNER (trial)])
470 	return trial;
471     }
472 
473   /* Try to replace vector modes with vector modes.  Also try using vector
474      modes if an integer mode would be too big.  */
475   if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
476     {
477       machine_mode trial = mode;
478       if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
479 	trial = mode_for_size (bitsize, MODE_VECTOR_INT, 0);
480       if (trial != BLKmode
481 	  && have_regs_of_mode[trial]
482 	  && targetm.vector_mode_supported_p (trial))
483 	return trial;
484     }
485 
486   /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
487   return mode_for_size (bitsize, MODE_INT, true);
488 }
489 
490 /* Find a type that can be used for efficient bitwise operations on MODE.
491    Return null if no such mode exists.  */
492 
493 tree
494 bitwise_type_for_mode (machine_mode mode)
495 {
496   mode = bitwise_mode_for_mode (mode);
497   if (mode == BLKmode)
498     return NULL_TREE;
499 
500   unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
501   tree inner_type = build_nonstandard_integer_type (inner_size, true);
502 
503   if (VECTOR_MODE_P (mode))
504     return build_vector_type_for_mode (inner_type, mode);
505 
506   if (COMPLEX_MODE_P (mode))
507     return build_complex_type (inner_type);
508 
509   gcc_checking_assert (GET_MODE_INNER (mode) == VOIDmode);
510   return inner_type;
511 }
512 
513 /* Find a mode that is suitable for representing a vector with
514    NUNITS elements of mode INNERMODE.  Returns BLKmode if there
515    is no suitable mode.  */
516 
517 machine_mode
518 mode_for_vector (machine_mode innermode, unsigned nunits)
519 {
520   machine_mode mode;
521 
522   /* First, look for a supported vector type.  */
523   if (SCALAR_FLOAT_MODE_P (innermode))
524     mode = MIN_MODE_VECTOR_FLOAT;
525   else if (SCALAR_FRACT_MODE_P (innermode))
526     mode = MIN_MODE_VECTOR_FRACT;
527   else if (SCALAR_UFRACT_MODE_P (innermode))
528     mode = MIN_MODE_VECTOR_UFRACT;
529   else if (SCALAR_ACCUM_MODE_P (innermode))
530     mode = MIN_MODE_VECTOR_ACCUM;
531   else if (SCALAR_UACCUM_MODE_P (innermode))
532     mode = MIN_MODE_VECTOR_UACCUM;
533   else
534     mode = MIN_MODE_VECTOR_INT;
535 
536   /* Do not check vector_mode_supported_p here.  We'll do that
537      later in vector_type_mode.  */
538   for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
539     if (GET_MODE_NUNITS (mode) == nunits
540 	&& GET_MODE_INNER (mode) == innermode)
541       break;
542 
543   /* For integers, try mapping it to a same-sized scalar mode.  */
544   if (mode == VOIDmode
545       && GET_MODE_CLASS (innermode) == MODE_INT)
546     mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
547 			  MODE_INT, 0);
548 
549   if (mode == VOIDmode
550       || (GET_MODE_CLASS (mode) == MODE_INT
551 	  && !have_regs_of_mode[mode]))
552     return BLKmode;
553 
554   return mode;
555 }
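
/* Example (target-dependent): mode_for_vector (SImode, 4) returns
   V4SImode where the target defines it; failing that, an integer mode
   of the same overall size may be returned for integer elements, with
   BLKmode as the final fallback.  */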
556 
557 /* Return the alignment of MODE.  This will be bounded by 1 and
558    BIGGEST_ALIGNMENT.  */
559 
560 unsigned int
561 get_mode_alignment (machine_mode mode)
562 {
563   return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
564 }
565 
566 /* Return the precision of the mode, or for a complex or vector mode the
567    precision of the mode of its elements.  */
568 
569 unsigned int
570 element_precision (machine_mode mode)
571 {
572   if (COMPLEX_MODE_P (mode) || VECTOR_MODE_P (mode))
573     mode = GET_MODE_INNER (mode);
574 
575   return GET_MODE_PRECISION (mode);
576 }
577 
578 /* Return the natural mode of an array, given that it is SIZE bytes in
579    total and has elements of type ELEM_TYPE.  */
580 
581 static machine_mode
582 mode_for_array (tree elem_type, tree size)
583 {
584   tree elem_size;
585   unsigned HOST_WIDE_INT int_size, int_elem_size;
586   bool limit_p;
587 
588   /* One-element arrays get the component type's mode.  */
589   elem_size = TYPE_SIZE (elem_type);
590   if (simple_cst_equal (size, elem_size))
591     return TYPE_MODE (elem_type);
592 
593   limit_p = true;
594   if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size))
595     {
596       int_size = tree_to_uhwi (size);
597       int_elem_size = tree_to_uhwi (elem_size);
598       if (int_elem_size > 0
599 	  && int_size % int_elem_size == 0
600 	  && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
601 					     int_size / int_elem_size))
602 	limit_p = false;
603     }
604   return mode_for_size_tree (size, MODE_INT, limit_p);
605 }
606 
607 /* Subroutine of layout_decl: Force alignment required for the data type.
608    But if the decl itself wants greater alignment, don't override that.  */
609 
610 static inline void
611 do_type_align (tree type, tree decl)
612 {
613   if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
614     {
615       DECL_ALIGN (decl) = TYPE_ALIGN (type);
616       if (TREE_CODE (decl) == FIELD_DECL)
617 	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
618     }
619 }
620 
621 /* Set the size, mode and alignment of a ..._DECL node.
622    TYPE_DECL does need this for C++.
623    Note that LABEL_DECL and CONST_DECL nodes do not need this,
624    and FUNCTION_DECL nodes have them set up in a special (and simple) way.
625    Don't call layout_decl for them.
626 
627    KNOWN_ALIGN is the amount of alignment we can assume this
628    decl has with no special effort.  It is relevant only for FIELD_DECLs
629    and depends on the previous fields.
630    All that matters about KNOWN_ALIGN is which powers of 2 divide it.
631    If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
632    the record will be aligned to suit.  */
633 
634 void
635 layout_decl (tree decl, unsigned int known_align)
636 {
637   tree type = TREE_TYPE (decl);
638   enum tree_code code = TREE_CODE (decl);
639   rtx rtl = NULL_RTX;
640   location_t loc = DECL_SOURCE_LOCATION (decl);
641 
642   if (code == CONST_DECL)
643     return;
644 
645   gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
646 	      || code == TYPE_DECL || code == FIELD_DECL);
647 
648   rtl = DECL_RTL_IF_SET (decl);
649 
650   if (type == error_mark_node)
651     type = void_type_node;
652 
653   /* Usually the size and mode come from the data type without change,
654      however, the front-end may set the explicit width of the field, so its
655      size may not be the same as the size of its type.  This happens with
656      bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
657      also happens with other fields.  For example, the C++ front-end creates
658      zero-sized fields corresponding to empty base classes, and depends on
659      layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
660      size in bytes from the size in bits.  If we have already set the mode,
661      don't set it again since we can be called twice for FIELD_DECLs.  */
662 
663   DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
664   if (DECL_MODE (decl) == VOIDmode)
665     DECL_MODE (decl) = TYPE_MODE (type);
666 
667   if (DECL_SIZE (decl) == 0)
668     {
669       DECL_SIZE (decl) = TYPE_SIZE (type);
670       DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
671     }
672   else if (DECL_SIZE_UNIT (decl) == 0)
673     DECL_SIZE_UNIT (decl)
674       = fold_convert_loc (loc, sizetype,
675 			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
676 					  bitsize_unit_node));
677 
678   if (code != FIELD_DECL)
679     /* For non-fields, update the alignment from the type.  */
680     do_type_align (type, decl);
681   else
682     /* For fields, it's a bit more complicated...  */
683     {
684       bool old_user_align = DECL_USER_ALIGN (decl);
685       bool zero_bitfield = false;
686       bool packed_p = DECL_PACKED (decl);
687       unsigned int mfa;
688 
689       if (DECL_BIT_FIELD (decl))
690 	{
691 	  DECL_BIT_FIELD_TYPE (decl) = type;
692 
693 	  /* A zero-length bit-field affects the alignment of the next
694 	     field.  In essence such bit-fields are not influenced by
695 	     any packing due to #pragma pack or attribute packed.  */
696 	  if (integer_zerop (DECL_SIZE (decl))
697 	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
698 	    {
699 	      zero_bitfield = true;
700 	      packed_p = false;
701 #ifdef PCC_BITFIELD_TYPE_MATTERS
702 	      if (PCC_BITFIELD_TYPE_MATTERS)
703 		do_type_align (type, decl);
704 	      else
705 #endif
706 		{
707 #ifdef EMPTY_FIELD_BOUNDARY
708 		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
709 		    {
710 		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
711 		      DECL_USER_ALIGN (decl) = 0;
712 		    }
713 #endif
714 		}
715 	    }
716 
717 	  /* See if we can use an ordinary integer mode for a bit-field.
718 	     Conditions are: a fixed size that is correct for another mode,
719 	     occupying a complete byte or bytes on proper boundary.  */
720 	  if (TYPE_SIZE (type) != 0
721 	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
722 	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
723 	    {
724 	      machine_mode xmode
725 		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
726 	      unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
727 
728 	      if (xmode != BLKmode
729 		  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
730 		  && (known_align == 0 || known_align >= xalign))
731 		{
732 		  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
733 		  DECL_MODE (decl) = xmode;
734 		  DECL_BIT_FIELD (decl) = 0;
735 		}
736 	    }
737 
738 	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
739 	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
740 	      && known_align >= TYPE_ALIGN (type)
741 	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
742 	    DECL_BIT_FIELD (decl) = 0;
743 	}
744       else if (packed_p && DECL_USER_ALIGN (decl))
745 	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
746 	   round up; we'll reduce it again below.  We want packing to
747 	   supersede USER_ALIGN inherited from the type, but defer to
748 	   alignment explicitly specified on the field decl.  */;
749       else
750 	do_type_align (type, decl);
751 
752       /* If the field is packed and not explicitly aligned, give it the
753 	 minimum alignment.  Note that do_type_align may set
754 	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
755       if (packed_p
756 	  && !old_user_align)
757 	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
758 
759       if (! packed_p && ! DECL_USER_ALIGN (decl))
760 	{
761 	  /* Some targets (e.g. i386, VMS) limit struct field alignment
762 	     to a lower boundary than alignment of variables unless
763 	     it was overridden by attribute aligned.  */
764 #ifdef BIGGEST_FIELD_ALIGNMENT
765 	  DECL_ALIGN (decl)
766 	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
767 #endif
768 #ifdef ADJUST_FIELD_ALIGN
769 	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
770 #endif
771 	}
772 
773       if (zero_bitfield)
774         mfa = initial_max_fld_align * BITS_PER_UNIT;
775       else
776 	mfa = maximum_field_alignment;
777       /* Should this be controlled by DECL_USER_ALIGN, too?  */
778       if (mfa != 0)
779 	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
780     }
781 
782   /* Evaluate nonconstant size only once, either now or as soon as safe.  */
783   if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
784     DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
785   if (DECL_SIZE_UNIT (decl) != 0
786       && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
787     DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));
788 
789   /* If requested, warn about definitions of large data objects.  */
790   if (warn_larger_than
791       && (code == VAR_DECL || code == PARM_DECL)
792       && ! DECL_EXTERNAL (decl))
793     {
794       tree size = DECL_SIZE_UNIT (decl);
795 
796       if (size != 0 && TREE_CODE (size) == INTEGER_CST
797 	  && compare_tree_int (size, larger_than_size) > 0)
798 	{
799 	  int size_as_int = TREE_INT_CST_LOW (size);
800 
801 	  if (compare_tree_int (size, size_as_int) == 0)
802 	    warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
803 	  else
804 	    warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
805                      decl, larger_than_size);
806 	}
807     }
808 
809   /* If the RTL was already set, update its mode and mem attributes.  */
810   if (rtl)
811     {
812       PUT_MODE (rtl, DECL_MODE (decl));
813       SET_DECL_RTL (decl, 0);
814       set_mem_attributes (rtl, decl, 1);
815       SET_DECL_RTL (decl, rtl);
816     }
817 }
818 
819 /* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
820    a previous call to layout_decl and calls it again.  */
821 
822 void
823 relayout_decl (tree decl)
824 {
825   DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
826   DECL_MODE (decl) = VOIDmode;
827   if (!DECL_USER_ALIGN (decl))
828     DECL_ALIGN (decl) = 0;
829   SET_DECL_RTL (decl, 0);
830 
831   layout_decl (decl, 0);
832 }
833 
834 /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
835    QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
836    is to be passed to all other layout functions for this record.  It is the
837    responsibility of the caller to call `free' for the storage returned.
838    Note that garbage collection is not permitted until we finish laying
839    out the record.  */
840 
841 record_layout_info
842 start_record_layout (tree t)
843 {
844   record_layout_info rli = XNEW (struct record_layout_info_s);
845 
846   rli->t = t;
847 
848   /* If the type has a minimum specified alignment (via an attribute
849      declaration, for example) use it -- otherwise, start with a
850      one-byte alignment.  */
851   rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
852   rli->unpacked_align = rli->record_align;
853   rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
854 
855 #ifdef STRUCTURE_SIZE_BOUNDARY
856   /* Packed structures don't need to have minimum size.  */
857   if (! TYPE_PACKED (t))
858     {
859       unsigned tmp;
860 
861       /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
862       tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
863       if (maximum_field_alignment != 0)
864 	tmp = MIN (tmp, maximum_field_alignment);
865       rli->record_align = MAX (rli->record_align, tmp);
866     }
867 #endif
868 
869   rli->offset = size_zero_node;
870   rli->bitpos = bitsize_zero_node;
871   rli->prev_field = 0;
872   rli->pending_statics = 0;
873   rli->packed_maybe_necessary = 0;
874   rli->remaining_in_alignment = 0;
875 
876   return rli;
877 }
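
/* A typical driver, as a sketch (finish_record_layout is declared in
   stor-layout.h and defined later in this file):

     record_layout_info rli = start_record_layout (t);
     for (tree f = TYPE_FIELDS (t); f; f = DECL_CHAIN (f))
       place_field (rli, f);
     finish_record_layout (rli, true);

   No garbage collection may run between these calls.  */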
878 
879 /* Return the combined bit position for the byte offset OFFSET and the
880    bit position BITPOS.
881 
882    These functions operate on byte and bit positions present in FIELD_DECLs
883    and assume that these expressions result in no (intermediate) overflow.
884    This assumption is necessary to fold the expressions as much as possible,
885    so as to avoid creating artificially variable-sized types in languages
886    supporting variable-sized types like Ada.  */
887 
888 tree
889 bit_from_pos (tree offset, tree bitpos)
890 {
891   if (TREE_CODE (offset) == PLUS_EXPR)
892     offset = size_binop (PLUS_EXPR,
893 			 fold_convert (bitsizetype, TREE_OPERAND (offset, 0)),
894 			 fold_convert (bitsizetype, TREE_OPERAND (offset, 1)));
895   else
896     offset = fold_convert (bitsizetype, offset);
897   return size_binop (PLUS_EXPR, bitpos,
898 		     size_binop (MULT_EXPR, offset, bitsize_unit_node));
899 }
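
/* Worked example (assuming BITS_PER_UNIT == 8): a byte offset of 5 and
   a bit position of 3 combine to 3 + 5 * 8 = 43 bits.  */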
900 
901 /* Return the combined truncated byte position for the byte offset OFFSET and
902    the bit position BITPOS.  */
903 
904 tree
905 byte_from_pos (tree offset, tree bitpos)
906 {
907   tree bytepos;
908   if (TREE_CODE (bitpos) == MULT_EXPR
909       && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
910     bytepos = TREE_OPERAND (bitpos, 0);
911   else
912     bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
913   return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
914 }
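
/* Worked example (assuming BITS_PER_UNIT == 8): a byte offset of 5 and
   a bit position of 11 combine to 5 + 11 / 8 = 6 bytes; the remaining
   3 bits are truncated away.  */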
915 
916 /* Split the bit position POS into a byte offset *POFFSET and a bit
917    position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */
918 
919 void
920 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
921 	      tree pos)
922 {
923   tree toff_align = bitsize_int (off_align);
924   if (TREE_CODE (pos) == MULT_EXPR
925       && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
926     {
927       *poffset = size_binop (MULT_EXPR,
928 			     fold_convert (sizetype, TREE_OPERAND (pos, 0)),
929 			     size_int (off_align / BITS_PER_UNIT));
930       *pbitpos = bitsize_zero_node;
931     }
932   else
933     {
934       *poffset = size_binop (MULT_EXPR,
935 			     fold_convert (sizetype,
936 					   size_binop (FLOOR_DIV_EXPR, pos,
937 						       toff_align)),
938 			     size_int (off_align / BITS_PER_UNIT));
939       *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
940     }
941 }
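
/* Worked example (assuming BITS_PER_UNIT == 8): splitting POS = 70
   with OFF_ALIGN = 32 yields *POFFSET = (70 / 32) * 4 = 8 bytes and
   *PBITPOS = 70 % 32 = 6 bits.  */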
942 
943 /* Given a pointer to bit and byte offsets and an offset alignment,
944    normalize the offsets so they are within the alignment.  */
945 
946 void
947 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
948 {
949   /* If the bit position is now larger than it should be, adjust it
950      downwards.  */
951   if (compare_tree_int (*pbitpos, off_align) >= 0)
952     {
953       tree offset, bitpos;
954       pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
955       *poffset = size_binop (PLUS_EXPR, *poffset, offset);
956       *pbitpos = bitpos;
957     }
958 }
959 
960 /* Print debugging information about the information in RLI.  */
961 
962 DEBUG_FUNCTION void
963 debug_rli (record_layout_info rli)
964 {
965   print_node_brief (stderr, "type", rli->t, 0);
966   print_node_brief (stderr, "\noffset", rli->offset, 0);
967   print_node_brief (stderr, " bitpos", rli->bitpos, 0);
968 
969   fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
970 	   rli->record_align, rli->unpacked_align,
971 	   rli->offset_align);
972 
973   /* The ms_struct code is the only code that uses this.  */
974   if (targetm.ms_bitfield_layout_p (rli->t))
975     fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
976 
977   if (rli->packed_maybe_necessary)
978     fprintf (stderr, "packed may be necessary\n");
979 
980   if (!vec_safe_is_empty (rli->pending_statics))
981     {
982       fprintf (stderr, "pending statics:\n");
983       debug_vec_tree (rli->pending_statics);
984     }
985 }
986 
987 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
988    BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */
989 
990 void
991 normalize_rli (record_layout_info rli)
992 {
993   normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
994 }
995 
996 /* Returns the size in bytes allocated so far.  */
997 
998 tree
999 rli_size_unit_so_far (record_layout_info rli)
1000 {
1001   return byte_from_pos (rli->offset, rli->bitpos);
1002 }
1003 
1004 /* Returns the size in bits allocated so far.  */
1005 
1006 tree
1007 rli_size_so_far (record_layout_info rli)
1008 {
1009   return bit_from_pos (rli->offset, rli->bitpos);
1010 }
1011 
1012 /* FIELD is about to be added to RLI->T.  The alignment (in bits) of
1013    the next available location within the record is given by KNOWN_ALIGN.
1014    Update the variable alignment fields in RLI, and return the alignment
1015    to give the FIELD.  */
1016 
1017 unsigned int
1018 update_alignment_for_field (record_layout_info rli, tree field,
1019 			    unsigned int known_align)
1020 {
1021   /* The alignment required for FIELD.  */
1022   unsigned int desired_align;
1023   /* The type of this field.  */
1024   tree type = TREE_TYPE (field);
1025   /* True if the field was explicitly aligned by the user.  */
1026   bool user_align;
1027   bool is_bitfield;
1028 
1029   /* Do not attempt to align an ERROR_MARK node.  */
1030   if (TREE_CODE (type) == ERROR_MARK)
1031     return 0;
1032 
1033   /* Lay out the field so we know what alignment it needs.  */
1034   layout_decl (field, known_align);
1035   desired_align = DECL_ALIGN (field);
1036   user_align = DECL_USER_ALIGN (field);
1037 
1038   is_bitfield = (type != error_mark_node
1039 		 && DECL_BIT_FIELD_TYPE (field)
1040 		 && ! integer_zerop (TYPE_SIZE (type)));
1041 
1042   /* Record must have at least as much alignment as any field.
1043      Otherwise, the alignment of the field within the record is
1044      meaningless.  */
1045   if (targetm.ms_bitfield_layout_p (rli->t))
1046     {
1047       /* Here, the alignment of the underlying type of a bitfield can
1048 	 affect the alignment of a record; even a zero-sized field
1049 	 can do this.  The alignment should be to the alignment of
1050 	 the type, except that for zero-size bitfields this only
1051 	 applies if there was an immediately prior, nonzero-size
1052 	 bitfield.  (That's the way it is, experimentally.) */
1053       if ((!is_bitfield && !DECL_PACKED (field))
1054 	  || ((DECL_SIZE (field) == NULL_TREE
1055 	       || !integer_zerop (DECL_SIZE (field)))
1056 	      ? !DECL_PACKED (field)
1057 	      : (rli->prev_field
1058 		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
1059 		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
1060 	{
1061 	  unsigned int type_align = TYPE_ALIGN (type);
1062 	  type_align = MAX (type_align, desired_align);
1063 	  if (maximum_field_alignment != 0)
1064 	    type_align = MIN (type_align, maximum_field_alignment);
1065 	  rli->record_align = MAX (rli->record_align, type_align);
1066 	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1067 	}
1068     }
1069 #ifdef PCC_BITFIELD_TYPE_MATTERS
1070   else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
1071     {
1072       /* Named bit-fields cause the entire structure to have the
1073 	 alignment implied by their type.  Some targets also apply the same
1074 	 rules to unnamed bitfields.  */
1075       if (DECL_NAME (field) != 0
1076 	  || targetm.align_anon_bitfield ())
1077 	{
1078 	  unsigned int type_align = TYPE_ALIGN (type);
1079 
1080 #ifdef ADJUST_FIELD_ALIGN
1081 	  if (! TYPE_USER_ALIGN (type))
1082 	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
1083 #endif
1084 
1085 	  /* Targets might choose to handle unnamed and hence possibly
1086 	     zero-width bitfields.  Those are not influenced by #pragmas
1087 	     or packed attributes.  */
1088 	  if (integer_zerop (DECL_SIZE (field)))
1089 	    {
1090 	      if (initial_max_fld_align)
1091 	        type_align = MIN (type_align,
1092 				  initial_max_fld_align * BITS_PER_UNIT);
1093 	    }
1094 	  else if (maximum_field_alignment != 0)
1095 	    type_align = MIN (type_align, maximum_field_alignment);
1096 	  else if (DECL_PACKED (field))
1097 	    type_align = MIN (type_align, BITS_PER_UNIT);
1098 
1099 	  /* The alignment of the record is increased to the maximum
1100 	     of the current alignment, the alignment indicated on the
1101 	     field (i.e., the alignment specified by an __aligned__
1102 	     attribute), and the alignment indicated by the type of
1103 	     the field.  */
1104 	  rli->record_align = MAX (rli->record_align, desired_align);
1105 	  rli->record_align = MAX (rli->record_align, type_align);
1106 
1107 	  if (warn_packed)
1108 	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1109 	  user_align |= TYPE_USER_ALIGN (type);
1110 	}
1111     }
1112 #endif
1113   else
1114     {
1115       rli->record_align = MAX (rli->record_align, desired_align);
1116       rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1117     }
1118 
1119   TYPE_USER_ALIGN (rli->t) |= user_align;
1120 
1121   return desired_align;
1122 }
1123 
1124 /* Called from place_field to handle unions.  */
1125 
1126 static void
1127 place_union_field (record_layout_info rli, tree field)
1128 {
1129   update_alignment_for_field (rli, field, /*known_align=*/0);
1130 
1131   DECL_FIELD_OFFSET (field) = size_zero_node;
1132   DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
1133   SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
1134 
1135   /* If this is an ERROR_MARK return *after* having set the
1136      field at the start of the union. This helps when parsing
1137      invalid fields. */
1138   if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
1139     return;
1140 
1141   /* We assume the union's size will be a multiple of a byte so we don't
1142      bother with BITPOS.  */
1143   if (TREE_CODE (rli->t) == UNION_TYPE)
1144     rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1145   else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
1146     rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
1147 			       DECL_SIZE_UNIT (field), rli->offset);
1148 }
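
/* Example: for `union { char c; double d; }' on a typical LP64 target,
   both members are placed at offset 0 and the offset accumulated here
   is the maximum member size, 8 bytes; for a QUAL_UNION_TYPE it is a
   COND_EXPR chain selected by the members' DECL_QUALIFIERs.  */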
1149 
1150 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
1151 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1152    at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
1153    units of alignment than the underlying TYPE.  */
1154 static int
1155 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1156 		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1157 {
1158   /* Note that the calculation of OFFSET might overflow; we calculate it so
1159      that we still get the right result as long as ALIGN is a power of two.  */
1160   unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1161 
1162   offset = offset % align;
1163   return ((offset + size + align - 1) / align
1164 	  > tree_to_uhwi (TYPE_SIZE (type)) / align);
1165 }
1166 #endif
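
/* Worked example (assuming BITS_PER_UNIT == 8): a 7-bit field at byte
   offset 3, bit offset 6, with ALIGN == 32 and a 32-bit TYPE starts at
   bit 30 of its alignment unit; (30 + 7 + 31) / 32 == 2 exceeds
   32 / 32 == 1, so the function returns nonzero and the caller
   advances to the next boundary.  */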
1167 
1168 /* RLI contains information about the layout of a RECORD_TYPE.  FIELD
1169    is a FIELD_DECL to be added after those fields already present in
1170    T.  (FIELD is not actually added to the TYPE_FIELDS list here;
1171    callers that desire that behavior must manually perform that step.)  */
1172 
1173 void
1174 place_field (record_layout_info rli, tree field)
1175 {
1176   /* The alignment required for FIELD.  */
1177   unsigned int desired_align;
1178   /* The alignment FIELD would have if we just dropped it into the
1179      record as it presently stands.  */
1180   unsigned int known_align;
1181   unsigned int actual_align;
1182   /* The type of this field.  */
1183   tree type = TREE_TYPE (field);
1184 
1185   gcc_assert (TREE_CODE (field) != ERROR_MARK);
1186 
1187   /* If FIELD is static, then treat it like a separate variable, not
1188      really like a structure field.  If it is a FUNCTION_DECL, it's a
1189      method.  In both cases, all we do is lay out the decl, and we do
1190      it *after* the record is laid out.  */
1191   if (TREE_CODE (field) == VAR_DECL)
1192     {
1193       vec_safe_push (rli->pending_statics, field);
1194       return;
1195     }
1196 
1197   /* Enumerators and enum types which are local to this class need not
1198      be laid out.  Likewise for initialized constant fields.  */
1199   else if (TREE_CODE (field) != FIELD_DECL)
1200     return;
1201 
1202   /* Unions are laid out very differently than records, so split
1203      that code off to another function.  */
1204   else if (TREE_CODE (rli->t) != RECORD_TYPE)
1205     {
1206       place_union_field (rli, field);
1207       return;
1208     }
1209 
1210   else if (TREE_CODE (type) == ERROR_MARK)
1211     {
1212       /* Place this field at the current allocation position, so we
1213 	 maintain monotonicity.  */
1214       DECL_FIELD_OFFSET (field) = rli->offset;
1215       DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1216       SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1217       return;
1218     }
1219 
1220   /* Work out the known alignment so far.  Note that A & (-A) is the
1221      value of the least-significant bit in A that is one.  */
1222   if (! integer_zerop (rli->bitpos))
1223     known_align = (tree_to_uhwi (rli->bitpos)
1224 		   & - tree_to_uhwi (rli->bitpos));
1225   else if (integer_zerop (rli->offset))
1226     known_align = 0;
1227   else if (tree_fits_uhwi_p (rli->offset))
1228     known_align = (BITS_PER_UNIT
1229 		   * (tree_to_uhwi (rli->offset)
1230 		      & - tree_to_uhwi (rli->offset)));
1231   else
1232     known_align = rli->offset_align;
1233 
1234   desired_align = update_alignment_for_field (rli, field, known_align);
1235   if (known_align == 0)
1236     known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1237 
1238   if (warn_packed && DECL_PACKED (field))
1239     {
1240       if (known_align >= TYPE_ALIGN (type))
1241 	{
1242 	  if (TYPE_ALIGN (type) > desired_align)
1243 	    {
1244 	      if (STRICT_ALIGNMENT)
1245 		warning (OPT_Wattributes, "packed attribute causes "
1246                          "inefficient alignment for %q+D", field);
1247 	      /* Don't warn if DECL_PACKED was set by the type.  */
1248 	      else if (!TYPE_PACKED (rli->t))
1249 		warning (OPT_Wattributes, "packed attribute is "
1250 			 "unnecessary for %q+D", field);
1251 	    }
1252 	}
1253       else
1254 	rli->packed_maybe_necessary = 1;
1255     }
1256 
1257   /* Does this field automatically have alignment it needs by virtue
1258      of the fields that precede it and the record's own alignment?  */
1259   if (known_align < desired_align)
1260     {
1261       /* No, we need to skip space before this field.
1262 	 Bump the cumulative size to multiple of field alignment.  */
1263 
1264       if (!targetm.ms_bitfield_layout_p (rli->t)
1265           && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
1266 	warning (OPT_Wpadded, "padding struct to align %q+D", field);
1267 
1268       /* If the alignment is still within offset_align, just align
1269 	 the bit position.  */
1270       if (desired_align < rli->offset_align)
1271 	rli->bitpos = round_up (rli->bitpos, desired_align);
1272       else
1273 	{
1274 	  /* First adjust OFFSET by the partial bits, then align.  */
1275 	  rli->offset
1276 	    = size_binop (PLUS_EXPR, rli->offset,
1277 			  fold_convert (sizetype,
1278 					size_binop (CEIL_DIV_EXPR, rli->bitpos,
1279 						    bitsize_unit_node)));
1280 	  rli->bitpos = bitsize_zero_node;
1281 
1282 	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1283 	}
1284 
1285       if (! TREE_CONSTANT (rli->offset))
1286 	rli->offset_align = desired_align;
1287       if (targetm.ms_bitfield_layout_p (rli->t))
1288 	rli->prev_field = NULL;
1289     }
1290 
1291   /* Handle compatibility with PCC.  Note that if the record has any
1292      variable-sized fields, we need not worry about compatibility.  */
1293 #ifdef PCC_BITFIELD_TYPE_MATTERS
1294   if (PCC_BITFIELD_TYPE_MATTERS
1295       && ! targetm.ms_bitfield_layout_p (rli->t)
1296       && TREE_CODE (field) == FIELD_DECL
1297       && type != error_mark_node
1298       && DECL_BIT_FIELD (field)
1299       && (! DECL_PACKED (field)
1300 	  /* Enter for these packed fields only to issue a warning.  */
1301 	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1302       && maximum_field_alignment == 0
1303       && ! integer_zerop (DECL_SIZE (field))
1304       && tree_fits_uhwi_p (DECL_SIZE (field))
1305       && tree_fits_uhwi_p (rli->offset)
1306       && tree_fits_uhwi_p (TYPE_SIZE (type)))
1307     {
1308       unsigned int type_align = TYPE_ALIGN (type);
1309       tree dsize = DECL_SIZE (field);
1310       HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1311       HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1312       HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1313 
1314 #ifdef ADJUST_FIELD_ALIGN
1315       if (! TYPE_USER_ALIGN (type))
1316 	type_align = ADJUST_FIELD_ALIGN (field, type_align);
1317 #endif
1318 
1319       /* A bit field may not span more units of alignment of its type
1320 	 than its type itself.  Advance to next boundary if necessary.  */
1321       if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1322 	{
1323 	  if (DECL_PACKED (field))
1324 	    {
1325 	      if (warn_packed_bitfield_compat == 1)
1326 		inform
1327 		  (input_location,
1328 		   "offset of packed bit-field %qD has changed in GCC 4.4",
1329 		   field);
1330 	    }
1331 	  else
1332 	    rli->bitpos = round_up (rli->bitpos, type_align);
1333 	}
1334 
1335       if (! DECL_PACKED (field))
1336 	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1337     }
1338 #endif
1339 
1340 #ifdef BITFIELD_NBYTES_LIMITED
1341   if (BITFIELD_NBYTES_LIMITED
1342       && ! targetm.ms_bitfield_layout_p (rli->t)
1343       && TREE_CODE (field) == FIELD_DECL
1344       && type != error_mark_node
1345       && DECL_BIT_FIELD_TYPE (field)
1346       && ! DECL_PACKED (field)
1347       && ! integer_zerop (DECL_SIZE (field))
1348       && tree_fits_uhwi_p (DECL_SIZE (field))
1349       && tree_fits_uhwi_p (rli->offset)
1350       && tree_fits_uhwi_p (TYPE_SIZE (type)))
1351     {
1352       unsigned int type_align = TYPE_ALIGN (type);
1353       tree dsize = DECL_SIZE (field);
1354       HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1355       HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1356       HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1357 
1358 #ifdef ADJUST_FIELD_ALIGN
1359       if (! TYPE_USER_ALIGN (type))
1360 	type_align = ADJUST_FIELD_ALIGN (field, type_align);
1361 #endif
1362 
1363       if (maximum_field_alignment != 0)
1364 	type_align = MIN (type_align, maximum_field_alignment);
1365       /* ??? This test is opposite the test in the containing if
1366 	 statement, so this code is unreachable currently.  */
1367       else if (DECL_PACKED (field))
1368 	type_align = MIN (type_align, BITS_PER_UNIT);
1369 
1370       /* A bit field may not span the unit of alignment of its type.
1371 	 Advance to next boundary if necessary.  */
1372       if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1373 	rli->bitpos = round_up (rli->bitpos, type_align);
1374 
1375       TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1376     }
1377 #endif
1378 
1379   /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1380      A subtlety:
1381 	When a bit field is inserted into a packed record, the whole
1382 	size of the underlying type is used by one or more same-size
1383 	adjacent bitfields.  (That is, if it's long:3, 32 bits is
1384 	used in the record, and any additional adjacent long bitfields are
1385 	packed into the same chunk of 32 bits. However, if the size
1386 	changes, a new field of that size is allocated.)  In an unpacked
1387 	record, this is the same as using alignment, but not equivalent
1388 	when packing.
1389 
1390      Note: for compatibility, we use the type size, not the type alignment
1391      to determine alignment, since that matches the documentation.  */
1392 
1393   if (targetm.ms_bitfield_layout_p (rli->t))
1394     {
1395       tree prev_saved = rli->prev_field;
1396       tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1397 
1398       /* This is a bitfield if it exists.  */
1399       if (rli->prev_field)
1400 	{
1401 	  /* If both are bitfields, nonzero, and the same size, this is
1402 	     the middle of a run.  Zero declared size fields are special
1403 	     and handled as "end of run". (Note: it's nonzero declared
1404 	     size, but equal type sizes!) (Since we know that both
1405 	     the current and previous fields are bitfields by the
1406 	     time we check it, DECL_SIZE must be present for both.) */
1407 	  if (DECL_BIT_FIELD_TYPE (field)
1408 	      && !integer_zerop (DECL_SIZE (field))
1409 	      && !integer_zerop (DECL_SIZE (rli->prev_field))
1410 	      && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
1411 	      && tree_fits_uhwi_p (TYPE_SIZE (type))
1412 	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1413 	    {
1414 	      /* We're in the middle of a run of equal type size fields; make
1415 		 sure we realign if we run out of bits.  (Not decl size,
1416 		 type size!) */
1417 	      HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));
1418 
1419 	      if (rli->remaining_in_alignment < bitsize)
1420 		{
1421 		  HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));
1422 
1423 		  /* Out of bits; bump up to the next 'word'.  */
1424 		  rli->bitpos
1425 		    = size_binop (PLUS_EXPR, rli->bitpos,
1426 				  bitsize_int (rli->remaining_in_alignment));
1427 		  rli->prev_field = field;
1428 		  if (typesize < bitsize)
1429 		    rli->remaining_in_alignment = 0;
1430 		  else
1431 		    rli->remaining_in_alignment = typesize - bitsize;
1432 		}
1433 	      else
1434 		rli->remaining_in_alignment -= bitsize;
1435 	    }
1436 	  else
1437 	    {
1438 	      /* End of a run: if leaving a run of bitfields of the same type
1439 		 size, we have to "use up" the rest of the bits of the type
1440 		 size.
1441 
1442 		 Compute the new position as the sum of the size for the prior
1443 		 type and where we first started working on that type.
1444 		 Note: since the beginning of the field was aligned then
1445 		 of course the end will be too.  No round needed.  */
1446 
1447 	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1448 		{
1449 		  rli->bitpos
1450 		    = size_binop (PLUS_EXPR, rli->bitpos,
1451 				  bitsize_int (rli->remaining_in_alignment));
1452 		}
1453 	      else
1454 		/* We "use up" size zero fields; the code below should behave
1455 		   as if the prior field was not a bitfield.  */
1456 		prev_saved = NULL;
1457 
1458 	      /* Cause a new bitfield to be captured, either this time (if
1459 		 currently a bitfield) or next time we see one.  */
1460 	      if (!DECL_BIT_FIELD_TYPE (field)
1461 		  || integer_zerop (DECL_SIZE (field)))
1462 		rli->prev_field = NULL;
1463 	    }
1464 
1465 	  normalize_rli (rli);
1466         }
1467 
1468       /* If we're starting a new run of same type size bitfields
1469 	 (or a run of non-bitfields), set up the "first of the run"
1470 	 fields.
1471 
1472 	 That is, if the current field is not a bitfield, or if there was
1473 	 a prior bitfield and the type sizes differ, or if there wasn't a
1474 	 prior bitfield and the size of the current field is nonzero.
1475 
1476 	 Note: we must be sure to test ONLY the type size if there was
1477 	 a prior bitfield and ONLY for the current field being zero if
1478 	 there wasn't.  */
1479 
1480       if (!DECL_BIT_FIELD_TYPE (field)
1481 	  || (prev_saved != NULL
1482 	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1483 	      : !integer_zerop (DECL_SIZE (field)) ))
1484 	{
1485 	  /* Never smaller than a byte for compatibility.  */
1486 	  unsigned int type_align = BITS_PER_UNIT;
1487 
1488 	  /* When not a bitfield, we could be seeing a flex array (with
1489 	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
1490 	     until we see a bitfield (and come by here again) we just skip
1491 	     calculating it.  */
1492 	  if (DECL_SIZE (field) != NULL
1493 	      && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
1494 	      && tree_fits_uhwi_p (DECL_SIZE (field)))
1495 	    {
1496 	      unsigned HOST_WIDE_INT bitsize
1497 		= tree_to_uhwi (DECL_SIZE (field));
1498 	      unsigned HOST_WIDE_INT typesize
1499 		= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));
1500 
1501 	      if (typesize < bitsize)
1502 		rli->remaining_in_alignment = 0;
1503 	      else
1504 		rli->remaining_in_alignment = typesize - bitsize;
1505 	    }
1506 
1507 	  /* Now align (conventionally) for the new type.  */
1508 	  type_align = TYPE_ALIGN (TREE_TYPE (field));
1509 
1510 	  if (maximum_field_alignment != 0)
1511 	    type_align = MIN (type_align, maximum_field_alignment);
1512 
1513 	  rli->bitpos = round_up (rli->bitpos, type_align);
1514 
1515           /* If we really aligned, don't allow subsequent bitfields
1516 	     to undo that.  */
1517 	  rli->prev_field = NULL;
1518 	}
1519     }
1520 
1521   /* Offset so far becomes the position of this field after normalizing.  */
1522   normalize_rli (rli);
1523   DECL_FIELD_OFFSET (field) = rli->offset;
1524   DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1525   SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1526 
1527   /* Evaluate nonconstant offsets only once, either now or as soon as safe.  */
1528   if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1529     DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1530 
1531   /* If this field ended up more aligned than we thought it would be (we
1532      approximate this by seeing if its position changed), lay out the field
1533      again; perhaps we can use an integral mode for it now.  */
1534   if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1535     actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
1536 		    & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1537   else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1538     actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1539   else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1540     actual_align = (BITS_PER_UNIT
1541 		   * (tree_to_uhwi (DECL_FIELD_OFFSET (field))
1542 		      & - tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1543   else
1544     actual_align = DECL_OFFSET_ALIGN (field);
1545   /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1546      Store/extract bit field operations will check the alignment of the
1547      record against the mode of bit fields.  */
1548 
1549   if (known_align != actual_align)
1550     layout_decl (field, actual_align);
1551 
1552   if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1553     rli->prev_field = field;
1554 
1555   /* Now add size of this field to the size of the record.  If the size is
1556      not constant, treat the field as being a multiple of bytes and just
1557      adjust the offset, resetting the bit position.  Otherwise, apportion the
1558      size amongst the bit position and offset.  First handle the case of an
1559      unspecified size, which can happen when we have an invalid nested struct
1560      definition, such as struct j { struct j { int i; } }.  The error message
1561      is printed in finish_struct.  */
1562   if (DECL_SIZE (field) == 0)
1563     /* Do nothing.  */;
1564   else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1565 	   || TREE_OVERFLOW (DECL_SIZE (field)))
1566     {
1567       rli->offset
1568 	= size_binop (PLUS_EXPR, rli->offset,
1569 		      fold_convert (sizetype,
1570 				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
1571 						bitsize_unit_node)));
1572       rli->offset
1573 	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1574       rli->bitpos = bitsize_zero_node;
1575       rli->offset_align = MIN (rli->offset_align, desired_align);
1576     }
1577   else if (targetm.ms_bitfield_layout_p (rli->t))
1578     {
1579       rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1580 
1581       /* If we ended a bitfield before the full length of the type then
1582 	 pad the struct out to the full length of the last type.  */
1583       if ((DECL_CHAIN (field) == NULL
1584 	   || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
1585 	  && DECL_BIT_FIELD_TYPE (field)
1586 	  && !integer_zerop (DECL_SIZE (field)))
1587 	rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1588 				  bitsize_int (rli->remaining_in_alignment));
1589 
1590       normalize_rli (rli);
1591     }
1592   else
1593     {
1594       rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1595       normalize_rli (rli);
1596     }
1597 }
1598 
1599 /* Assuming that all the fields have been laid out, this function uses
1600    RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1601    indicated by RLI.  */
1602 
1603 static void
1604 finalize_record_size (record_layout_info rli)
1605 {
1606   tree unpadded_size, unpadded_size_unit;
1607 
1608   /* Now we want just byte and bit offsets, so set the offset alignment
1609      to be a byte and then normalize.  */
1610   rli->offset_align = BITS_PER_UNIT;
1611   normalize_rli (rli);
1612 
1613   /* Determine the desired alignment.  */
1614 #ifdef ROUND_TYPE_ALIGN
1615   TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1616 					  rli->record_align);
1617 #else
1618   TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
1619 #endif
1620 
1621   /* Compute the size so far.  Be sure to allow for extra bits in the
1622      size in bytes.  We have guaranteed above that it will be no more
1623      than a single byte.  */
1624   unpadded_size = rli_size_so_far (rli);
1625   unpadded_size_unit = rli_size_unit_so_far (rli);
1626   if (! integer_zerop (rli->bitpos))
1627     unpadded_size_unit
1628       = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1629 
1630   /* Round the size up to be a multiple of the required alignment.  */
1631   TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1632   TYPE_SIZE_UNIT (rli->t)
1633     = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1634 
1635   if (TREE_CONSTANT (unpadded_size)
1636       && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1637       && input_location != BUILTINS_LOCATION)
1638     warning (OPT_Wpadded, "padding struct size to alignment boundary");
1639 
1640   if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1641       && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1642       && TREE_CONSTANT (unpadded_size))
1643     {
1644       tree unpacked_size;
1645 
1646 #ifdef ROUND_TYPE_ALIGN
1647       rli->unpacked_align
1648 	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1649 #else
1650       rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1651 #endif
1652 
1653       unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1654       if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1655 	{
1656 	  if (TYPE_NAME (rli->t))
1657 	    {
1658 	      tree name;
1659 
1660 	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1661 		name = TYPE_NAME (rli->t);
1662 	      else
1663 		name = DECL_NAME (TYPE_NAME (rli->t));
1664 
1665 	      if (STRICT_ALIGNMENT)
1666 		warning (OPT_Wpacked, "packed attribute causes inefficient "
1667 			 "alignment for %qE", name);
1668 	      else
1669 		warning (OPT_Wpacked,
1670 			 "packed attribute is unnecessary for %qE", name);
1671 	    }
1672 	  else
1673 	    {
1674 	      if (STRICT_ALIGNMENT)
1675 		warning (OPT_Wpacked,
1676 			 "packed attribute causes inefficient alignment");
1677 	      else
1678 		warning (OPT_Wpacked, "packed attribute is unnecessary");
1679 	    }
1680 	}
1681     }
1682 }
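
/* A hedged illustration of the -Wpadded case above (hypothetical example,
   not part of the original source): on a target where int is 4 bytes with
   4-byte alignment,
     struct s { int i; char c; };
   has an unpadded size of 5 bytes, which round_up pads to 8, so the
   "padding struct size to alignment boundary" warning fires.  */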
1683 
1684 /* Compute the TYPE_MODE for TYPE (a RECORD_TYPE, UNION_TYPE or
   QUAL_UNION_TYPE).  */
1685 
1686 void
1687 compute_record_mode (tree type)
1688 {
1689   tree field;
1690   machine_mode mode = VOIDmode;
1691 
1692   /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1693      However, if possible, we use a mode that fits in a register
1694      instead, in order to allow for better optimization down the
1695      line.  */
1696   SET_TYPE_MODE (type, BLKmode);
1697 
1698   if (! tree_fits_uhwi_p (TYPE_SIZE (type)))
1699     return;
1700 
1701   /* A record which has any BLKmode members must itself be
1702      BLKmode; it can't go in a register.  Unless the member is
1703      BLKmode only because it isn't aligned.  */
1704   for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1705     {
1706       if (TREE_CODE (field) != FIELD_DECL)
1707 	continue;
1708 
1709       if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1710 	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1711 	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1712 	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1713 		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1714 	  || ! tree_fits_uhwi_p (bit_position (field))
1715 	  || DECL_SIZE (field) == 0
1716 	  || ! tree_fits_uhwi_p (DECL_SIZE (field)))
1717 	return;
1718 
1719       /* If this field is the whole struct, remember its mode so
1720 	 that, say, we can put a double in a class into a DF
1721 	 register instead of forcing it to live in the stack.  */
1722       if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
1723 	mode = DECL_MODE (field);
1724 
1725       /* On some targets, it is sub-optimal to access an aligned
1726 	 BLKmode structure as a scalar.  */
1727       if (targetm.member_type_forces_blk (field, mode))
1728 	return;
1729     }
1730 
1731   /* If we only have one real field, use its mode if that mode's size
1732      matches the type's size.  This only applies to RECORD_TYPE; it
1733      does not apply to unions.  */
1734   if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
1735       && tree_fits_uhwi_p (TYPE_SIZE (type))
1736       && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type)))
1737     SET_TYPE_MODE (type, mode);
1738   else
1739     SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
1740 
1741   /* If structure's known alignment is less than what the scalar
1742      mode would need, and it matters, then stick with BLKmode.  */
1743   if (TYPE_MODE (type) != BLKmode
1744       && STRICT_ALIGNMENT
1745       && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1746 	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
1747     {
1748       /* If this is the only reason this type is BLKmode, then
1749 	 don't force containing types to be BLKmode.  */
1750       TYPE_NO_FORCE_BLK (type) = 1;
1751       SET_TYPE_MODE (type, BLKmode);
1752     }
1753 }
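
/* For illustration (a sketch, not from the original source): given
     struct wrap { double d; };
   the single field spans the whole struct, so MODE becomes DFmode and, on
   typical targets, the record can live in a floating-point register
   instead of being BLKmode.  */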
1754 
1755 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1756    out.  */
1757 
1758 static void
1759 finalize_type_size (tree type)
1760 {
1761   /* Normally, use the alignment corresponding to the mode chosen.
1762      However, where strict alignment is not required, avoid
1763      over-aligning structures, since most compilers do not do this
1764      alignment.  */
1765 
1766   if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1767       && (STRICT_ALIGNMENT
1768 	  || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1769 	      && TREE_CODE (type) != QUAL_UNION_TYPE
1770 	      && TREE_CODE (type) != ARRAY_TYPE)))
1771     {
1772       unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1773 
1774       /* Don't override a larger alignment requirement coming from a user
1775 	 alignment of one of the fields.  */
1776       if (mode_align >= TYPE_ALIGN (type))
1777 	{
1778 	  TYPE_ALIGN (type) = mode_align;
1779 	  TYPE_USER_ALIGN (type) = 0;
1780 	}
1781     }
1782 
1783   /* Do machine-dependent extra alignment.  */
1784 #ifdef ROUND_TYPE_ALIGN
1785   TYPE_ALIGN (type)
1786     = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1787 #endif
1788 
1789   /* If we failed to find a simple way to calculate the unit size
1790      of the type, find it by division.  */
1791   if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1792     /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
1793        result will fit in sizetype.  We will get more efficient code using
1794        sizetype, so we force a conversion.  */
1795     TYPE_SIZE_UNIT (type)
1796       = fold_convert (sizetype,
1797 		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1798 				  bitsize_unit_node));
1799 
1800   if (TYPE_SIZE (type) != 0)
1801     {
1802       TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1803       TYPE_SIZE_UNIT (type)
1804 	= round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1805     }
1806 
1807   /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
1808   if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1809     TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1810   if (TYPE_SIZE_UNIT (type) != 0
1811       && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1812     TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1813 
1814   /* Also layout any other variants of the type.  */
1815   if (TYPE_NEXT_VARIANT (type)
1816       || type != TYPE_MAIN_VARIANT (type))
1817     {
1818       tree variant;
1819       /* Record layout info of this variant.  */
1820       tree size = TYPE_SIZE (type);
1821       tree size_unit = TYPE_SIZE_UNIT (type);
1822       unsigned int align = TYPE_ALIGN (type);
1823       unsigned int precision = TYPE_PRECISION (type);
1824       unsigned int user_align = TYPE_USER_ALIGN (type);
1825       machine_mode mode = TYPE_MODE (type);
1826 
1827       /* Copy it into all variants.  */
1828       for (variant = TYPE_MAIN_VARIANT (type);
1829 	   variant != 0;
1830 	   variant = TYPE_NEXT_VARIANT (variant))
1831 	{
1832 	  TYPE_SIZE (variant) = size;
1833 	  TYPE_SIZE_UNIT (variant) = size_unit;
1834 	  unsigned valign = align;
1835 	  if (TYPE_USER_ALIGN (variant))
1836 	    valign = MAX (valign, TYPE_ALIGN (variant));
1837 	  else
1838 	    TYPE_USER_ALIGN (variant) = user_align;
1839 	  TYPE_ALIGN (variant) = valign;
1840 	  TYPE_PRECISION (variant) = precision;
1841 	  SET_TYPE_MODE (variant, mode);
1842 	}
1843     }
1844 }
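
/* A small worked example (hypothetical, not from the original source):
   if a type reaches this point with a TYPE_SIZE of 96 bits and a
   TYPE_ALIGN of 64, round_up yields a TYPE_SIZE of 128 bits and a
   TYPE_SIZE_UNIT of 16 bytes (assuming BITS_PER_UNIT == 8).  */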
1845 
1846 /* Return a new underlying object for a bitfield started with FIELD.  */
1847 
1848 static tree
1849 start_bitfield_representative (tree field)
1850 {
1851   tree repr = make_node (FIELD_DECL);
1852   DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1853   /* Force the representative to begin at a BITS_PER_UNIT aligned
1854      boundary - C++ may use tail-padding of a base object to
1855      continue packing bits so the bitfield region does not start
1856      at bit zero (see g++.dg/abi/bitfield5.C for example).
1857      Unallocated bits may happen for other reasons as well,
1858      for example Ada which allows explicit bit-granular structure layout.  */
1859   DECL_FIELD_BIT_OFFSET (repr)
1860     = size_binop (BIT_AND_EXPR,
1861 		  DECL_FIELD_BIT_OFFSET (field),
1862 		  bitsize_int (~(BITS_PER_UNIT - 1)));
1863   SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
1864   DECL_SIZE (repr) = DECL_SIZE (field);
1865   DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
1866   DECL_PACKED (repr) = DECL_PACKED (field);
1867   DECL_CONTEXT (repr) = DECL_CONTEXT (field);
1868   return repr;
1869 }
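
/* For example (a sketch, not from the original source): with
   BITS_PER_UNIT == 8, a field whose DECL_FIELD_BIT_OFFSET is 17 yields a
   representative bit offset of 17 & ~7 == 16, i.e. the representative is
   pushed back to the enclosing byte boundary.  */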
1870 
1871 /* Finish up a bitfield group that was started by creating the underlying
1872    object REPR with the last field in the bitfield group FIELD.  */
1873 
1874 static void
1875 finish_bitfield_representative (tree repr, tree field)
1876 {
1877   unsigned HOST_WIDE_INT bitsize, maxbitsize;
1878   machine_mode mode;
1879   tree nextf, size;
1880 
1881   size = size_diffop (DECL_FIELD_OFFSET (field),
1882 		      DECL_FIELD_OFFSET (repr));
1883   while (TREE_CODE (size) == COMPOUND_EXPR)
1884     size = TREE_OPERAND (size, 1);
1885   gcc_assert (tree_fits_uhwi_p (size));
1886   bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
1887 	     + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
1888 	     - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
1889 	     + tree_to_uhwi (DECL_SIZE (field)));
1890 
1891   /* Round up bitsize to multiples of BITS_PER_UNIT.  */
1892   bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1893 
1894   /* Now nothing tells us how to pad out bitsize ...  */
1895   nextf = DECL_CHAIN (field);
1896   while (nextf && TREE_CODE (nextf) != FIELD_DECL)
1897     nextf = DECL_CHAIN (nextf);
1898   if (nextf)
1899     {
1900       tree maxsize;
1901       /* If there was an error, the field may be not laid out
1902          correctly.  Don't bother to do anything.  */
1903       if (TREE_TYPE (nextf) == error_mark_node)
1904 	return;
1905       maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
1906 			     DECL_FIELD_OFFSET (repr));
1907       if (tree_fits_uhwi_p (maxsize))
1908 	{
1909 	  maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
1910 			+ tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
1911 			- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
1912 	  /* If the group ends within a bitfield, nextf does not need to be
1913 	     aligned to BITS_PER_UNIT.  Thus round up.  */
1914 	  maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1915 	}
1916       else
1917 	maxbitsize = bitsize;
1918     }
1919   else
1920     {
1921       /* ???  If you consider that tail-padding of this struct might be
1922          re-used when deriving from it, we cannot really do the following
1923 	 and thus need to set maxsize to bitsize?  Also we cannot
1924 	 generally rely on maxsize to fold to an integer constant, so
1925 	 use bitsize as fallback for this case.  */
1926       tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
1927 				  DECL_FIELD_OFFSET (repr));
1928       if (tree_fits_uhwi_p (maxsize))
1929 	maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
1930 		      - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
1931       else
1932 	maxbitsize = bitsize;
1933     }
1934 
1935   /* Only if we don't artificially break up the representative in
1936      the middle of a large bitfield with different possibly
1937      overlapping representatives.  And all representatives start
1938      at a byte offset.  */
1939   gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
1940 
1941   /* Find the smallest nice mode to use.  */
1942   for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1943        mode = GET_MODE_WIDER_MODE (mode))
1944     if (GET_MODE_BITSIZE (mode) >= bitsize)
1945       break;
1946   if (mode != VOIDmode
1947       && (GET_MODE_BITSIZE (mode) > maxbitsize
1948 	  || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
1949     mode = VOIDmode;
1950 
1951   if (mode == VOIDmode)
1952     {
1953       /* We really want a BLKmode representative only as a last resort,
1954          considering the member b in
1955 	   struct { int a : 7; int b : 17; int c; } __attribute__((packed));
1956 	 Otherwise we simply want to split the representative up
1957 	 allowing for overlaps within the bitfield region as required for
1958 	   struct { int a : 7; int b : 7;
1959 		    int c : 10; int d; } __attribute__((packed));
1960 	 [0, 15] HImode for a and b, [8, 23] HImode for c.  */
1961       DECL_SIZE (repr) = bitsize_int (bitsize);
1962       DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
1963       DECL_MODE (repr) = BLKmode;
1964       TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
1965 						 bitsize / BITS_PER_UNIT);
1966     }
1967   else
1968     {
1969       unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
1970       DECL_SIZE (repr) = bitsize_int (modesize);
1971       DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
1972       DECL_MODE (repr) = mode;
1973       TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
1974     }
1975 
1976   /* Remember whether the bitfield group is at the end of the
1977      structure or not.  */
1978   DECL_CHAIN (repr) = nextf;
1979 }
1980 
1981 /* Compute and set FIELD_DECLs for the underlying objects we should
1982    use for bitfield access for the structure T.  */
1983 
1984 void
1985 finish_bitfield_layout (tree t)
1986 {
1987   tree field, prev;
1988   tree repr = NULL_TREE;
1989 
1990   /* Unions would be special: for the ease of type-punning optimizations
1991      we could use the underlying type as a hint for the representative
1992      if the bitfield would fit and the representative would not exceed
1993      the union in size.  */
1994   if (TREE_CODE (t) != RECORD_TYPE)
1995     return;
1996 
1997   for (prev = NULL_TREE, field = TYPE_FIELDS (t);
1998        field; field = DECL_CHAIN (field))
1999     {
2000       if (TREE_CODE (field) != FIELD_DECL)
2001 	continue;
2002 
2003       /* In the C++ memory model, consecutive bit fields in a structure are
2004 	 considered one memory location and updating a memory location
2005 	 may not store into adjacent memory locations.  */
2006       if (!repr
2007 	  && DECL_BIT_FIELD_TYPE (field))
2008 	{
2009 	  /* Start new representative.  */
2010 	  repr = start_bitfield_representative (field);
2011 	}
2012       else if (repr
2013 	       && ! DECL_BIT_FIELD_TYPE (field))
2014 	{
2015 	  /* Finish off new representative.  */
2016 	  finish_bitfield_representative (repr, prev);
2017 	  repr = NULL_TREE;
2018 	}
2019       else if (DECL_BIT_FIELD_TYPE (field))
2020 	{
2021 	  gcc_assert (repr != NULL_TREE);
2022 
2023 	  /* Zero-size bitfields finish off a representative and
2024 	     do not have a representative themselves.  This is
2025 	     required by the C++ memory model.  */
2026 	  if (integer_zerop (DECL_SIZE (field)))
2027 	    {
2028 	      finish_bitfield_representative (repr, prev);
2029 	      repr = NULL_TREE;
2030 	    }
2031 
2032 	  /* We assume that the DECL_FIELD_OFFSETs of the representative
2033 	     and of each bitfield member are either both constants or equal.
2034 	     This is because we need to be able to compute the bit-offset
2035 	     of each field relative to the representative in get_bit_range
2036 	     during RTL expansion.
2037 	     If these constraints are not met, simply force a new
2038 	     representative to be generated.  That will at most
2039 	     generate worse code but still maintain correctness with
2040 	     respect to the C++ memory model.  */
2041 	  else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2042 		      && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2043 		     || operand_equal_p (DECL_FIELD_OFFSET (repr),
2044 					 DECL_FIELD_OFFSET (field), 0)))
2045 	    {
2046 	      finish_bitfield_representative (repr, prev);
2047 	      repr = start_bitfield_representative (field);
2048 	    }
2049 	}
2050       else
2051 	continue;
2052 
2053       if (repr)
2054 	DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2055 
2056       prev = field;
2057     }
2058 
2059   if (repr)
2060     finish_bitfield_representative (repr, prev);
2061 }
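
/* A hedged illustration (hypothetical, not from the original source): for
     struct s { int a : 3; int b : 5; char c; };
   a starts a representative, b joins it, and the non-bitfield c finishes
   it, so a and b share one underlying object (a byte-sized, QImode one
   here) for C++ memory-model purposes.  */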
2062 
2063 /* Do all of the work required to lay out the type indicated by RLI,
2064    once the fields have been laid out.  This function will call `free'
2065    for RLI, unless FREE_P is false.  Passing false for FREE_P is bad
2066    practice; that option only exists to support the
2067    G++ 3.2 ABI.  */
2068 
2069 void
2070 finish_record_layout (record_layout_info rli, int free_p)
2071 {
2072   tree variant;
2073 
2074   /* Compute the final size.  */
2075   finalize_record_size (rli);
2076 
2077   /* Compute the TYPE_MODE for the record.  */
2078   compute_record_mode (rli->t);
2079 
2080   /* Perform any last tweaks to the TYPE_SIZE, etc.  */
2081   finalize_type_size (rli->t);
2082 
2083   /* Compute bitfield representatives.  */
2084   finish_bitfield_layout (rli->t);
2085 
2086   /* Propagate TYPE_PACKED to variants.  With C++ templates,
2087      handle_packed_attribute is too early to do this.  */
2088   for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2089        variant = TYPE_NEXT_VARIANT (variant))
2090     TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2091 
2092   /* Lay out any static members.  This is done now because their type
2093      may use the record's type.  */
2094   while (!vec_safe_is_empty (rli->pending_statics))
2095     layout_decl (rli->pending_statics->pop (), 0);
2096 
2097   /* Clean up.  */
2098   if (free_p)
2099     {
2100       vec_free (rli->pending_statics);
2101       free (rli);
2102     }
2103 }
2104 
2105 
2106 /* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
2107    NAME; its fields are chained in reverse order on FIELDS.
2108 
2109    If ALIGN_TYPE is non-null, it is given the same alignment as
2110    ALIGN_TYPE.  */
2111 
2112 void
2113 finish_builtin_struct (tree type, const char *name, tree fields,
2114 		       tree align_type)
2115 {
2116   tree tail, next;
2117 
2118   for (tail = NULL_TREE; fields; tail = fields, fields = next)
2119     {
2120       DECL_FIELD_CONTEXT (fields) = type;
2121       next = DECL_CHAIN (fields);
2122       DECL_CHAIN (fields) = tail;
2123     }
2124   TYPE_FIELDS (type) = tail;
2125 
2126   if (align_type)
2127     {
2128       TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
2129       TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2130     }
2131 
2132   layout_type (type);
2133 #if 0 /* not yet, should get fixed properly later */
2134   TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2135 #else
2136   TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2137 				 TYPE_DECL, get_identifier (name), type);
2138 #endif
2139   TYPE_STUB_DECL (type) = TYPE_NAME (type);
2140   layout_decl (TYPE_NAME (type), 0);
2141 }
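
/* A hedged usage sketch (hypothetical, not from the original source):
   a front end could build a two-field builtin record like so, with the
   fields chained in reverse as the function expects:

     tree t = make_node (RECORD_TYPE);
     tree f1 = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("ptr"), ptr_type_node);
     tree f2 = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("len"), sizetype);
     DECL_CHAIN (f2) = f1;
     finish_builtin_struct (t, "__my_desc", f2, NULL_TREE);

   after which TYPE_FIELDS (t) lists ptr first, then len.  */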
2142 
2143 /* Calculate the mode, size, and alignment for TYPE.
2144    For an array type, calculate the element separation as well.
2145    Record TYPE on the chain of permanent or temporary types
2146    so that dbxout will find out about it.
2147 
2148    TYPE_SIZE of a type is nonzero if the type has been laid out already.
2149    layout_type does nothing on such a type.
2150 
2151    If the type is incomplete, its TYPE_SIZE remains zero.  */
2152 
2153 void
2154 layout_type (tree type)
2155 {
2156   gcc_assert (type);
2157 
2158   if (type == error_mark_node)
2159     return;
2160 
2161   /* We don't want finalize_type_size to copy an alignment attribute to
2162      variants that don't have it.  */
2163   type = TYPE_MAIN_VARIANT (type);
2164 
2165   /* Do nothing if type has been laid out before.  */
2166   if (TYPE_SIZE (type))
2167     return;
2168 
2169   switch (TREE_CODE (type))
2170     {
2171     case LANG_TYPE:
2172       /* This kind of type is the responsibility
2173 	 of the language-specific code.  */
2174       gcc_unreachable ();
2175 
2176     case BOOLEAN_TYPE:
2177     case INTEGER_TYPE:
2178     case ENUMERAL_TYPE:
2179       SET_TYPE_MODE (type,
2180 		     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
2181       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2182       /* Don't set TYPE_PRECISION here, as it may be set by a bitfield.  */
2183       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2184       break;
2185 
2186     case REAL_TYPE:
2187       SET_TYPE_MODE (type,
2188 		     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
2189       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2190       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2191       break;
2192 
2193     case FIXED_POINT_TYPE:
2194       /* TYPE_MODE (type) has been set already.  */
2195       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2196       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2197       break;
2198 
2199     case COMPLEX_TYPE:
2200       TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2201       SET_TYPE_MODE (type,
2202 		     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
2203 				    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2204 				     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
2205 				     0));
2206       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2207       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2208       break;
2209 
2210     case VECTOR_TYPE:
2211       {
2212 	int nunits = TYPE_VECTOR_SUBPARTS (type);
2213 	tree innertype = TREE_TYPE (type);
2214 
2215 	gcc_assert (!(nunits & (nunits - 1)));
2216 
2217 	/* Find an appropriate mode for the vector type.  */
2218 	if (TYPE_MODE (type) == VOIDmode)
2219 	  SET_TYPE_MODE (type,
2220 			 mode_for_vector (TYPE_MODE (innertype), nunits));
2221 
2222 	TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2223         TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2224 	TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2225 					         TYPE_SIZE_UNIT (innertype),
2226 					         size_int (nunits));
2227 	TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
2228 					    bitsize_int (nunits));
2229 
2230 	/* For vector types, we do not default to the mode's alignment.
2231 	   Instead, query a target hook, defaulting to natural alignment.
2232 	   This prevents ABI changes depending on whether or not native
2233 	   vector modes are supported.  */
2234 	TYPE_ALIGN (type) = targetm.vector_alignment (type);
2235 
2236 	/* However, if the underlying mode requires a bigger alignment than
2237 	   what the target hook provides, we cannot use the mode.  For now,
2238 	   simply reject that case.  */
2239 	gcc_assert (TYPE_ALIGN (type)
2240 		    >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2241         break;
2242       }
2243 
2244     case VOID_TYPE:
2245       /* This is an incomplete type and so doesn't have a size.  */
2246       TYPE_ALIGN (type) = 1;
2247       TYPE_USER_ALIGN (type) = 0;
2248       SET_TYPE_MODE (type, VOIDmode);
2249       break;
2250 
2251     case POINTER_BOUNDS_TYPE:
2252       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2253       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2254       break;
2255 
2256     case OFFSET_TYPE:
2257       TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2258       TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2259       /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2260 	 integral, which may be an __intN.  */
2261       SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
2262       TYPE_PRECISION (type) = POINTER_SIZE;
2263       break;
2264 
2265     case FUNCTION_TYPE:
2266     case METHOD_TYPE:
2267       /* It's hard to see what the mode and size of a function ought to
2268 	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2269 	 make it consistent with that.  */
2270       SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
2271       TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2272       TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2273       break;
2274 
2275     case POINTER_TYPE:
2276     case REFERENCE_TYPE:
2277       {
2278 	machine_mode mode = TYPE_MODE (type);
2279 	if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
2280 	  {
2281 	    addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
2282 	    mode = targetm.addr_space.address_mode (as);
2283 	  }
2284 
2285 	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2286 	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2287 	TYPE_UNSIGNED (type) = 1;
2288 	TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2289       }
2290       break;
2291 
2292     case ARRAY_TYPE:
2293       {
2294 	tree index = TYPE_DOMAIN (type);
2295 	tree element = TREE_TYPE (type);
2296 
2297 	build_pointer_type (element);
2298 
2299 	/* We need to know both bounds in order to compute the size.  */
2300 	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2301 	    && TYPE_SIZE (element))
2302 	  {
2303 	    tree ub = TYPE_MAX_VALUE (index);
2304 	    tree lb = TYPE_MIN_VALUE (index);
2305 	    tree element_size = TYPE_SIZE (element);
2306 	    tree length;
2307 
2308 	    /* Make sure that an array of zero-sized elements is zero-sized
2309 	       regardless of its extent.  */
2310 	    if (integer_zerop (element_size))
2311 	      length = size_zero_node;
2312 
2313 	    /* The computation should happen in the original signedness so
2314 	       that (possible) negative values are handled appropriately
2315 	       when determining overflow.  */
2316 	    else
2317 	      {
2318 		/* ???  When it is obvious that the range is signed
2319 		   represent it using ssizetype.  */
2320 		if (TREE_CODE (lb) == INTEGER_CST
2321 		    && TREE_CODE (ub) == INTEGER_CST
2322 		    && TYPE_UNSIGNED (TREE_TYPE (lb))
2323 		    && tree_int_cst_lt (ub, lb))
2324 		  {
2325 		    lb = wide_int_to_tree (ssizetype,
2326 					   offset_int::from (lb, SIGNED));
2327 		    ub = wide_int_to_tree (ssizetype,
2328 					   offset_int::from (ub, SIGNED));
2329 		  }
2330 		length
2331 		  = fold_convert (sizetype,
2332 				  size_binop (PLUS_EXPR,
2333 					      build_int_cst (TREE_TYPE (lb), 1),
2334 					      size_binop (MINUS_EXPR, ub, lb)));
2335 	      }
2336 
2337 	    /* ??? We have no way to distinguish a null-sized array from an
2338 	       array spanning the whole sizetype range, so we arbitrarily
2339 	       decide that [0, -1] is the only valid representation.  */
2340 	    if (integer_zerop (length)
2341 	        && TREE_OVERFLOW (length)
2342 		&& integer_zerop (lb))
2343 	      length = size_zero_node;
2344 
2345 	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2346 					   fold_convert (bitsizetype,
2347 							 length));
2348 
2349 	    /* If we know the size of the element, calculate the total size
2350 	       directly, rather than do some division thing below.  This
2351 	       optimization helps Fortran assumed-size arrays (where the
2352 	       size of the array is determined at runtime) substantially.  */
2353 	    if (TYPE_SIZE_UNIT (element))
2354 	      TYPE_SIZE_UNIT (type)
2355 		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2356 	  }
2357 
2358 	/* Now round the alignment and size,
2359 	   using machine-dependent criteria if any.  */
2360 
2361 	unsigned align = TYPE_ALIGN (element);
2362 	if (TYPE_USER_ALIGN (type))
2363 	  align = MAX (align, TYPE_ALIGN (type));
2364 	else
2365 	  TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2366 #ifdef ROUND_TYPE_ALIGN
2367 	align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
2368 #else
2369 	align = MAX (align, BITS_PER_UNIT);
2370 #endif
2371 	TYPE_ALIGN (type) = align;
2372 	SET_TYPE_MODE (type, BLKmode);
2373 	if (TYPE_SIZE (type) != 0
2374 	    && ! targetm.member_type_forces_blk (type, VOIDmode)
2375 	    /* BLKmode elements force BLKmode aggregate;
2376 	       else extract/store fields may lose.  */
2377 	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2378 		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2379 	  {
2380 	    SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2381 						 TYPE_SIZE (type)));
2382 	    if (TYPE_MODE (type) != BLKmode
2383 		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2384 		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2385 	      {
2386 		TYPE_NO_FORCE_BLK (type) = 1;
2387 		SET_TYPE_MODE (type, BLKmode);
2388 	      }
2389 	  }
2390 	/* When the element size is constant, check that it is at least as
2391 	   large as the element alignment.  */
2392 	if (TYPE_SIZE_UNIT (element)
2393 	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2394 	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2395 	       TYPE_ALIGN_UNIT.  */
2396 	    && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2397 	    && !integer_zerop (TYPE_SIZE_UNIT (element))
2398 	    && compare_tree_int (TYPE_SIZE_UNIT (element),
2399 			  	 TYPE_ALIGN_UNIT (element)) < 0)
2400 	  error ("alignment of array elements is greater than element size");
2401 	break;
2402       }
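
    /* A worked example for the array case above (hypothetical, not from
       the original source): for int a[5] with a 32-bit int, lb == 0 and
       ub == 4, so length == 4 - 0 + 1 == 5, giving TYPE_SIZE == 32 * 5
       == 160 bits and TYPE_SIZE_UNIT == 4 * 5 == 20 bytes.  */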
2403 
2404     case RECORD_TYPE:
2405     case UNION_TYPE:
2406     case QUAL_UNION_TYPE:
2407       {
2408 	tree field;
2409 	record_layout_info rli;
2410 
2411 	/* Initialize the layout information.  */
2412 	rli = start_record_layout (type);
2413 
2414 	/* If this is a QUAL_UNION_TYPE, we want to process the fields
2415 	   in the reverse order in building the COND_EXPR that denotes
2416 	   its size.  We reverse them again later.  */
2417 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
2418 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2419 
2420 	/* Place all the fields.  */
2421 	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2422 	  place_field (rli, field);
2423 
2424 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
2425 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2426 
2427 	/* Finish laying out the record.  */
2428 	finish_record_layout (rli, /*free_p=*/true);
2429       }
2430       break;
2431 
2432     default:
2433       gcc_unreachable ();
2434     }
2435 
2436   /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
2437      records and unions, finish_record_layout already called this
2438      function.  */
2439   if (TREE_CODE (type) != RECORD_TYPE
2440       && TREE_CODE (type) != UNION_TYPE
2441       && TREE_CODE (type) != QUAL_UNION_TYPE)
2442     finalize_type_size (type);
2443 
2444   /* We should never see alias sets on incomplete aggregates.  And we
2445      should not call layout_type on already-complete aggregates.  */
2446   if (AGGREGATE_TYPE_P (type))
2447     gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2448 }
2449 
2450 /* Return the least alignment required for type TYPE.  */
2451 
2452 unsigned int
2453 min_align_of_type (tree type)
2454 {
2455   unsigned int align = TYPE_ALIGN (type);
2456   if (!TYPE_USER_ALIGN (type))
2457     {
2458       align = MIN (align, BIGGEST_ALIGNMENT);
2459 #ifdef BIGGEST_FIELD_ALIGNMENT
2460       align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2461 #endif
2462       unsigned int field_align = align;
2463 #ifdef ADJUST_FIELD_ALIGN
2464       tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, type);
2465       field_align = ADJUST_FIELD_ALIGN (field, field_align);
2466       ggc_free (field);
2467 #endif
2468       align = MIN (align, field_align);
2469     }
2470   return align / BITS_PER_UNIT;
2471 }
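
/* For instance (a hedged example, not from the original source): on
   32-bit x86, ADJUST_FIELD_ALIGN caps double fields at 32 bits, so
   min_align_of_type for double returns 4 even though the type's
   natural alignment is 8 bytes.  */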
2472 
2473 /* Vector types need to re-check the target flags each time we report
2474    the machine mode.  We need to do this because attribute target can
2475    change the result of vector_mode_supported_p and have_regs_of_mode
2476    on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
2477    change on a per-function basis.  */
2478 /* ??? Possibly a better solution is to run through all the types
2479    referenced by a function and re-compute the TYPE_MODE once, rather
2480    than make the TYPE_MODE macro call a function.  */
2481 
2482 machine_mode
2483 vector_type_mode (const_tree t)
2484 {
2485   machine_mode mode;
2486 
2487   gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
2488 
2489   mode = t->type_common.mode;
2490   if (VECTOR_MODE_P (mode)
2491       && (!targetm.vector_mode_supported_p (mode)
2492 	  || !have_regs_of_mode[mode]))
2493     {
2494       machine_mode innermode = TREE_TYPE (t)->type_common.mode;
2495 
2496       /* For integers, try mapping it to a same-sized scalar mode.  */
2497       if (GET_MODE_CLASS (innermode) == MODE_INT)
2498 	{
2499 	  mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
2500 				* GET_MODE_BITSIZE (innermode), MODE_INT, 0);
2501 
2502 	  if (mode != VOIDmode && have_regs_of_mode[mode])
2503 	    return mode;
2504 	}
2505 
2506       return BLKmode;
2507     }
2508 
2509   return mode;
2510 }
2511 
2512 /* Create and return a type for signed integers of PRECISION bits.  */
2513 
2514 tree
2515 make_signed_type (int precision)
2516 {
2517   tree type = make_node (INTEGER_TYPE);
2518 
2519   TYPE_PRECISION (type) = precision;
2520 
2521   fixup_signed_type (type);
2522   return type;
2523 }
2524 
2525 /* Create and return a type for unsigned integers of PRECISION bits.  */
2526 
2527 tree
2528 make_unsigned_type (int precision)
2529 {
2530   tree type = make_node (INTEGER_TYPE);
2531 
2532   TYPE_PRECISION (type) = precision;
2533 
2534   fixup_unsigned_type (type);
2535   return type;
2536 }
2537 
2538 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2539    and SATP.  */
2540 
2541 tree
2542 make_fract_type (int precision, int unsignedp, int satp)
2543 {
2544   tree type = make_node (FIXED_POINT_TYPE);
2545 
2546   TYPE_PRECISION (type) = precision;
2547 
2548   if (satp)
2549     TYPE_SATURATING (type) = 1;
2550 
2551   /* Lay out the type: set its alignment, size, etc.  */
2552   if (unsignedp)
2553     {
2554       TYPE_UNSIGNED (type) = 1;
2555       SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2556     }
2557   else
2558     SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2559   layout_type (type);
2560 
2561   return type;
2562 }
2563 
2564 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2565    and SATP.  */
2566 
2567 tree
2568 make_accum_type (int precision, int unsignedp, int satp)
2569 {
2570   tree type = make_node (FIXED_POINT_TYPE);
2571 
2572   TYPE_PRECISION (type) = precision;
2573 
2574   if (satp)
2575     TYPE_SATURATING (type) = 1;
2576 
2577   /* Lay out the type: set its alignment, size, etc.  */
2578   if (unsignedp)
2579     {
2580       TYPE_UNSIGNED (type) = 1;
2581       SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2582     }
2583   else
2584     SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2585   layout_type (type);
2586 
2587   return type;
2588 }
2589 
2590 /* Initialize sizetypes so layout_type can use them.  */
2591 
2592 void
2593 initialize_sizetypes (void)
2594 {
2595   int precision, bprecision;
2596 
2597   /* Get sizetypes precision from the SIZE_TYPE target macro.  */
2598   if (strcmp (SIZETYPE, "unsigned int") == 0)
2599     precision = INT_TYPE_SIZE;
2600   else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2601     precision = LONG_TYPE_SIZE;
2602   else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2603     precision = LONG_LONG_TYPE_SIZE;
2604   else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2605     precision = SHORT_TYPE_SIZE;
2606   else
2607     {
2608       int i;
2609 
2610       precision = -1;
2611       for (i = 0; i < NUM_INT_N_ENTS; i++)
2612 	if (int_n_enabled_p[i])
2613 	  {
2614 	    char name[50];
2615 	    sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2616 
2617 	    if (strcmp (name, SIZETYPE) == 0)
2618 	      {
2619 		precision = int_n_data[i].bitsize;
2620 	      }
2621 	  }
2622       if (precision == -1)
2623 	gcc_unreachable ();
2624     }
2625 
2626   bprecision
2627     = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
2628   bprecision
2629     = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
2630   if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2631     bprecision = HOST_BITS_PER_DOUBLE_INT;
2632 
2633   /* Create stubs for sizetype and bitsizetype so we can create constants.  */
2634   sizetype = make_node (INTEGER_TYPE);
2635   TYPE_NAME (sizetype) = get_identifier ("sizetype");
2636   TYPE_PRECISION (sizetype) = precision;
2637   TYPE_UNSIGNED (sizetype) = 1;
2638   bitsizetype = make_node (INTEGER_TYPE);
2639   TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2640   TYPE_PRECISION (bitsizetype) = bprecision;
2641   TYPE_UNSIGNED (bitsizetype) = 1;
2642 
2643   /* Now layout both types manually.  */
2644   SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
2645   TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
2646   TYPE_SIZE (sizetype) = bitsize_int (precision);
2647   TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
2648   set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2649 
2650   SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
2651   TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
2652   TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2653   TYPE_SIZE_UNIT (bitsizetype)
2654     = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
2655   set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2656 
2657   /* Create the signed variants of *sizetype.  */
2658   ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2659   TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2660   sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2661   TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2662 }
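
/* A worked example (hypothetical, not from the original source): on a
   typical LP64 target where SIZETYPE is "long unsigned int", precision
   is 64, so bprecision starts as MIN (64 + 3 + 1, MAX_FIXED_MODE_SIZE)
   == 68 and is then widened to the precision of the smallest MODE_INT
   mode that fits (128 bits where TImode exists), giving a 64-bit
   sizetype and a 128-bit bitsizetype.  */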
2663 
2664 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2665    or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2666    for TYPE, based on PRECISION and the signedness SGN.
2667    PRECISION need not correspond to a width supported
2668    natively by the hardware; for example, on a machine with 8-bit,
2669    16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2670    61.  */
2671 
2672 void
2673 set_min_and_max_values_for_integral_type (tree type,
2674 					  int precision,
2675 					  signop sgn)
2676 {
2677   /* For bitfields with zero width we end up creating integer types
2678      with zero precision.  Don't assign any minimum/maximum values
2679      to those types, they don't have any valid value.  */
2680   if (precision < 1)
2681     return;
2682 
2683   TYPE_MIN_VALUE (type)
2684     = wide_int_to_tree (type, wi::min_value (precision, sgn));
2685   TYPE_MAX_VALUE (type)
2686     = wide_int_to_tree (type, wi::max_value (precision, sgn));
2687 }
2688 
2689 /* Set the extreme values of TYPE based on its precision in bits,
2690    then lay it out.  Used when make_signed_type won't do
2691    because the tree code is not INTEGER_TYPE.
2692    E.g. for Pascal, when the -fsigned-char option is given.  */
2693 
2694 void
2695 fixup_signed_type (tree type)
2696 {
2697   int precision = TYPE_PRECISION (type);
2698 
2699   set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2700 
2701   /* Lay out the type: set its alignment, size, etc.  */
2702   layout_type (type);
2703 }
2704 
2705 /* Set the extreme values of TYPE based on its precision in bits,
2706    then lay it out.  This is used both in `make_unsigned_type'
2707    and for enumeral types.  */
2708 
2709 void
2710 fixup_unsigned_type (tree type)
2711 {
2712   int precision = TYPE_PRECISION (type);
2713 
2714   TYPE_UNSIGNED (type) = 1;
2715 
2716   set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2717 
2718   /* Lay out the type: set its alignment, size, etc.  */
2719   layout_type (type);
2720 }
2721 
2722 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2723    starting at BITPOS.
2724 
2725    BITREGION_START is the bit position of the first bit in this
2726    sequence of bit fields.  BITREGION_END is the last bit in this
2727    sequence.  If these two fields are non-zero, we should restrict the
2728    memory access to that range.  Otherwise, we are allowed to touch
2729    any adjacent non bit-fields.
2730 
2731    ALIGN is the alignment of the underlying object in bits.
2732    VOLATILEP says whether the bitfield is volatile.  */
2733 
2734 bit_field_mode_iterator
2735 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2736 			   HOST_WIDE_INT bitregion_start,
2737 			   HOST_WIDE_INT bitregion_end,
2738 			   unsigned int align, bool volatilep)
2739 : m_mode (GET_CLASS_NARROWEST_MODE (MODE_INT)), m_bitsize (bitsize),
2740   m_bitpos (bitpos), m_bitregion_start (bitregion_start),
2741   m_bitregion_end (bitregion_end), m_align (align),
2742   m_volatilep (volatilep), m_count (0)
2743 {
2744   if (!m_bitregion_end)
2745     {
2746       /* We can assume that any aligned chunk of ALIGN bits that overlaps
2747 	 the bitfield is mapped and won't trap, provided that ALIGN isn't
2748 	 too large.  The cap is the biggest required alignment for data,
2749 	 or at least the word size.  We force at least one such chunk.  */
2750       unsigned HOST_WIDE_INT units
2751 	= MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2752       if (bitsize <= 0)
2753 	bitsize = 1;
2754       m_bitregion_end = bitpos + bitsize + units - 1;
2755       m_bitregion_end -= m_bitregion_end % units + 1;
2756     }
2757 }
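
/* A worked example of the default bitregion above (hypothetical, not from
   the original source): for bitsize == 10, bitpos == 35 and units ending
   up as 32, m_bitregion_end becomes 35 + 10 + 32 - 1 == 76, and then
   76 - (76 % 32 + 1) == 63, i.e. the last bit of the aligned 32-bit
   chunk [32, 63] that contains the field.  */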
2758 
2759 /* Calls to this function return successively larger modes that can be used
2760    to represent the bitfield.  Return true if another bitfield mode is
2761    available, storing it in *OUT_MODE if so.  */
2762 
2763 bool
2764 bit_field_mode_iterator::next_mode (machine_mode *out_mode)
2765 {
2766   for (; m_mode != VOIDmode; m_mode = GET_MODE_WIDER_MODE (m_mode))
2767     {
2768       unsigned int unit = GET_MODE_BITSIZE (m_mode);
2769 
2770       /* Skip modes that don't have full precision.  */
2771       if (unit != GET_MODE_PRECISION (m_mode))
2772 	continue;
2773 
2774       /* Stop if the mode is too wide to handle efficiently.  */
2775       if (unit > MAX_FIXED_MODE_SIZE)
2776 	break;
2777 
2778       /* Don't deliver more than one multiword mode; the smallest one
2779 	 should be used.  */
2780       if (m_count > 0 && unit > BITS_PER_WORD)
2781 	break;
2782 
2783       /* Skip modes that are too small.  */
2784       unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
2785       unsigned HOST_WIDE_INT subend = substart + m_bitsize;
2786       if (subend > unit)
2787 	continue;
2788 
2789       /* Stop if the mode goes outside the bitregion.  */
2790       HOST_WIDE_INT start = m_bitpos - substart;
2791       if (m_bitregion_start && start < m_bitregion_start)
2792 	break;
2793       HOST_WIDE_INT end = start + unit;
2794       if (end > m_bitregion_end + 1)
2795 	break;
2796 
2797       /* Stop if the mode requires too much alignment.  */
2798       if (GET_MODE_ALIGNMENT (m_mode) > m_align
2799 	  && SLOW_UNALIGNED_ACCESS (m_mode, m_align))
2800 	break;
2801 
2802       *out_mode = m_mode;
2803       m_mode = GET_MODE_WIDER_MODE (m_mode);
2804       m_count++;
2805       return true;
2806     }
2807   return false;
2808 }
2809 
2810 /* Return true if smaller modes are generally preferred for this kind
2811    of bitfield.  */
2812 
2813 bool
2814 bit_field_mode_iterator::prefer_smaller_modes ()
2815 {
2816   return (m_volatilep
2817 	  ? targetm.narrow_volatile_bitfield ()
2818 	  : !SLOW_BYTE_ACCESS);
2819 }
2820 
2821 /* Find the best machine mode to use when referencing a bit field of length
2822    BITSIZE bits starting at BITPOS.
2823 
2824    BITREGION_START is the bit position of the first bit in this
2825    sequence of bit fields.  BITREGION_END is the last bit in this
2826    sequence.  If these two fields are non-zero, we should restrict the
2827    memory access to that range.  Otherwise, we are allowed to touch
2828    any adjacent non bit-fields.
2829 
2830    The underlying object is known to be aligned to a boundary of ALIGN bits.
2831    If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2832    larger than LARGEST_MODE (usually SImode).
2833 
2834    If no mode meets all these conditions, we return VOIDmode.
2835 
2836    If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2837    smallest mode meeting these conditions.
2838 
2839    If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2840    largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2841    all the conditions.
2842 
2843    If VOLATILEP is true, the narrow_volatile_bitfield target hook is used to
2844    decide which of the above modes should be used.  */
2845 
2846 machine_mode
2847 get_best_mode (int bitsize, int bitpos,
2848 	       unsigned HOST_WIDE_INT bitregion_start,
2849 	       unsigned HOST_WIDE_INT bitregion_end,
2850 	       unsigned int align,
2851 	       machine_mode largest_mode, bool volatilep)
2852 {
2853   bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
2854 				bitregion_end, align, volatilep);
2855   machine_mode widest_mode = VOIDmode;
2856   machine_mode mode;
2857   while (iter.next_mode (&mode)
2858 	 /* ??? For historical reasons, reject modes that would normally
2859 	    receive greater alignment, even if unaligned accesses are
2860 	    acceptable.  This has both advantages and disadvantages.
2861 	    Removing this check means that something like:
2862 
2863 	       struct s { unsigned int x; unsigned int y; };
2864 	       int f (struct s *s) { return s->x == 0 && s->y == 0; }
2865 
2866 	    can be implemented using a single load and compare on
2867 	    64-bit machines that have no alignment restrictions.
2868 	    For example, on powerpc64-linux-gnu, we would generate:
2869 
2870 		    ld 3,0(3)
2871 		    cntlzd 3,3
2872 		    srdi 3,3,6
2873 		    blr
2874 
2875 	    rather than:
2876 
2877 		    lwz 9,0(3)
2878 		    cmpwi 7,9,0
2879 		    bne 7,.L3
2880 		    lwz 3,4(3)
2881 		    cntlzw 3,3
2882 		    srwi 3,3,5
2883 		    extsw 3,3
2884 		    blr
2885 		    .p2align 4,,15
2886 	    .L3:
2887 		    li 3,0
2888 		    blr
2889 
2890 	    However, accessing more than one field can make life harder
2891 	    for the gimple optimizers.  For example, gcc.dg/vect/bb-slp-5.c
2892 	    has a series of unsigned short copies followed by a series of
2893 	    unsigned short comparisons.  With this check, both the copies
2894 	    and comparisons remain 16-bit accesses and FRE is able
2895 	    to eliminate the latter.  Without the check, the comparisons
2896 	    can be done using 2 64-bit operations, which FRE isn't able
2897 	    to handle in the same way.
2898 
2899 	    Either way, it would probably be worth disabling this check
2900 	    during expand.  One particular example where removing the
2901 	    check would help is the get_best_mode call in store_bit_field.
2902 	    If we are given a memory bitregion of 128 bits that is aligned
2903 	    to a 64-bit boundary, and the bitfield we want to modify is
2904 	    in the second half of the bitregion, this check causes
2905 	    store_bitfield to turn the memory into a 64-bit reference
2906 	    to the _first_ half of the region.  We later use
2907 	    adjust_bitfield_address to get a reference to the correct half,
2908 	    but doing so looks to adjust_bitfield_address as though we are
2909 	    moving past the end of the original object, so it drops the
2910 	    associated MEM_EXPR and MEM_OFFSET.  Removing the check
2911 	    causes store_bit_field to keep a 128-bit memory reference,
2912 	    so that the final bitfield reference still has a MEM_EXPR
2913 	    and MEM_OFFSET.  */
2914 	 && GET_MODE_ALIGNMENT (mode) <= align
2915 	 && (largest_mode == VOIDmode
2916 	     || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
2917     {
2918       widest_mode = mode;
2919       if (iter.prefer_smaller_modes ())
2920 	break;
2921     }
2922   return widest_mode;
2923 }
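
/* A hedged illustration (not from the original source): for a 3-bit field
   at bit position 2 in an object aligned to 32 bits,

     get_best_mode (3, 2, 0, 0, 32, word_mode, false)

   yields QImode when SLOW_BYTE_ACCESS is false (the smallest fitting
   mode), and otherwise the widest fitting mode whose alignment does not
   exceed 32 bits (SImode on a typical target where DImode wants 64-bit
   alignment).  */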
2924 
2925 /* Get the minimal and maximal values for MODE (signed or unsigned depending
2926    on SIGN).  The returned constants are made usable in TARGET_MODE.  */
2927 
2928 void
2929 get_mode_bounds (machine_mode mode, int sign,
2930 		 machine_mode target_mode,
2931 		 rtx *mmin, rtx *mmax)
2932 {
2933   unsigned size = GET_MODE_PRECISION (mode);
2934   unsigned HOST_WIDE_INT min_val, max_val;
2935 
2936   gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2937 
2938   /* Special case BImode, which has values 0 and STORE_FLAG_VALUE.  */
2939   if (mode == BImode)
2940     {
2941       if (STORE_FLAG_VALUE < 0)
2942 	{
2943 	  min_val = STORE_FLAG_VALUE;
2944 	  max_val = 0;
2945 	}
2946       else
2947 	{
2948 	  min_val = 0;
2949 	  max_val = STORE_FLAG_VALUE;
2950 	}
2951     }
2952   else if (sign)
2953     {
2954       min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2955       max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2956     }
2957   else
2958     {
2959       min_val = 0;
2960       max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2961     }
2962 
2963   *mmin = gen_int_mode (min_val, target_mode);
2964   *mmax = gen_int_mode (max_val, target_mode);
2965 }
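
/* For example (a sketch, not from the original source): for QImode,
   get_mode_bounds yields -128 and 127 when SIGN is nonzero, and 0 and
   255 otherwise (the BImode special case above aside).  */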
2966 
2967 #include "gt-stor-layout.h"
2968