/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) tree pending_sizes;

/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a list of all the objects put on the pending sizes list.  */

tree
get_pending_sizes (void)
{
  tree chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
}
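/* Illustrative note (an assumption about typical front-end usage, not part
   of the original comments): when a front end lays out a C99 VLA such as
   "int a[n];", the non-constant size expression is wrapped in a SAVE_EXPR by
   variable_size below and recorded here through put_pending_size; the front
   end later drains the list with get_pending_sizes so that each size is
   evaluated exactly once at a safe point.  */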
/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (tree chain)
{
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  */
  if (lang_hooks.decls.global_bindings_p () < 0)
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
        error ("type size can%'t be explicitly evaluated");
      else
        error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}

/* An array of functions used for self-referential size computation.  */
static GTY(()) VEC (tree, gc) *size_functions;

/* Look inside EXPR into simple arithmetic operations involving constants.
   Return the outermost non-arithmetic or non-constant node.  */

static tree
skip_simple_constant_arithmetic (tree expr)
{
  while (true)
    {
      if (UNARY_CLASS_P (expr))
        expr = TREE_OPERAND (expr, 0);
      else if (BINARY_CLASS_P (expr))
        {
          if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
            expr = TREE_OPERAND (expr, 0);
          else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
            expr = TREE_OPERAND (expr, 1);
          else
            break;
        }
      else
        break;
    }

  return expr;
}

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }
  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
           REFERENCE_CLASS_P (inner);
           inner = TREE_OPERAND (inner, 0))
        ;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
        {
          *walk_subtrees = 0;
          return NULL_TREE;
        }
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  VEC (tree, heap) *self_refs = NULL;
  tree param_type_list = NULL, param_decl_list = NULL, arg_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (VEC_length (tree, self_refs) > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  for (i = 0; VEC_iterate (tree, self_refs, i, ref); i++)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      if (targetm.calls.promote_prototypes (NULL_TREE)
          && INTEGRAL_TYPE_P (param_type)
          && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
        DECL_ARG_TYPE (param_decl) = integer_type_node;
      else
        DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      arg_list = tree_cons (NULL_TREE, ref, arg_list);
    }

  VEC_free (tree, heap, self_refs);

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);
  arg_list = nreverse (arg_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = TREE_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  VEC_safe_push (tree, gc, size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_function_call_expr (input_location, fndecl, arg_list);
}

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
    {
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  VEC_free (tree, gc, size_functions);
}
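/* Illustrative sketch (hypothetical and purely conceptual, not taken from
   the original sources): for an Ada-like record whose size expression is
   "(PLACEHOLDER_EXPR.n + 3) * 8", self_referential_size generates something
   along the lines of

     SZ0 (p0) { return (p0 + 3) * 8; }

   and rewrites the size as SZ0 (<the n component>).  finalize_size_functions
   then gimplifies these SZ* functions at the very end of compilation so that
   the inliner can fold the calls back at each use.  */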
#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes of wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}


/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */
void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode
             and occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
              unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

              if (xmode != BLKmode
                  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                  && (known_align == 0 || known_align >= xalign))
                {
                  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (i.e. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (OPT_Wlarger_than_eq, "size of %q+D is %d bytes", decl, size_as_int);
          else
            warning (OPT_Wlarger_than_eq, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR,
                                 fold_convert (bitsizetype, offset),
                                 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
                     fold_convert (sizetype,
                                   size_binop (TRUNC_DIV_EXPR, bitpos,
                                               bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  *poffset = size_binop (MULT_EXPR,
                         fold_convert (sizetype,
                                       size_binop (FLOOR_DIV_EXPR, pos,
                                                   bitsize_int (off_align))),
                         size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
                                      bitsize_int (off_align));

      *poffset
        = size_binop (PLUS_EXPR, *poffset,
                      size_binop (MULT_EXPR,
                                  fold_convert (sizetype, extra_aligns),
                                  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
        = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
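/* Worked example (illustrative, assuming BITS_PER_UNIT == 8): for OFFSET == 3
   bytes and BITPOS == 9 bits, bit_from_pos yields 3*8 + 9 == 33 and
   byte_from_pos yields 3 + 9/8 == 4.  Conversely, pos_from_bit with
   OFF_ALIGN == 32 and POS == 70 produces an offset of (70/32)*4 == 8 bytes
   and a bit position of 70 % 32 == 6.  normalize_offset performs the same
   carry from *PBITPOS into *POFFSET once the bit position reaches
   OFF_ALIGN or more.  */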
/* Print debugging information about the information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
          || (!integer_zerop (DECL_SIZE (field))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3_loc (input_location, COND_EXPR, sizetype,
                                   DECL_QUALIFIER (field),
                                   DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif
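/* Worked example (illustrative, assuming BITS_PER_UNIT == 8): placing a
   6-bit field of an 8-bit type (ALIGN == 8) at BYTE_OFFSET == 3,
   BIT_OFFSET == 4 gives OFFSET == 28, then 28 % 8 == 4; the field would
   occupy (4 + 6 + 7) / 8 == 2 alignment units while the type itself only
   spans 8 / 8 == 1, so excess_unit_span returns nonzero and the caller
   advances to the next boundary.  */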
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      rli->pending_statics = tree_cons (NULL_TREE, field,
                                        rli->pending_statics);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
         maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
                      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
              /* Don't warn if DECL_PACKED was set by the type.  */
              else if (!TYPE_PACKED (rli->t))
                warning (OPT_Wattributes, "packed attribute is "
                         "unnecessary for %q+D", field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?
     We already align ms_struct fields, so don't re-align them.  */
  if (known_align < desired_align
      && !targetm.ms_bitfield_layout_p (rli->t))
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to multiple of field alignment.  */

      if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
        warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          fold_convert (sizetype,
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                    bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;

    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
          /* Enter for these packed fields only to issue a warning.  */
          || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        {
          if (DECL_PACKED (field))
            {
              if (warn_packed_bitfield_compat == 1)
                inform
                  (input_location,
                   "Offset of packed bit-field %qD has changed in GCC 4.4",
                   field);
            }
          else
            rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
        }

      if (! DECL_PACKED (field))
        TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
        When a bit field is inserted into a packed record, the whole
        size of the underlying type is used by one or more same-size
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits.  However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
        {
          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run".  (Note: it's nonzero declared
             size, but equal type sizes!)  (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.)  */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
              && host_integerp (TYPE_SIZE (type), 0)
              && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!)  */
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

              if (rli->remaining_in_alignment < bitsize)
                {
                  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

                  /* out of bits; bump up to next 'word'.  */
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                  rli->prev_field = field;
                  if (typesize < bitsize)
                    rli->remaining_in_alignment = 0;
                  else
                    rli->remaining_in_alignment = typesize - bitsize;
                }
              else
                rli->remaining_in_alignment -= bitsize;
            }
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same size type bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield the type sizes differ, or if there wasn't
         a prior bitfield the size of the current field is nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
              : !integer_zerop (DECL_SIZE (field)) ))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
              && host_integerp (DECL_SIZE (field), 1))
            {
              unsigned HOST_WIDE_INT bitsize
                = tree_low_cst (DECL_SIZE (field), 1);
              unsigned HOST_WIDE_INT typesize
                = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

              if (typesize < bitsize)
                rli->remaining_in_alignment = 0;
              else
                rli->remaining_in_alignment = typesize - bitsize;
            }

          /* Now align (conventionally) for the new type.  */
          type_align = TYPE_ALIGN (TREE_TYPE (field));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
           || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      fold_convert (sizetype,
                                    size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
         pad the struct out to the full length of the last type.  */
      if ((TREE_CHAIN (field) == NULL
           || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
          && DECL_BIT_FIELD_TYPE (field)
          && !integer_zerop (DECL_SIZE (field)))
        rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
We have guaranteed above that it will be no more 1501 than a single byte. */ 1502 unpadded_size = rli_size_so_far (rli); 1503 unpadded_size_unit = rli_size_unit_so_far (rli); 1504 if (! integer_zerop (rli->bitpos)) 1505 unpadded_size_unit 1506 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); 1507 1508 /* Round the size up to be a multiple of the required alignment. */ 1509 TYPE_SIZE (rli->t) = round_up_loc (input_location, unpadded_size, 1510 TYPE_ALIGN (rli->t)); 1511 TYPE_SIZE_UNIT (rli->t) 1512 = round_up_loc (input_location, unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); 1513 1514 if (TREE_CONSTANT (unpadded_size) 1515 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0 1516 && input_location != BUILTINS_LOCATION) 1517 warning (OPT_Wpadded, "padding struct size to alignment boundary"); 1518 1519 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE 1520 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary 1521 && TREE_CONSTANT (unpadded_size)) 1522 { 1523 tree unpacked_size; 1524 1525 #ifdef ROUND_TYPE_ALIGN 1526 rli->unpacked_align 1527 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); 1528 #else 1529 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); 1530 #endif 1531 1532 unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t), rli->unpacked_align); 1533 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) 1534 { 1535 TYPE_PACKED (rli->t) = 0; 1536 1537 if (TYPE_NAME (rli->t)) 1538 { 1539 tree name; 1540 1541 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) 1542 name = TYPE_NAME (rli->t); 1543 else 1544 name = DECL_NAME (TYPE_NAME (rli->t)); 1545 1546 if (STRICT_ALIGNMENT) 1547 warning (OPT_Wpacked, "packed attribute causes inefficient " 1548 "alignment for %qE", name); 1549 else 1550 warning (OPT_Wpacked, 1551 "packed attribute is unnecessary for %qE", name); 1552 } 1553 else 1554 { 1555 if (STRICT_ALIGNMENT) 1556 warning (OPT_Wpacked, 1557 "packed attribute causes inefficient alignment"); 1558 else 1559 warning (OPT_Wpacked, "packed attribute is unnecessary"); 1560 } 1561 } 1562 } 1563 } 1564 1565 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ 1566 1567 void 1568 compute_record_mode (tree type) 1569 { 1570 tree field; 1571 enum machine_mode mode = VOIDmode; 1572 1573 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. 1574 However, if possible, we use a mode that fits in a register 1575 instead, in order to allow for better optimization down the 1576 line. */ 1577 SET_TYPE_MODE (type, BLKmode); 1578 1579 if (! host_integerp (TYPE_SIZE (type), 1)) 1580 return; 1581 1582 /* A record which has any BLKmode members must itself be 1583 BLKmode; it can't go in a register. Unless the member is 1584 BLKmode only because it isn't aligned. */ 1585 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) 1586 { 1587 if (TREE_CODE (field) != FIELD_DECL) 1588 continue; 1589 1590 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK 1591 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode 1592 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) 1593 && !(TYPE_SIZE (TREE_TYPE (field)) != 0 1594 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) 1595 || ! host_integerp (bit_position (field), 1) 1596 || DECL_SIZE (field) == 0 1597 || ! host_integerp (DECL_SIZE (field), 1)) 1598 return; 1599 1600 /* If this field is the whole struct, remember its mode so 1601 that, say, we can put a double in a class into a DF 1602 register instead of forcing it to live in the stack. 
/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
          || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
              && TREE_CODE (type) != QUAL_UNION_TYPE
              && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
         alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
        {
          TYPE_ALIGN (type) = mode_align;
          TYPE_USER_ALIGN (type) = 0;
        }
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
                      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                                  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up_loc (input_location,
                                       TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type) = round_up_loc (input_location, TYPE_SIZE_UNIT (type),
                                            TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also lay out any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
          TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_MODE (variant, mode);
        }
    }
}

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than false
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (rli->pending_statics)
    {
      layout_decl (TREE_VALUE (rli->pending_statics), 0);
      rli->pending_statics = TREE_CHAIN (rli->pending_statics);
    }

  /* Clean up.  */
  if (free_p)
    free (rli);
}

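/* The rounding performed by finalize_record_size and finalize_type_size
   above is ordinary round-up-to-a-multiple arithmetic, applied to
   tree-level size expressions.  The standalone sketch below is not part
   of the compiler; it operates on plain integers rather than trees, and
   the struct used as the example is hypothetical.  It shows the same
   computation: a record whose fields occupy 5 bytes but whose required
   alignment is 4 bytes has its size padded to 8 bytes, which is exactly
   the situation OPT_Wpadded warns about.  */
#if 0
#include <assert.h>

/* Round SIZE up to the next multiple of ALIGN (ALIGN must be nonzero).  */
static unsigned long
round_up_bytes (unsigned long size, unsigned long align)
{
  return ((size + align - 1) / align) * align;
}

static void
round_up_example (void)
{
  /* struct { int i; char c; } on a typical 32-bit ABI:
     5 bytes of fields, 4-byte alignment.  */
  unsigned long unpadded_size_unit = 5;
  unsigned long align_unit = 4;

  assert (round_up_bytes (unpadded_size_unit, align_unit) == 8);

  /* A packed variant keeps 1-byte alignment and is not padded.  */
  assert (round_up_bytes (unpadded_size_unit, 1) == 5);
}
#endif
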
/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, and its fields are chained in reverse order on FIELDS.

   If ALIGN_TYPE is non-null, TYPE is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
                       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = TREE_CHAIN (fields);
      TREE_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
                                 TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
        TYPE_PRECISION (type) = 1; /* default to one byte/boolean.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
          && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
        TYPE_UNSIGNED (type) = 1;

      SET_TYPE_MODE (type,
                     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case REAL_TYPE:
      SET_TYPE_MODE (type,
                     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case FIXED_POINT_TYPE:
      /* TYPE_MODE (type) has been set already.  */
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
                     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
                                    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                                     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
                                    0));
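      /* For instance, on typical targets a complex type whose component
         is a 64-bit REAL_TYPE asks mode_for_size for a 128-bit
         MODE_COMPLEX_FLOAT mode here, and the size recorded below is
         twice the component size.  */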
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        int nunits = TYPE_VECTOR_SUBPARTS (type);
        tree innertype = TREE_TYPE (type);

        gcc_assert (!(nunits & (nunits - 1)));

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          {
            enum machine_mode innermode = TYPE_MODE (innertype);
            enum machine_mode mode;

            /* First, look for a supported vector type.  */
            if (SCALAR_FLOAT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_FLOAT;
            else if (SCALAR_FRACT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_FRACT;
            else if (SCALAR_UFRACT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_UFRACT;
            else if (SCALAR_ACCUM_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_ACCUM;
            else if (SCALAR_UACCUM_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_UACCUM;
            else
              mode = MIN_MODE_VECTOR_INT;

            /* Do not check vector_mode_supported_p here.  We'll do that
               later in vector_type_mode.  */
            for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
              if (GET_MODE_NUNITS (mode) == nunits
                  && GET_MODE_INNER (mode) == innermode)
                break;

            /* For integers, try mapping it to a same-sized scalar mode.  */
            if (mode == VOIDmode
                && GET_MODE_CLASS (innermode) == MODE_INT)
              mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
                                    MODE_INT, 0);

            if (mode == VOIDmode ||
                (GET_MODE_CLASS (mode) == MODE_INT
                 && !have_regs_of_mode[mode]))
              SET_TYPE_MODE (type, BLKmode);
            else
              SET_TYPE_MODE (type, mode);
          }

        TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                 TYPE_SIZE_UNIT (innertype),
                                                 size_int (nunits), 0);
        TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
                                            bitsize_int (nunits), 0);

        /* Always naturally align vectors.  This prevents ABI changes
           depending on whether or not native vector modes are
           supported.  */
        TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
         but ptrdiff_t must be integral.  */
      SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        enum machine_mode mode = TYPE_MODE (type);
        if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
          {
            addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
            mode = targetm.addr_space.address_mode (as);
          }

        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
      }
      break;

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        build_pointer_type (element);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree element_size = TYPE_SIZE (element);
            tree length;

            /* Make sure that an array of zero-sized elements is zero-sized
               regardless of its extent.  */
            if (integer_zerop (element_size))
              length = size_zero_node;

            /* The initial subtraction should happen in the original type so
               that (possible) negative values are handled appropriately.  */
            else
              length
                = size_binop (PLUS_EXPR, size_one_node,
                              fold_convert (sizetype,
                                            fold_build2_loc (input_location,
                                                             MINUS_EXPR,
                                                             TREE_TYPE (lb),
                                                             ub, lb)));

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           fold_convert (bitsizetype,
                                                         length));

            /* If we know the size of the element, calculate the total size
               directly, rather than recovering it later by division.  This
               optimization helps Fortran assumed-size arrays (where the
               size of the array is determined at runtime) substantially.  */
            if (TYPE_SIZE_UNIT (element))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
        TYPE_ALIGN (type)
          = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
        TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
        TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        SET_TYPE_MODE (type, BLKmode);
        if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
            && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
            /* BLKmode elements force a BLKmode aggregate; otherwise
               extracting or storing fields may not work correctly.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            /* One-element arrays get the component type's mode.  */
            if (simple_cst_equal (TYPE_SIZE (type),
                                  TYPE_SIZE (TREE_TYPE (type))))
              SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
            else
              SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
                                                       MODE_INT, 1));

            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                TYPE_NO_FORCE_BLK (type) = 1;
                SET_TYPE_MODE (type, BLKmode);
              }
          }
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element))
            && compare_tree_int (TYPE_SIZE_UNIT (element),
                                 TYPE_ALIGN_UNIT (element)) < 0)
          error ("alignment of array elements is greater than element size");
        break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in reverse order when building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates.  And we
     should not call layout_type on aggregates that are already
     complete.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}
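/* The ARRAY_TYPE arithmetic in layout_type above reduces to "number of
   elements times element size", with the element count derived from the
   index bounds.  The standalone sketch below is not part of the compiler;
   it uses plain integers, and the bounds and element sizes are made-up
   example values.  */
#if 0
#include <assert.h>

/* Number of elements of an array whose index runs from LB to UB,
   inclusive, mirroring length = ub - lb + 1 above.  */
static long
array_length (long lb, long ub)
{
  return ub - lb + 1;
}

static void
array_size_example (void)
{
  /* An array with domain [0, 9] of 4-byte (32-bit) elements.  */
  long length = array_length (0, 9);

  assert (length == 10);
  assert (length * 32 == 320);  /* TYPE_SIZE, in bits.  */
  assert (length * 4 == 40);    /* TYPE_SIZE_UNIT, in bytes.  */
}
#endif
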
/* Vector types need to re-check the target flags each time we report
   the machine mode.  We need to do this because the target attribute can
   change the result of vector_mode_supported_p and have_regs_of_mode
   on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
   change on a per-function basis.  */
/* ??? Possibly a better solution is to run through all the types
   referenced by a function and re-compute the TYPE_MODE once, rather
   than make the TYPE_MODE macro call a function.  */

enum machine_mode
vector_type_mode (const_tree t)
{
  enum machine_mode mode;

  gcc_assert (TREE_CODE (t) == VECTOR_TYPE);

  mode = t->type.mode;
  if (VECTOR_MODE_P (mode)
      && (!targetm.vector_mode_supported_p (mode)
          || !have_regs_of_mode[mode]))
    {
      enum machine_mode innermode = TREE_TYPE (t)->type.mode;

      /* For integers, try mapping it to a same-sized scalar mode.  */
      if (GET_MODE_CLASS (innermode) == MODE_INT)
        {
          mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
                                * GET_MODE_BITSIZE (innermode), MODE_INT, 0);

          if (mode != VOIDmode && have_regs_of_mode[mode])
            return mode;
        }

      return BLKmode;
    }

  return mode;
}

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}

/* Create and return a fract type of PRECISION bits, unsigned if
   UNSIGNEDP is set, saturating if SATP is set.  */

tree
make_fract_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
  layout_type (type);

  return type;
}

/* Create and return an accum type of PRECISION bits, unsigned if
   UNSIGNEDP is set, saturating if SATP is set.  */

tree
make_accum_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
  layout_type (type);

  return type;
}

/* Initialize sizetype and bitsizetype to reasonable, temporary values,
   so that integer types can be created.  */

void
initialize_sizetypes (bool signed_p)
{
  tree t = make_node (INTEGER_TYPE);
  int precision = GET_MODE_BITSIZE (SImode);

  SET_TYPE_MODE (t, SImode);
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_USER_ALIGN (t) = 0;
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = !signed_p;
  TYPE_SIZE (t) = build_int_cst (t, precision);
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = precision;

  /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE.  */
  set_min_and_max_values_for_integral_type (t, precision, !signed_p);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}
/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
   We do this by overwriting the stub sizetype and bitsizetype nodes
   created by initialize_sizetypes.  This makes sure that (a) anything
   stubby about them no longer exists and (b) any INTEGER_CSTs created
   with such a type remain valid.  */

void
set_sizetype (tree type)
{
  tree t;
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32-bit to a 64-bit host, we are limited to
     64-bit precision.  */
  int precision
    = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
  precision
    = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));

  t = build_distinct_type_copy (type);
  /* We do want to use sizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;
  TYPE_CANONICAL (sizetype) = sizetype;

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We do want to use bitsizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
  TYPE_CANONICAL (bitsizetype) = bitsizetype;

  if (TYPE_UNSIGNED (type))
    {
      fixup_unsigned_type (bitsizetype);
      ssizetype = make_signed_type (oprecision);
      TYPE_IS_SIZETYPE (ssizetype) = 1;
      sbitsizetype = make_signed_type (precision);
      TYPE_IS_SIZETYPE (sbitsizetype) = 1;
    }
  else
    {
      fixup_signed_type (bitsizetype);
      ssizetype = sizetype;
      sbitsizetype = bitsizetype;
    }

  /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
     it is sign-extended in a way consistent with force_fit_type.  */
  if (TYPE_UNSIGNED (type))
    {
      tree orig_max, new_max;

      orig_max = TYPE_MAX_VALUE (sizetype);

      /* Build a new node with the same values, but a different type.
         Sign extend it to ensure consistency.  */
      new_max = build_int_cst_wide_type (sizetype,
                                         TREE_INT_CST_LOW (orig_max),
                                         TREE_INT_CST_HIGH (orig_max));
      TYPE_MAX_VALUE (sizetype) = new_max;
    }
}
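/* The precision computation at the top of set_sizetype is simple integer
   arithmetic once the target parameters are known.  The standalone sketch
   below is not part of the compiler; the numeric values stand in for
   BITS_PER_UNIT_LOG, MAX_FIXED_MODE_SIZE, HOST_BITS_PER_WIDE_INT and the
   behaviour of smallest_mode_for_size on an assumed target with 8-bit
   units, a 64-bit widest integer mode and a 64-bit HOST_WIDE_INT.  */
#if 0
#include <assert.h>

static void
bitsizetype_precision_example (void)
{
  int oprecision = 32;                  /* TYPE_PRECISION of sizetype.  */
  int bits_per_unit_log = 3;            /* log2 (BITS_PER_UNIT).  */
  int max_fixed_mode_size = 64;         /* example MAX_FIXED_MODE_SIZE.  */
  int host_bits_per_wide_int = 64;      /* example HOST_BITS_PER_WIDE_INT.  */

  /* Enough bits to hold a size in bits without signed overflow.  */
  int precision = oprecision + bits_per_unit_log + 1;  /* 36.  */
  if (precision > max_fixed_mode_size)
    precision = max_fixed_mode_size;

  /* smallest_mode_for_size: round up to the narrowest of the usual
     integer mode widths on this example target.  */
  {
    static const int mode_bits[] = { 8, 16, 32, 64 };
    unsigned i;
    for (i = 0; i < sizeof mode_bits / sizeof mode_bits[0]; i++)
      if (mode_bits[i] >= precision)
        {
          precision = mode_bits[i];
          break;
        }
  }

  if (precision > host_bits_per_wide_int * 2)
    precision = host_bits_per_wide_int * 2;

  assert (precision == 64);             /* bitsizetype gets 64 bits.  */
}
#endif
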
/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
                                          int precision,
                                          bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
        = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
                              ? -1
                              : ((HOST_WIDE_INT) 1 << precision) - 1,
                              precision - HOST_BITS_PER_WIDE_INT > 0
                              ? ((unsigned HOST_WIDE_INT) ~0
                                 >> (HOST_BITS_PER_WIDE_INT
                                     - (precision - HOST_BITS_PER_WIDE_INT)))
                              : 0);
    }
  else
    {
      min_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? 0
                               : (HOST_WIDE_INT) (-1) << (precision - 1)),
                              (((HOST_WIDE_INT) (-1)
                                << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                                    ? precision - HOST_BITS_PER_WIDE_INT - 1
                                    : 0))));
      max_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? -1
                               : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
                              (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                               ? (((HOST_WIDE_INT) 1
                                   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
                               : 0));
    }

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT, but we still need such types, as they
     are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT, but we still need such types, as they
     are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}
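/* For precisions that fit in a single HOST_WIDE_INT, the expressions in
   set_min_and_max_values_for_integral_type above reduce to the familiar
   two's-complement bounds.  The standalone sketch below is not part of
   the compiler; it uses 64-bit integers in place of HOST_WIDE_INT and
   checks one of the "odd" precisions mentioned above.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
integral_bounds_example (void)
{
  int precision = 7;

  /* Unsigned: [0, 2^7 - 1].  */
  uint64_t umax = ((uint64_t) 1 << precision) - 1;
  assert (umax == 127);

  /* Signed: [-2^6, 2^6 - 1].  */
  int64_t smin = -((int64_t) 1 << (precision - 1));
  int64_t smax = ((int64_t) 1 << (precision - 1)) - 1;
  assert (smin == -64 && smax == 63);
}
#endif
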
/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
   decide which of the above modes should be used.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
               enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
        break;
    }

  if (mode == VOIDmode
      /* It is tempting to omit the following line
         if STRICT_ALIGNMENT is true.
         But that is incorrect, since if the bitfield uses part of 3 bytes
         and we use a 4-byte mode, we could get a spurious segv
         if the extra 4th byte is past the end of memory.
         (Though at least one Unix compiler ignores this problem:
         that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if ((SLOW_BYTE_ACCESS && ! volatilep)
      || (volatilep && !targetm.narrow_volatile_bitfield ()))
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
           tmode = GET_MODE_WIDER_MODE (tmode))
        {
          unit = GET_MODE_BITSIZE (tmode);
          if (bitpos / unit == (bitpos + bitsize - 1) / unit
              && unit <= BITS_PER_WORD
              && unit <= MIN (align, BIGGEST_ALIGNMENT)
              && (largest_mode == VOIDmode
                  || unit <= GET_MODE_BITSIZE (largest_mode)))
            wide_mode = tmode;
        }

      if (wide_mode != VOIDmode)
        return wide_mode;
    }

  return mode;
}

/* Get the minimal and maximal values for MODE (signed or unsigned depending
   on SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
                 enum machine_mode target_mode,
                 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}

#include "gt-stor-layout.h"
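/* The narrowest-mode search in get_best_mode above boils down to finding
   the smallest access unit such that the whole bit field lies within one
   aligned unit, i.e. (bitpos % unit) + bitsize <= unit.  The standalone
   sketch below is not part of the compiler; the unit widths stand in for
   the usual 8/16/32/64-bit integer modes.  */
#if 0
#include <assert.h>

/* Return the narrowest unit (in bits) that the bit field fits in,
   or 0 if none of the candidate units works.  */
static int
narrowest_unit (int bitsize, int bitpos)
{
  static const int units[] = { 8, 16, 32, 64 };
  unsigned i;

  for (i = 0; i < sizeof units / sizeof units[0]; i++)
    if (bitpos % units[i] + bitsize <= units[i])
      return units[i];
  return 0;
}

static void
best_mode_example (void)
{
  /* A 3-bit field at bit 5 fits within a single byte.  */
  assert (narrowest_unit (3, 5) == 8);

  /* A 6-bit field at bit 5 straddles a byte boundary, so the narrowest
     unit containing it is a 16-bit one.  */
  assert (narrowest_unit (6, 5) == 16);
}
#endif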