1 /* Default target hook functions. 2 Copyright (C) 2003-2020 Free Software Foundation, Inc. 3 4 This file is part of GCC. 5 6 GCC is free software; you can redistribute it and/or modify it under 7 the terms of the GNU General Public License as published by the Free 8 Software Foundation; either version 3, or (at your option) any later 9 version. 10 11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY 12 WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with GCC; see the file COPYING3. If not see 18 <http://www.gnu.org/licenses/>. */ 19 20 /* The migration of target macros to target hooks works as follows: 21 22 1. Create a target hook that uses the existing target macros to 23 implement the same functionality. 24 25 2. Convert all the MI files to use the hook instead of the macro. 26 27 3. Repeat for a majority of the remaining target macros. This will 28 take some time. 29 30 4. Tell target maintainers to start migrating. 31 32 5. Eventually convert the backends to override the hook instead of 33 defining the macros. This will take some time too. 34 35 6. TBD when, poison the macros. Unmigrated targets will break at 36 this point. 37 38 Note that we expect steps 1-3 to be done by the people that 39 understand what the MI does with each macro, and step 5 to be done 40 by the target maintainers for their respective targets. 41 42 Note that steps 1 and 2 don't have to be done together, but no 43 target can override the new hook until step 2 is complete for it. 44 45 Once the macros are poisoned, we will revert to the old migration 46 rules - migrate the macro, callers, and targets all at once. This 47 comment can thus be removed at that point. 
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
#include "function-abi.h"

/* The default implementation of TARGET_LEGITIMATE_ADDRESS_P.  Defers to
   the old-style GO_IF_LEGITIMATE_ADDRESS macro when the target still
   defines it; a target that defines neither must override the hook.  */

bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                              rtx addr ATTRIBUTE_UNUSED,
                              bool strict ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Defer to the old implementation using a goto.  */
  if (strict)
    return strict_memory_address_p (mode, addr);
  else
    return memory_address_p (mode, addr);
#else
  gcc_unreachable ();
#endif
}

/* The default implementation of TARGET_ASM_EXTERNAL_LIBCALL.  Emits FUN
   via ASM_OUTPUT_EXTERNAL_LIBCALL if the target defines that macro;
   otherwise does nothing.  */

void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
  ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
#endif
}

/* The default implementation of TARGET_UNSPEC_MAY_TRAP_P.  Returns
   nonzero if the UNSPEC X may trap: any floating-point UNSPEC when
   -ftrapping-math, or any UNSPEC whose operands may trap.  */

int
default_unspec_may_trap_p (const_rtx x, unsigned flags)
{
  int i;

  /* Any floating arithmetic may trap.  */
  if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math))
    return 1;

  for (i = 0; i < XVECLEN (x, 0); ++i)
    {
      if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
        return 1;
    }

  return 0;
}

/* The default implementation of TARGET_PROMOTE_FUNCTION_MODE.  Only
   promotes MODE (via promote_mode) when a type is available and
   FOR_RETURN is 2.  */

machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                               machine_mode mode,
                               int *punsignedp ATTRIBUTE_UNUSED,
                               const_tree funtype ATTRIBUTE_UNUSED,
                               int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && for_return == 2)
    return promote_mode (type, mode, punsignedp);
  return mode;
}

/* A variant of TARGET_PROMOTE_FUNCTION_MODE that always applies
   promote_mode, regardless of FOR_RETURN.  */

machine_mode
default_promote_function_mode_always_promote (const_tree type,
                                              machine_mode mode,
                                              int *punsignedp,
                                              const_tree funtype ATTRIBUTE_UNUSED,
                                              int for_return ATTRIBUTE_UNUSED)
{
  return promote_mode (type, mode, punsignedp);
}

/* The default implementation of TARGET_CC_MODES_COMPATIBLE: two
   condition-code modes are only compatible if identical.  */

machine_mode
default_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;
  return VOIDmode;
}

/* The default implementation of TARGET_RETURN_IN_MEMORY: return values
   whose mode is BLKmode are returned in memory.  */

bool
default_return_in_memory (const_tree type,
                          const_tree fntype ATTRIBUTE_UNUSED)
{
  return (TYPE_MODE (type) == BLKmode);
}

/* The default implementation of TARGET_LEGITIMIZE_ADDRESS: perform no
   transformation, return X unchanged.  */

rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
                            machine_mode mode ATTRIBUTE_UNUSED)
{
  return x;
}

/* The default implementation of TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT.  */

bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
                                         machine_mode)
{
  return false;
}

/* The default implementation of TARGET_CONST_NOT_OK_FOR_DEBUG_P:
   UNSPECs cannot be expressed in debug info.  */

bool
default_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  return false;
}

/* The default implementation of TARGET_EXPAND_BUILTIN_SAVEREGS:
   report the builtin as unsupported.  */

rtx
default_expand_builtin_saveregs (void)
{
  error ("%<__builtin_saveregs%> not supported by this target");
  return const0_rtx;
}

/* The default implementation of TARGET_SETUP_INCOMING_VARARGS: do
   nothing.  */

void
default_setup_incoming_varargs (cumulative_args_t,
                                const function_arg_info &, int *, int)
{
}

/* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE.  */

rtx
default_builtin_setjmp_frame_value (void)
{
  return virtual_stack_vars_rtx;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

/* The default implementation of TARGET_PRETEND_OUTGOING_VARARGS_NAMED:
   true iff the target overrides the setup_incoming_varargs hook.  */

bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return (targetm.calls.setup_incoming_varargs
          != default_setup_incoming_varargs);
}

/* The default implementation of TARGET_EH_RETURN_FILTER_MODE: use the
   unwind word mode.  */

scalar_int_mode
default_eh_return_filter_mode (void)
{
  return targetm.unwind_word_mode ();
}

/* The default implementation of TARGET_LIBGCC_CMP_RETURN_MODE.  */

scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
  return word_mode;
}

/* The default implementation of TARGET_LIBGCC_SHIFT_COUNT_MODE.  */

scalar_int_mode
default_libgcc_shift_count_mode (void)
{
  return word_mode;
}

/* The default implementation of TARGET_UNWIND_WORD_MODE.  */

scalar_int_mode
default_unwind_word_mode (void)
{
  return word_mode;
}

/* The default implementation of TARGET_SHIFT_TRUNCATION_MASK.  */

unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
  return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}

/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL.  */

unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
{
  return have_insn_for (DIV, mode) ? 3 : 2;
}

/* The default implementation of TARGET_MODE_REP_EXTENDED.  */

int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
  return UNKNOWN;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */

bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return machine mode for non-standard suffix
   or VOIDmode if non-standard suffixes are unsupported.
 */
machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
{
  return VOIDmode;
}

/* The generic C++ ABI specifies this is a 64-bit value.  */
tree
default_cxx_guard_type (void)
{
  return long_long_integer_type_node;
}

/* Returns the size of the cookie to use when allocating an array
   whose elements have the indicated TYPE.  Assumes that it is already
   known that a cookie is needed.  */

tree
default_cxx_get_cookie_size (tree type)
{
  tree cookie_size;

  /* We need to allocate an additional max (sizeof (size_t), alignof
     (true_type)) bytes.  */
  tree sizetype_size;
  tree type_align;

  sizetype_size = size_in_bytes (sizetype);
  type_align = size_int (TYPE_ALIGN_UNIT (type));
  if (tree_int_cst_lt (type_align, sizetype_size))
    cookie_size = sizetype_size;
  else
    cookie_size = type_align;

  return cookie_size;
}

/* Return true if a parameter must be passed by reference.  This version
   of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */

bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
                                           const function_arg_info &arg)
{
  return targetm.calls.must_pass_in_stack (arg);
}

/* Return true if a parameter follows callee copies conventions.  This
   version of the hook is true for all named arguments.  */

bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
  return arg.named;
}

/* Emit to STREAM the assembler syntax for insn operand X.  */

void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
                       int code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND
  PRINT_OPERAND (stream, x, code);
#else
  gcc_unreachable ();
#endif
}

/* Emit to STREAM the assembler syntax for an insn operand whose memory
   address is X.  */

void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
                               machine_mode /*mode*/,
                               rtx x ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_ADDRESS
  PRINT_OPERAND_ADDRESS (stream, x);
#else
  gcc_unreachable ();
#endif
}

/* Return true if CODE is a valid punctuation character for the
   `print_operand' hook.  */

bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_PUNCT_VALID_P
  return PRINT_OPERAND_PUNCT_VALID_P (code);
#else
  return false;
#endif
}

/* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  Strips
   any target encoding from NAME (unless it begins with '*') and
   prepends user_label_prefix before interning the identifier.  */
tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
{
  const char *skipped = name + (*name == '*' ? 1 : 0);
  const char *stripped = targetm.strip_name_encoding (skipped);
  if (*name != '*' && user_label_prefix[0])
    stripped = ACONCAT ((user_label_prefix, stripped, NULL));
  return get_identifier (stripped);
}

/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE.  */

machine_mode
default_translate_mode_attribute (machine_mode mode)
{
  return mode;
}

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   By default we guess this means that any C type is supported.  If
   we can't map the mode back to a type that would be available in C,
   then reject it.  Special case, here, is the double-word arithmetic
   supported by optabs.c.  */

bool
default_scalar_mode_supported_p (scalar_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
        return true;
      if (precision == SHORT_TYPE_SIZE)
        return true;
      if (precision == INT_TYPE_SIZE)
        return true;
      if (precision == LONG_TYPE_SIZE)
        return true;
      if (precision == LONG_LONG_TYPE_SIZE)
        return true;
      /* Double-word arithmetic from optabs.c.  */
      if (precision == 2 * BITS_PER_WORD)
        return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
        return true;
      if (precision == DOUBLE_TYPE_SIZE)
        return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
        return true;
      return false;

    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_UFRACT:
    case MODE_ACCUM:
    case MODE_UACCUM:
      return false;

    default:
      gcc_unreachable ();
    }
}

/* Return true if libgcc supports floating-point mode MODE (known to
   be supported as a scalar mode).  */

bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  switch (mode)
    {
#ifdef HAVE_SFmode
    case E_SFmode:
#endif
#ifdef HAVE_DFmode
    case E_DFmode:
#endif
#ifdef HAVE_XFmode
    case E_XFmode:
#endif
#ifdef HAVE_TFmode
    case E_TFmode:
#endif
      return true;

    default:
      return false;
    }
}

/* Return the machine mode to use for the type _FloatN, if EXTENDED is
   false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
   supported.
 */
opt_scalar_float_mode
default_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      /* _FloatNx: pick a candidate mode wider than N bits.  */
      opt_scalar_float_mode cand1, cand2;
      scalar_float_mode mode;
      switch (n)
        {
        case 32:
#ifdef HAVE_DFmode
          cand1 = DFmode;
#endif
          break;

        case 64:
#ifdef HAVE_XFmode
          cand1 = XFmode;
#endif
#ifdef HAVE_TFmode
          cand2 = TFmode;
#endif
          break;

        case 128:
          break;

        default:
          /* Those are the only valid _FloatNx types.  */
          gcc_unreachable ();
        }
      if (cand1.exists (&mode)
          && REAL_MODE_FORMAT (mode)->ieee_bits > n
          && targetm.scalar_mode_supported_p (mode)
          && targetm.libgcc_floating_mode_supported_p (mode))
        return cand1;
      if (cand2.exists (&mode)
          && REAL_MODE_FORMAT (mode)->ieee_bits > n
          && targetm.scalar_mode_supported_p (mode)
          && targetm.libgcc_floating_mode_supported_p (mode))
        return cand2;
    }
  else
    {
      /* _FloatN: require a mode of exactly N IEEE bits.  */
      opt_scalar_float_mode cand;
      scalar_float_mode mode;
      switch (n)
        {
        case 16:
          /* Always enable _Float16 if we have basic support for the mode.
             Targets can control the range and precision of operations on
             the _Float16 type using TARGET_C_EXCESS_PRECISION.  */
#ifdef HAVE_HFmode
          cand = HFmode;
#endif
          break;

        case 32:
#ifdef HAVE_SFmode
          cand = SFmode;
#endif
          break;

        case 64:
#ifdef HAVE_DFmode
          cand = DFmode;
#endif
          break;

        case 128:
#ifdef HAVE_TFmode
          cand = TFmode;
#endif
          break;

        default:
          break;
        }
      if (cand.exists (&mode)
          && REAL_MODE_FORMAT (mode)->ieee_bits == n
          && targetm.scalar_mode_supported_p (mode)
          && targetm.libgcc_floating_mode_supported_p (mode))
        return cand;
    }
  return opt_scalar_float_mode ();
}

/* Define this to return true if the _Floatn and _Floatnx built-in functions
   should implicitly enable the built-in function without the __builtin_ prefix
   in addition to the normal built-in function with the __builtin_ prefix.  The
   default is to only enable built-in functions without the __builtin_ prefix
   for the GNU C language.  The argument FUNC is the enum builtin_in_function
   id of the function to be enabled.  */

bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
{
  /* Computed once on first call and cached.  */
  static bool first_time_p = true;
  static bool c_or_objective_c;

  if (first_time_p)
    {
      first_time_p = false;
      c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
    }

  return c_or_objective_c;
}

/* Make some target macros usable by target-independent code.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}

/* True if the target supports floating-point exceptions and rounding
   modes.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}

/* True if the target supports decimal floating point.
 */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions.  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}

/* Return true if we predict the loop LOOP will be transformed to a
   low-overhead loop, otherwise return false.

   By default, false is returned, as this hook's applicability should be
   verified for each target.  Target maintainers should re-define the hook
   if the target can take advantage of it.  */

bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
  return false;
}

/* NULL if INSN insn is valid within a low-overhead loop, otherwise returns
   an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL.  A called function may clobber any special registers required
   for low-overhead looping.  Additionally, some targets (eg, PPC) use the count
   register for branch on table instructions.  We reject the doloop pattern in
   these cases.  */

const char *
default_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in loop.";

  if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
    return "Computed branch in the loop.";

  return NULL;
}

/* Mapping of builtin functions to vectorized variants.  */

tree
default_builtin_vectorized_function (unsigned int, tree, tree)
{
  return NULL_TREE;
}

/* Mapping of target builtin functions to vectorized variants.  */

tree
default_builtin_md_vectorized_function (tree, tree, tree)
{
  return NULL_TREE;
}

/* Default vectorizer cost model values.  */

int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                    tree vectype,
                                    int misalign ATTRIBUTE_UNUSED)
{
  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
    case vec_perm:
    case vec_promote_demote:
      return 1;

    case unaligned_load:
    case unaligned_store:
      return 2;

    case cond_branch_taken:
      return 3;

    case vec_construct:
      return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;

    default:
      gcc_unreachable ();
    }
}

/* Reciprocal.  */

tree
default_builtin_reciprocal (tree)
{
  return NULL_TREE;
}

/* Generic hook taking (CUMULATIVE_ARGS, function_arg_info) that
   returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
                                          const function_arg_info &)
{
  return false;
}

/* Likewise, but returning true.  */

bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
                                         const function_arg_info &)
{
  return true;
}

/* Likewise, but returning zero.  */

int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
                                     const function_arg_info &)
{
  return 0;
}

/* Generic hook taking a CUMULATIVE_ARGS pointer and a tree that does
   nothing.  */

void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
                                tree ATTRIBUTE_UNUSED)
{
}

/* The default implementation of TARGET_FUNCTION_ARG_ADVANCE: a target
   that takes function arguments must override this hook.  */

void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default implementation of TARGET_FUNCTION_ARG_OFFSET.  */

HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
{
  return 0;
}

/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
   upward, but pad short args downward on big-endian machines.  */

pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
{
  if (!BYTES_BIG_ENDIAN)
    return PAD_UPWARD;

  unsigned HOST_WIDE_INT size;
  if (mode == BLKmode)
    {
      if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
        return PAD_UPWARD;
      size = int_size_in_bytes (type);
    }
  else
    /* Targets with variable-sized modes must override this hook
       and handle variable-sized modes explicitly.  */
    size = GET_MODE_SIZE (mode).to_constant ();

  if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
    return PAD_DOWNWARD;

  return PAD_UPWARD;
}

/* The default implementation of TARGET_FUNCTION_ARG: a target that
   takes function arguments must override this hook.  */

rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Likewise for TARGET_FUNCTION_INCOMING_ARG.  */

rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* The default implementation of TARGET_FUNCTION_ARG_BOUNDARY.  */

unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
                               const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

/* The default implementation of TARGET_FUNCTION_ARG_ROUND_BOUNDARY.  */

unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
                                     const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

/* Generic hook taking a bitmap that does nothing.  */

void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}

/* Default hook for TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN: no argument
   is invalid.  */

const char *
hook_invalid_arg_for_unprototyped_fn (
        const_tree typelist ATTRIBUTE_UNUSED,
        const_tree funcdecl ATTRIBUTE_UNUSED,
        const_tree val ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Initialize the stack protection decls.  */

/* Stack protection related decls living in libgcc.
 */
static GTY(()) tree stack_chk_guard_decl;

/* The default implementation of TARGET_STACK_PROTECT_GUARD: lazily
   build (and cache) the external __stack_chk_guard variable provided
   by libgcc.  */

tree
default_stack_protect_guard (void)
{
  tree t = stack_chk_guard_decl;

  if (t == NULL)
    {
      rtx x;

      t = build_decl (UNKNOWN_LOCATION,
                      VAR_DECL, get_identifier ("__stack_chk_guard"),
                      ptr_type_node);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;

      /* Do not share RTL as the declaration is visible outside of
         current function.  */
      x = DECL_RTL (t);
      RTX_FLAG (x, used) = 1;

      stack_chk_guard_decl = t;
    }

  return t;
}

/* Cached FUNCTION_DECL for the stack-protector failure routine.  */
static GTY(()) tree stack_chk_fail_decl;

/* The default implementation of TARGET_STACK_PROTECT_FAIL: build a
   call to the libgcc routine __stack_chk_fail.  */

tree
default_external_stack_protect_fail (void)
{
  tree t = stack_chk_fail_decl;

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION,
                      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
      DECL_VISIBILITY_SPECIFIED (t) = 1;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
}

/* A variant of the stack-protector failure hook that calls the local
   __stack_chk_fail_local for PIC code when the assembler supports
   hidden symbols; otherwise falls back to the external routine.  */

tree
default_hidden_stack_protect_fail (void)
{
#ifndef HAVE_GAS_HIDDEN
  return default_external_stack_protect_fail ();
#else
  tree t = stack_chk_fail_decl;

  if (!flag_pic)
    return default_external_stack_protect_fail ();

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
                      get_identifier ("__stack_chk_fail_local"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY_SPECIFIED (t) = 1;
#if 1
      /*
       * This is a hack:
       * It appears that our gas does not generate @PLT for hidden
       * symbols.  It could be that we need a newer version, or that
       * this local function is handled differently on linux.
       */
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
#else
      DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;
#endif

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
#endif
}

/* Generic hook: true iff X is a commutative rtx.  */

bool
hook_bool_const_rtx_commutative_p (const_rtx x,
                                   int outer_code ATTRIBUTE_UNUSED)
{
  return COMMUTATIVE_P (x);
}

/* The default implementation of TARGET_FUNCTION_VALUE: defer to the
   old FUNCTION_VALUE macro when the target defines it.  */

rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
                        const_tree fn_decl_or_type,
                        bool outgoing ATTRIBUTE_UNUSED)
{
  /* The old interface doesn't handle receiving the function type.  */
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

#ifdef FUNCTION_VALUE
  return FUNCTION_VALUE (ret_type, fn_decl_or_type);
#else
  gcc_unreachable ();
#endif
}

/* The default implementation of TARGET_LIBCALL_VALUE: defer to the old
   LIBCALL_VALUE macro when the target defines it.  */

rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
                       const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
  return LIBCALL_VALUE (MACRO_MODE (mode));
#else
  gcc_unreachable ();
#endif
}

/* The default hook for TARGET_FUNCTION_VALUE_REGNO_P.  */

bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
{
#ifdef FUNCTION_VALUE_REGNO_P
  return FUNCTION_VALUE_REGNO_P (regno);
#else
  gcc_unreachable ();
#endif
}

/* The default implementation of TARGET_INTERNAL_ARG_POINTER.  */

rtx
default_internal_arg_pointer (void)
{
  /* If the reg that the virtual arg pointer will be translated into is
     not a fixed reg or is the stack pointer, make a copy of the virtual
     arg pointer, and address parms via the copy.  The frame pointer is
     considered fixed even though it is not marked as such.  */
  if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
       || ! (fixed_regs[ARG_POINTER_REGNUM]
             || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}

/* The default implementation of TARGET_STATIC_CHAIN: use the
   STATIC_CHAIN_REGNUM family of macros when defined, else report
   nested functions as unsupported.  */

rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  if (incoming_p)
    {
#ifdef STATIC_CHAIN_INCOMING_REGNUM
      return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
#endif
    }

#ifdef STATIC_CHAIN_REGNUM
  return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
#endif

  {
    /* Report the problem only once per compilation.  */
    static bool issued_error;
    if (!issued_error)
      {
        issued_error = true;
        sorry ("nested functions not supported on this target");
      }

    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
    return gen_rtx_MEM (Pmode, stack_pointer_rtx);
  }
}

/* The default implementation of TARGET_TRAMPOLINE_INIT: trampolines
   are not supported.  */

void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
                         rtx ARG_UNUSED (r_chain))
{
  sorry ("nested function trampolines not supported on this target");
}

/* The default implementation of TARGET_RETURN_POPS_ARGS.  */

poly_int64
default_return_pops_args (tree, tree, poly_int64)
{
  return 0;
}

/* The default implementation of TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.  */

reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
                                         reg_class_t cl,
                                         reg_class_t best_cl ATTRIBUTE_UNUSED)
{
  return cl;
}

/* The default implementation of TARGET_LRA_P.  */

extern bool
default_lra_p (void)
{
  return true;
}

/* The default implementation of TARGET_REGISTER_PRIORITY.  */

int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
{
  return 0;
}

/* The default implementation of TARGET_REGISTER_USAGE_LEVELING_P.  */

extern bool
default_register_usage_leveling_p (void)
{
  return false;
}

/* The default implementation of TARGET_DIFFERENT_ADDR_DISPLACEMENT_P.  */

extern bool
default_different_addr_displacement_p (void)
{
  return false;
}

/* The default implementation of TARGET_SECONDARY_RELOAD.  */

reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
                          reg_class_t reload_class_i ATTRIBUTE_UNUSED,
                          machine_mode reload_mode ATTRIBUTE_UNUSED,
                          secondary_reload_info *sri)
{
  enum reg_class rclass = NO_REGS;
  enum reg_class reload_class = (enum reg_class) reload_class_i;

  /* Reuse the icode recorded by an earlier round, if any.  */
  if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
    {
      sri->icode = sri->prev_sri->t_icode;
      return NO_REGS;
    }
#ifdef SECONDARY_INPUT_RELOAD_CLASS
  if (in_p)
    rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
                                           MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
  if (! in_p)
    rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
                                            MACRO_MODE (reload_mode), x);
#endif
  if (rclass != NO_REGS)
    {
      /* See whether a reload_in/reload_out pattern can be used
         instead of (or in addition to) a secondary register class.  */
      enum insn_code icode
        = direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
                                reload_mode);

      if (icode != CODE_FOR_nothing
          && !insn_operand_matches (icode, in_p, x))
        icode = CODE_FOR_nothing;
      else if (icode != CODE_FOR_nothing)
        {
          const char *insn_constraint, *scratch_constraint;
          enum reg_class insn_class, scratch_class;

          gcc_assert (insn_data[(int) icode].n_operands == 3);
          insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
          if (!*insn_constraint)
            insn_class = ALL_REGS;
          else
            {
              if (in_p)
                {
                  gcc_assert (*insn_constraint == '=');
                  insn_constraint++;
                }
              insn_class = (reg_class_for_constraint
                            (lookup_constraint (insn_constraint)));
              gcc_assert (insn_class != NO_REGS);
            }

          scratch_constraint = insn_data[(int) icode].operand[2].constraint;
          /* The scratch register's constraint must start with "=&",
             except for an input reload, where only "=" is necessary,
             and where it might be beneficial to re-use registers from
             the input.  */
          gcc_assert (scratch_constraint[0] == '='
                      && (in_p || scratch_constraint[1] == '&'));
          scratch_constraint++;
          if (*scratch_constraint == '&')
            scratch_constraint++;
          scratch_class = (reg_class_for_constraint
                           (lookup_constraint (scratch_constraint)));

          if (reg_class_subset_p (reload_class, insn_class))
            {
              gcc_assert (scratch_class == rclass);
              rclass = NO_REGS;
            }
          else
            rclass = insn_class;

        }
      if (rclass == NO_REGS)
        sri->icode = icode;
      else
        sri->t_icode = icode;
    }
  return rclass;
}

/* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE.  */

machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
{
  /* For old-style (non-LRA) reload, widen narrow integral modes to a
     full word.  */
  if (!targetm.lra_p ()
      && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
      && INTEGRAL_MODE_P (mode))
    return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
  return mode;
}

/* By default, if flag_pic is true, then neither local nor global relocs
   should be placed in readonly memory.  */

int
default_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 0;
}

/* By default, address diff vectors are generated
   for jump tables when flag_pic is true.  */

bool
default_generate_pic_addr_diff_vec (void)
{
  return flag_pic;
}

/* By default, do no modification.  */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
                                         tree id)
{
  return id;
}

/* The default implementation of TARGET_STATIC_RTX_ALIGNMENT.  */

HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
{
  return GET_MODE_ALIGNMENT (mode);
}

/* The default implementation of TARGET_CONSTANT_ALIGNMENT.
*/ 1218 1219 HOST_WIDE_INT 1220 default_constant_alignment (const_tree, HOST_WIDE_INT align) 1221 { 1222 return align; 1223 } 1224 1225 /* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings 1226 to at least BITS_PER_WORD but otherwise makes no changes. */ 1227 1228 HOST_WIDE_INT 1229 constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align) 1230 { 1231 if (TREE_CODE (exp) == STRING_CST) 1232 return MAX (align, BITS_PER_WORD); 1233 return align; 1234 } 1235 1236 /* Default to natural alignment for vector types, bounded by 1237 MAX_OFILE_ALIGNMENT. */ 1238 1239 HOST_WIDE_INT 1240 default_vector_alignment (const_tree type) 1241 { 1242 unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT; 1243 tree size = TYPE_SIZE (type); 1244 if (tree_fits_uhwi_p (size)) 1245 align = tree_to_uhwi (size); 1246 1247 return align < MAX_OFILE_ALIGNMENT ? align : MAX_OFILE_ALIGNMENT; 1248 } 1249 1250 /* The default implementation of 1251 TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */ 1252 1253 poly_uint64 1254 default_preferred_vector_alignment (const_tree type) 1255 { 1256 return TYPE_ALIGN (type); 1257 } 1258 1259 /* By default assume vectors of element TYPE require a multiple of the natural 1260 alignment of TYPE. TYPE is naturally aligned if IS_PACKED is false. */ 1261 bool 1262 default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed) 1263 { 1264 return ! is_packed; 1265 } 1266 1267 /* By default, assume that a target supports any factor of misalignment 1268 memory access if it supports movmisalign patten. 1269 is_packed is true if the memory access is defined in a packed struct. 
*/
bool
default_builtin_support_vector_misalignment (machine_mode mode,
					     const_tree type
					       ATTRIBUTE_UNUSED,
					     int misalignment
					       ATTRIBUTE_UNUSED,
					     bool is_packed
					       ATTRIBUTE_UNUSED)
{
  /* Misaligned accesses are supported exactly when the target provides
     a movmisalign pattern for MODE.  */
  if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
    return true;
  return false;
}

/* By default, only attempt to parallelize bitwise operations, and
   possibly adds/subtracts using bit-twiddling.  */

machine_mode
default_preferred_simd_mode (scalar_mode)
{
  return word_mode;
}

/* By default do not split reductions further.  */

machine_mode
default_split_reduction (machine_mode mode)
{
  return mode;
}

/* By default only the preferred vector mode is tried.  */

unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
{
  return 0;
}

/* The default implementation of TARGET_VECTORIZE_RELATED_MODE.  */

opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
				scalar_mode element_mode,
				poly_uint64 nunits)
{
  machine_mode result_mode;
  /* If NUNITS is zero, derive the element count from the sizes of the
     vector and element modes; then accept the resulting mode only if
     it is a vector mode the target supports.  */
  if ((maybe_ne (nunits, 0U)
       || multiple_p (GET_MODE_SIZE (vector_mode),
		      GET_MODE_SIZE (element_mode), &nunits))
      && mode_for_vector (element_mode, nunits).exists (&result_mode)
      && VECTOR_MODE_P (result_mode)
      && targetm.vector_mode_supported_p (result_mode))
    return result_mode;

  return opt_machine_mode ();
}

/* By default a vector of integers is used as a mask.  */

opt_machine_mode
default_get_mask_mode (machine_mode mode)
{
  return related_int_vector_mode (mode);
}

/* By default consider masked stores to be expensive.
*/

bool
default_empty_mask_is_expensive (unsigned ifn)
{
  return ifn == IFN_MASK_STORE;
}

/* By default, the cost model accumulates three separate costs (prologue,
   loop body, and epilogue) for a vectorized loop or block.  So allocate an
   array of three unsigned ints, set it to zero, and return its address.  */

void *
default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *cost = XNEWVEC (unsigned, 3);
  cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
  return cost;
}

/* By default, the cost model looks up the cost of the given statement
   kind and mode, multiplies it by the occurrence count, accumulates
   it into the cost specified by WHERE, and returns the cost added.  */

unsigned
default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		       class _stmt_vec_info *stmt_info, int misalign,
		       enum vect_cost_model_location where)
{
  unsigned *cost = (unsigned *) data;
  unsigned retval = 0;

  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  int stmt_cost = targetm.vectorize.builtin_vectorization_cost (kind, vectype,
								misalign);
  /* Statements in an inner loop relative to the loop being
     vectorized are weighted more heavily.  The value here is
     arbitrary and could potentially be improved with analysis.  */
  if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
    count *= 50;  /* FIXME.  */

  retval = (unsigned) (count * stmt_cost);
  cost[where] += retval;

  return retval;
}

/* By default, the cost model just returns the accumulated costs.
*/

void
default_finish_cost (void *data, unsigned *prologue_cost,
		     unsigned *body_cost, unsigned *epilogue_cost)
{
  unsigned *cost = (unsigned *) data;
  *prologue_cost = cost[vect_prologue];
  *body_cost = cost[vect_body];
  *epilogue_cost = cost[vect_epilogue];
}

/* Free the cost data.  */

void
default_destroy_cost_data (void *data)
{
  free (data);
}

/* Determine whether or not a pointer mode is valid.  Assume defaults
   of ptr_mode or Pmode - can be overridden.  */
bool
default_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == ptr_mode || mode == Pmode);
}

/* Determine whether the memory reference specified by REF may alias
   the C library's errno location.  */
bool
default_ref_may_alias_errno (ao_ref *ref)
{
  tree base = ao_ref_base (ref);
  /* The default implementation assumes the errno location is
     a declaration of type int or is always accessed via a
     pointer to int.  We assume that accesses to errno are
     not deliberately obfuscated (even in conforming ways).  */
  if (TYPE_UNSIGNED (TREE_TYPE (base))
      || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
    return false;
  /* The default implementation assumes an errno location declaration
     is never defined in the current compilation unit and may not be
     aliased by a local variable.  */
  if (DECL_P (base)
      && DECL_EXTERNAL (base)
      && !TREE_STATIC (base))
    return true;
  else if (TREE_CODE (base) == MEM_REF
	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
    {
      /* A dereference of an SSA pointer may alias errno unless the
	 points-to information rules out anything/nonlocal storage.  */
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
      return !pi || pi->pt.anything || pi->pt.nonlocal;
    }
  return false;
}

/* Return the mode for a pointer to a given ADDRSPACE,
   defaulting to ptr_mode for all address spaces.
*/

scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return ptr_mode;
}

/* Return the mode for an address in a given ADDRSPACE,
   defaulting to Pmode for all address spaces.  */

scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return Pmode;
}

/* Named address space version of valid_pointer_mode.
   To match the above, the same modes apply to all address spaces.  */

bool
default_addr_space_valid_pointer_mode (scalar_int_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.valid_pointer_mode (mode);
}

/* Some places still assume that all pointer or address modes are the
   standard Pmode and ptr_mode.  These optimizations become invalid if
   the target actually supports multiple different modes.  For now,
   we disable such optimizations on such targets, using this function.  */

bool
target_default_pointer_address_modes_p (void)
{
  /* The target uses non-default address/pointer modes iff it overrides
     either of these two hooks.  */
  if (targetm.addr_space.address_mode != default_addr_space_address_mode)
    return false;
  if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
    return false;

  return true;
}

/* Named address space version of legitimate_address_p.
   By default, all address spaces have the same form.  */

bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
					 bool strict,
					 addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.legitimate_address_p (mode, mem, strict);
}

/* Named address space version of LEGITIMIZE_ADDRESS.
   By default, all address spaces have the same form.
*/

rtx
default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.legitimize_address (x, oldx, mode);
}

/* The default hook for determining if one named address space is a subset of
   another and to return which address space to use as the common address
   space.  */

bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
{
  return (subset == superset);
}

/* The default hook for determining if 0 within a named address
   space is a valid address.  */

bool
default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
{
  return false;
}

/* The default hook for debugging the address space is to return the
   address space number to indicate DW_AT_address_class.  */
int
default_addr_space_debug (addr_space_t as)
{
  return as;
}

/* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
   Don't complain about any address space.  */

void
default_addr_space_diagnose_usage (addr_space_t, location_t)
{
}


/* The default hook for TARGET_ADDR_SPACE_CONVERT.  This hook should never be
   called for targets with only a generic address space.  */

rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
			    tree from_type ATTRIBUTE_UNUSED,
			    tree to_type ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* The default implementation of TARGET_HARD_REGNO_NREGS.  */

unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
{
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
}

bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
  return true;
}

/* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P.  */

bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
				  addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return false;
}

bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
					 tree ARG_UNUSED (name),
					 tree ARG_UNUSED (args),
					 int ARG_UNUSED (flags))
{
  warning (OPT_Wattributes,
	   "target attribute is not supported on this machine");

  return false;
}

bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
				    tree ARG_UNUSED (pop_target))
{
  /* If args is NULL the caller is handle_pragma_pop_options ().  In that case,
     emit no warning because "#pragma GCC pop_target" is valid on targets that
     do not have the "target" pragma.  */
  if (args)
    warning (OPT_Wpragmas,
	     "%<#pragma GCC target%> is not supported for this machine");

  return false;
}

bool
default_target_can_inline_p (tree caller, tree callee)
{
  tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
  tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  /* A function without an explicit target attribute uses the
     command-line defaults.  */
  if (! callee_opts)
    callee_opts = target_option_default_node;
  if (! caller_opts)
    caller_opts = target_option_default_node;

  /* If both caller and callee have attributes, assume that if the
     pointer is different, the two functions have different target
     options since build_target_option_node uses a hash table for the
     options.  */
  return callee_opts == caller_opts;
}

/* If the machine does not have a case insn that compares the bounds,
   this means extra overhead for dispatch tables, which raises the
   threshold for using them.  */

unsigned int
default_case_values_threshold (void)
{
  return (targetm.have_casesi () ? 4 : 5);
}

bool
default_have_conditional_execution (void)
{
  return HAVE_conditional_execution;
}

/* By default we assume that c99 functions are present at the runtime,
   but sincos is not.  */
bool
default_libc_has_function (enum function_class fn_class)
{
  if (fn_class == function_c94
      || fn_class == function_c99_misc
      || fn_class == function_c99_math_complex)
    return true;

  return false;
}

/* By default assume that libc does not have a fast implementation.  */

bool
default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
  return false;
}

bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED)
{
  return true;
}

bool
no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED)
{
  return false;
}

tree
default_builtin_tm_load_store (tree ARG_UNUSED (type))
{
  return NULL_TREE;
}

/* Compute cost of moving registers to/from memory.  */

int
default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
#ifndef MEMORY_MOVE_COST
  return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
  return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}

/* Compute cost of moving data from a register of class FROM to one of
   TO, using MODE.
*/

int
default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t from ATTRIBUTE_UNUSED,
			    reg_class_t to ATTRIBUTE_UNUSED)
{
#ifndef REGISTER_MOVE_COST
  return 2;
#else
  return REGISTER_MOVE_COST (MACRO_MODE (mode),
			     (enum reg_class) from, (enum reg_class) to);
#endif
}

/* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS.  */

bool
default_slow_unaligned_access (machine_mode, unsigned int)
{
  return STRICT_ALIGNMENT;
}

/* The default implementation of TARGET_ESTIMATED_POLY_VALUE.  */

HOST_WIDE_INT
default_estimated_poly_value (poly_int64 x)
{
  /* Use the constant coefficient as the estimate; the runtime
     coefficients are unknown here.  */
  return x.coeffs[0];
}

/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
   behavior.  SPEED_P is true if we are compiling for speed.  */

unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
{
  unsigned int move_ratio;
#ifdef MOVE_RATIO
  move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
  move_ratio = 2;
#else /* No cpymem patterns, pick a default.  */
  move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
  return move_ratio;
}

/* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
   used; return FALSE if the cpymem/setmem optab should be expanded, or
   a call to memcpy emitted.
*/

bool
default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
					unsigned int alignment,
					enum by_pieces_operation op,
					bool speed_p)
{
  /* Select the piece-size cap and the instruction-count threshold
     appropriate for this kind of by-pieces operation.  */
  unsigned int limit = 0;
  unsigned int threshold = 0;

  switch (op)
    {
    case MOVE_BY_PIECES:
      limit = MOVE_MAX_PIECES;
      threshold = get_move_ratio (speed_p);
      break;
    case STORE_BY_PIECES:
      limit = STORE_MAX_PIECES;
      threshold = get_move_ratio (speed_p);
      break;
    case CLEAR_BY_PIECES:
      limit = STORE_MAX_PIECES;
      threshold = CLEAR_RATIO (speed_p);
      break;
    case SET_BY_PIECES:
      limit = STORE_MAX_PIECES;
      threshold = SET_RATIO (speed_p);
      break;
    case COMPARE_BY_PIECES:
      limit = COMPARE_MAX_PIECES;
      /* Pick a likely default, just as in get_move_ratio.  */
      threshold = speed_p ? 15 : 3;
      break;
    }

  /* Use the by-pieces machinery only when it needs fewer instructions
     than the target's ratio allows.  */
  return by_pieces_ninsns (size, alignment, limit + 1, op) < threshold;
}

/* This hook controls code generation for expanding a memcmp operation by
   pieces.  Return 1 for the normal pattern of compare/jump after each pair
   of loads, or a higher number to reduce the number of branches.  */

int
default_compare_by_pieces_branch_ratio (machine_mode)
{
  return 1;
}

/* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
   entry.  If RECORD_P is true and the target supports named sections,
   the location of the NOPs will be recorded in a special object section
   called "__patchable_function_entries".  This routine may be called
   twice per function to put NOPs before and after the function
   entry.
*/

void
default_print_patchable_function_entry (FILE *file,
					unsigned HOST_WIDE_INT patch_area_size,
					bool record_p)
{
  const char *nop_templ = 0;
  int code_num;
  rtx_insn *my_nop = make_insn_raw (gen_nop ());

  /* We use the template alone, relying on the (currently sane) assumption
     that the NOP template does not have variable operands.  */
  code_num = recog_memoized (my_nop);
  nop_templ = get_insn_template (code_num, my_nop);

  if (record_p && targetm_common.have_named_sections)
    {
      char buf[256];
      static int patch_area_number;
      section *previous_section = in_section;
      const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);

      gcc_assert (asm_op != NULL);
      /* Emit a pointer to a fresh internal label into the special
	 section, then switch back and output the label itself at the
	 patch area.  */
      patch_area_number++;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);

      switch_to_section (get_section ("__patchable_function_entries",
				      SECTION_WRITE | SECTION_RELRO, NULL));
      assemble_align (POINTER_SIZE);
      fputs (asm_op, file);
      assemble_name_raw (file, buf);
      fputc ('\n', file);

      switch_to_section (previous_section);
      ASM_OUTPUT_LABEL (file, buf);
    }

  unsigned i;
  for (i = 0; i < patch_area_size; ++i)
    output_asm_insn (nop_templ, NULL);
}

bool
default_profile_before_prologue (void)
{
#ifdef PROFILE_BEFORE_PROLOGUE
  return true;
#else
  return false;
#endif
}

/* The default implementation of TARGET_PREFERRED_RELOAD_CLASS.  */

reg_class_t
default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
				reg_class_t rclass)
{
#ifdef PREFERRED_RELOAD_CLASS
  return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
#else
  return rclass;
#endif
}

/* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS.
*/

reg_class_t
default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
				       reg_class_t rclass)
{
  return rclass;
}

/* The default implementation of TARGET_PREFERRED_RENAME_CLASS.  */
reg_class_t
default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
{
  return NO_REGS;
}

/* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P.  */

bool
default_class_likely_spilled_p (reg_class_t rclass)
{
  /* A class containing a single register is trivially likely to be
     spilled.  */
  return (reg_class_size[(int) rclass] == 1);
}

/* The default implementation of TARGET_CLASS_MAX_NREGS.  */

unsigned char
default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
  return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
					  MACRO_MODE (mode));
#else
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  unsigned int size = GET_MODE_SIZE (mode).to_constant ();
  return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}

/* Determine the debugging unwind mechanism for the target.  */

enum unwind_info_type
default_debug_unwind_info (void)
{
  /* If the target wants to force the use of dwarf2 unwind info, let it.  */
  /* ??? Change all users to the hook, then poison this.  */
#ifdef DWARF2_FRAME_INFO
  if (DWARF2_FRAME_INFO)
    return UI_DWARF2;
#endif

  /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
#ifdef DWARF2_DEBUGGING_INFO
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return UI_DWARF2;
#endif

  return UI_NONE;
}

/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   must define this hook.
*/

unsigned int
default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
{
  gcc_unreachable ();
}

/* Determine the correct mode for a Dwarf frame register that represents
   register REGNO.  */

machine_mode
default_dwarf_frame_reg_mode (int regno)
{
  machine_mode save_mode = reg_raw_mode[regno];

  /* If the EH-edge ABI only preserves part of the register in SAVE_MODE,
     fall back to a mode that can be saved and restored in full.  */
  if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
					      regno, save_mode))
    save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
  return save_mode;
}

/* To be used by targets where reg_raw_mode doesn't return the right
   mode for registers used in apply_builtin_return and apply_builtin_arg.  */

fixed_size_mode
default_get_reg_raw_mode (int regno)
{
  /* Targets must override this hook if the underlying register is
     variable-sized.  */
  return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}

/* Return true if a leaf function should stay leaf even with profiling
   enabled.  */

bool
default_keep_leaf_when_profiled ()
{
  return false;
}

/* Return true if the state of option OPTION should be stored in PCH files
   and checked by default_pch_valid_p.  Store the option's current state
   in STATE if so.  */

static inline bool
option_affects_pch_p (int option, struct cl_option_state *state)
{
  if ((cl_options[option].flags & CL_TARGET) == 0)
    return false;
  if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
    return false;
  /* Options stored in target_flags are handled separately by
     check_pch_target_flags when the target provides that hook.  */
  if (option_flag_var (option, &global_options) == &target_flags)
    if (targetm.check_pch_target_flags)
      return false;
  return get_option_state (&global_options, option, state);
}

/* Default version of get_pch_validity.
   By default, every flag difference is fatal; that will be mostly right for
   most targets, but completely right for very few.
*/

void *
default_get_pch_validity (size_t *sz)
{
  struct cl_option_state state;
  size_t i;
  char *result, *r;

  /* Layout of the validity blob: one byte each for flag_pic and
     flag_pie, optionally target_flags, then the state of every
     PCH-relevant option.  */
  *sz = 2;
  if (targetm.check_pch_target_flags)
    *sz += sizeof (target_flags);
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      *sz += state.size;

  result = r = XNEWVEC (char, *sz);
  r[0] = flag_pic;
  r[1] = flag_pie;
  r += 2;
  if (targetm.check_pch_target_flags)
    {
      memcpy (r, &target_flags, sizeof (target_flags));
      r += sizeof (target_flags);
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	memcpy (r, state.data, state.size);
	r += state.size;
      }

  return result;
}

/* Return a message which says that a PCH file was created with a different
   setting of OPTION.  */

static const char *
pch_option_mismatch (const char *option)
{
  return xasprintf (_("created and used with differing settings of '%s'"),
		    option);
}

/* Default version of pch_valid_p.  */

const char *
default_pch_valid_p (const void *data_p, size_t len)
{
  struct cl_option_state state;
  const char *data = (const char *)data_p;
  size_t i;

  /* -fpic and -fpie also usually make a PCH invalid.  */
  if (data[0] != flag_pic)
    return _("created and used with different settings of %<-fpic%>");
  if (data[1] != flag_pie)
    return _("created and used with different settings of %<-fpie%>");
  data += 2;

  /* Check target_flags.  */
  if (targetm.check_pch_target_flags)
    {
      int tf;
      const char *r;

      memcpy (&tf, data, sizeof (target_flags));
      data += sizeof (target_flags);
      len -= sizeof (target_flags);
      r = targetm.check_pch_target_flags (tf);
      if (r != NULL)
	return r;
    }

  /* Compare each recorded option state against the current state,
     mirroring the layout written by default_get_pch_validity.  */
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	if (memcmp (data, state.data, state.size) != 0)
	  return pch_option_mismatch (cl_options[i].opt_text);
	data += state.size;
	len -= state.size;
      }

  return NULL;
}

/* Default version of cstore_mode.  */

scalar_int_mode
default_cstore_mode (enum insn_code icode)
{
  return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
}

/* Default version of member_type_forces_blk.  */

bool
default_member_type_forces_blk (const_tree, machine_mode)
{
  return false;
}

rtx
default_load_bounds_for_arg (rtx addr ATTRIBUTE_UNUSED,
			     rtx ptr ATTRIBUTE_UNUSED,
			     rtx bnd ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

void
default_store_bounds_for_arg (rtx val ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      rtx bounds ATTRIBUTE_UNUSED,
			      rtx to ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

rtx
default_load_returned_bounds (rtx slot ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

void
default_store_returned_bounds (rtx slot ATTRIBUTE_UNUSED,
			       rtx bounds ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Default version of canonicalize_comparison.  */

void
default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}

/* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.
*/

void
default_atomic_assign_expand_fenv (tree *, tree *, tree *)
{
}

#ifndef PAD_VARARGS_DOWN
#define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
#endif

/* Build an indirect-ref expression over the given TREE, which represents a
   piece of a va_arg() expansion.  */
tree
build_va_arg_indirect_ref (tree addr)
{
  addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
  return addr;
}

/* The "standard" implementation of va_arg: read the value from the
   current (padded) address and increment by the (padded) size.  */

tree
std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			  gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* All of the alignment and movement below is for args-grow-up machines.
     As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
     implement their own specialized gimplify_va_arg_expr routines.  */
  if (ARGS_GROW_DOWNWARD)
    gcc_unreachable ();

  /* Arguments passed by reference are read through a pointer.  */
  indirect = pass_va_arg_by_reference (type);
  if (indirect)
    type = build_pointer_type (type);

  if (targetm.calls.split_complex_arg
      && TREE_CODE (type) == COMPLEX_TYPE
      && targetm.calls.split_complex_arg (type))
    {
      /* A split complex argument is fetched as its two scalar
	 components and recombined.  */
      tree real_part, imag_part;

      real_part = std_gimplify_va_arg_expr (valist,
					    TREE_TYPE (type), pre_p, NULL);
      real_part = get_initialized_tmp_var (real_part, pre_p);

      imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
					    TREE_TYPE (type), pre_p, NULL);
      imag_part = get_initialized_tmp_var (imag_part, pre_p);

      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    }

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align
      && !TYPE_EMPTY_P (type)
      && !integer_zerop (TYPE_SIZE (type)))
    {
      /* valist_tmp = (valist_tmp + boundary - 1) & -boundary.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
			       valist_tmp,
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);
    }

  /* Compute the rounded size of the type.  */
  type_size = arg_size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
			   rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}

/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   not support nested low-overhead loops.  */

bool
can_use_doloop_if_innermost (const widest_int &, const widest_int &,
			     unsigned int loop_depth, bool)
{
  return loop_depth == 1;
}

/* Default implementation of TARGET_OPTAB_SUPPORTED_P.  */

bool
default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
  return true;
}

/* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST.  */

unsigned int
default_max_noce_ifcvt_seq_cost (edge e)
{
  bool predictable_p = predictable_edge_p (e);

  /* Honor an explicitly-set --param value before falling back to the
     BRANCH_COST-based estimate.  */
  if (predictable_p)
    {
      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
	return param_max_rtl_if_conversion_predictable_cost;
    }
  else
    {
      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
	return param_max_rtl_if_conversion_unpredictable_cost;
    }

  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}

/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */

unsigned int
default_min_arithmetic_precision (void)
{
  return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}

/* Default implementation of TARGET_C_EXCESS_PRECISION.
*/

enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
{
  return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
}

/* Default implementation for
   TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.  */
HOST_WIDE_INT
default_stack_clash_protection_alloca_probe_range (void)
{
  return 0;
}

/* The default implementation of TARGET_EARLY_REMAT_MODES.  */

void
default_select_early_remat_modes (sbitmap)
{
}

/* The default implementation of TARGET_PREFERRED_ELSE_VALUE.  */

tree
default_preferred_else_value (unsigned, tree type, unsigned, tree *)
{
  return build_zero_cst (type);
}

/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.  */
bool
default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
{
#ifdef HAVE_speculation_barrier
  return active ? HAVE_speculation_barrier : true;
#else
  return false;
#endif
}

/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   that can be used on targets that never have speculative execution.  */
bool
speculation_safe_value_not_needed (bool active)
{
  return !active;
}

/* Default implementation of the speculation-safe-load builtin.  This
   implementation simply copies val to result and generates a
   speculation_barrier insn, if such a pattern is defined.  */
rtx
default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
				rtx result, rtx val,
				rtx failval ATTRIBUTE_UNUSED)
{
  emit_move_insn (result, val);

#ifdef HAVE_speculation_barrier
  /* Assume the target knows what it is doing: if it defines a
     speculation barrier, but it is not enabled, then assume that one
     isn't needed.  */
  if (HAVE_speculation_barrier)
    emit_insn (gen_speculation_barrier ());
#endif

  return result;
}

#include "gt-targhooks.h"