/* Inlining decision heuristics.
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Analysis used by the inliner and other passes limiting code size growth.

   We estimate for each function
   - function body size
   - average function execution time
   - inlining size benefit (that is how much of function body size
     and its call sequence is expected to disappear by inlining)
   - inlining time benefit
   - function frame size
   For each call
   - call statement size and time

   inline_summary data structures store the above information locally (i.e.
   parameters of the function itself) and globally (i.e. parameters of
   the function created by applying all the inline decisions already
   present in the callgraph).

   We provide an accessor to the inline_summary data structure and
   basic logic updating the parameters when inlining is performed.

   The summaries are context sensitive.  Context means
     1) partial assignment of known constant values of operands
     2) whether function is inlined into the call or not.
   It is easy to add more variants.  To represent function size and time
   that depends on context (i.e. it is known to be optimized away when
   context is known either by inlining or from IP-CP and cloning),
   we use predicates.

   Predicates are logical formulas in conjunctive-disjunctive form
   consisting of clauses.  Clauses are bitmaps specifying what conditions
   must be true.  Conditions are simple tests of the form described above.

   In order to make a predicate (possibly) true, all of its clauses must
   be (possibly) true.  To make a clause (possibly) true, one of conditions
   it mentions must be (possibly) true.  There are fixed bounds on
   number of clauses and conditions and all the manipulation functions
   are conservative in positive direction.  I.e. we may lose precision
   by thinking that a predicate may be true even when it is not.

   estimate_edge_size and estimate_edge_growth can be used to query
   function size/time in the given context.  inline_merge_summary merges
   properties of caller and callee after inlining.

   Finally pass_inline_parameters is exported.  This is used to drive
   computation of function parameters used by the early inliner.  The IPA
   inliner performs analysis via its analyze_function method.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cilk.h"
#include "cfgexpand.h"
#include "gimplify.h"
Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an 101 integer. For anything larger we use gcov_type. */ 102 #define MAX_TIME 500000 103 104 /* Number of bits in integer, but we really want to be stable across different 105 hosts. */ 106 #define NUM_CONDITIONS 32 107 108 enum predicate_conditions 109 { 110 predicate_false_condition = 0, 111 predicate_not_inlined_condition = 1, 112 predicate_first_dynamic_condition = 2 113 }; 114 115 /* Special condition code we use to represent test that operand is compile time 116 constant. */ 117 #define IS_NOT_CONSTANT ERROR_MARK 118 /* Special condition code we use to represent test that operand is not changed 119 across invocation of the function. When operand IS_NOT_CONSTANT it is always 120 CHANGED, however i.e. loop invariants can be NOT_CHANGED given percentage 121 of executions even when they are not compile time constants. */ 122 #define CHANGED IDENTIFIER_NODE 123 124 /* Holders of ipa cgraph hooks: */ 125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder; 126 static struct cgraph_edge_hook_list *edge_removal_hook_holder; 127 static void inline_edge_removal_hook (struct cgraph_edge *, void *); 128 static void inline_edge_duplication_hook (struct cgraph_edge *, 129 struct cgraph_edge *, void *); 130 131 /* VECtor holding inline summaries. 132 In GGC memory because conditions might point to constant trees. */ 133 function_summary <inline_summary *> *inline_summaries; 134 vec<inline_edge_summary_t> inline_edge_summary_vec; 135 136 /* Cached node/edge growths. */ 137 vec<edge_growth_cache_entry> edge_growth_cache; 138 139 /* Edge predicates goes here. */ 140 static object_allocator<predicate> edge_predicate_pool ("edge predicates"); 141 142 /* Return true predicate (tautology). 143 We represent it by empty list of clauses. 
*/ 144 145 static inline struct predicate 146 true_predicate (void) 147 { 148 struct predicate p; 149 p.clause[0] = 0; 150 return p; 151 } 152 153 154 /* Return predicate testing single condition number COND. */ 155 156 static inline struct predicate 157 single_cond_predicate (int cond) 158 { 159 struct predicate p; 160 p.clause[0] = 1 << cond; 161 p.clause[1] = 0; 162 return p; 163 } 164 165 166 /* Return false predicate. First clause require false condition. */ 167 168 static inline struct predicate 169 false_predicate (void) 170 { 171 return single_cond_predicate (predicate_false_condition); 172 } 173 174 175 /* Return true if P is (true). */ 176 177 static inline bool 178 true_predicate_p (struct predicate *p) 179 { 180 return !p->clause[0]; 181 } 182 183 184 /* Return true if P is (false). */ 185 186 static inline bool 187 false_predicate_p (struct predicate *p) 188 { 189 if (p->clause[0] == (1 << predicate_false_condition)) 190 { 191 gcc_checking_assert (!p->clause[1] 192 && p->clause[0] == 1 << predicate_false_condition); 193 return true; 194 } 195 return false; 196 } 197 198 199 /* Return predicate that is set true when function is not inlined. */ 200 201 static inline struct predicate 202 not_inlined_predicate (void) 203 { 204 return single_cond_predicate (predicate_not_inlined_condition); 205 } 206 207 /* Simple description of whether a memory load or a condition refers to a load 208 from an aggregate and if so, how and where from in the aggregate. 209 Individual fields have the same meaning like fields with the same name in 210 struct condition. */ 211 212 struct agg_position_info 213 { 214 HOST_WIDE_INT offset; 215 bool agg_contents; 216 bool by_ref; 217 }; 218 219 /* Add condition to condition list SUMMARY. OPERAND_NUM, SIZE, CODE and VAL 220 correspond to fields of condition structure. AGGPOS describes whether the 221 used operand is loaded from an aggregate and where in the aggregate it is. 
222 It can be NULL, which means this not a load from an aggregate. */ 223 224 static struct predicate 225 add_condition (struct inline_summary *summary, int operand_num, 226 HOST_WIDE_INT size, struct agg_position_info *aggpos, 227 enum tree_code code, tree val) 228 { 229 int i; 230 struct condition *c; 231 struct condition new_cond; 232 HOST_WIDE_INT offset; 233 bool agg_contents, by_ref; 234 235 if (aggpos) 236 { 237 offset = aggpos->offset; 238 agg_contents = aggpos->agg_contents; 239 by_ref = aggpos->by_ref; 240 } 241 else 242 { 243 offset = 0; 244 agg_contents = false; 245 by_ref = false; 246 } 247 248 gcc_checking_assert (operand_num >= 0); 249 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++) 250 { 251 if (c->operand_num == operand_num 252 && c->size == size 253 && c->code == code 254 && c->val == val 255 && c->agg_contents == agg_contents 256 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref))) 257 return single_cond_predicate (i + predicate_first_dynamic_condition); 258 } 259 /* Too many conditions. Give up and return constant true. */ 260 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition) 261 return true_predicate (); 262 263 new_cond.operand_num = operand_num; 264 new_cond.code = code; 265 new_cond.val = val; 266 new_cond.agg_contents = agg_contents; 267 new_cond.by_ref = by_ref; 268 new_cond.offset = offset; 269 new_cond.size = size; 270 vec_safe_push (summary->conds, new_cond); 271 return single_cond_predicate (i + predicate_first_dynamic_condition); 272 } 273 274 275 /* Add clause CLAUSE into the predicate P. */ 276 277 static inline void 278 add_clause (conditions conditions, struct predicate *p, clause_t clause) 279 { 280 int i; 281 int i2; 282 int insert_here = -1; 283 int c1, c2; 284 285 /* True clause. */ 286 if (!clause) 287 return; 288 289 /* False clause makes the whole predicate false. Kill the other variants. 
*/ 290 if (clause == (1 << predicate_false_condition)) 291 { 292 p->clause[0] = (1 << predicate_false_condition); 293 p->clause[1] = 0; 294 return; 295 } 296 if (false_predicate_p (p)) 297 return; 298 299 /* No one should be silly enough to add false into nontrivial clauses. */ 300 gcc_checking_assert (!(clause & (1 << predicate_false_condition))); 301 302 /* Look where to insert the clause. At the same time prune out 303 clauses of P that are implied by the new clause and thus 304 redundant. */ 305 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++) 306 { 307 p->clause[i2] = p->clause[i]; 308 309 if (!p->clause[i]) 310 break; 311 312 /* If p->clause[i] implies clause, there is nothing to add. */ 313 if ((p->clause[i] & clause) == p->clause[i]) 314 { 315 /* We had nothing to add, none of clauses should've become 316 redundant. */ 317 gcc_checking_assert (i == i2); 318 return; 319 } 320 321 if (p->clause[i] < clause && insert_here < 0) 322 insert_here = i2; 323 324 /* If clause implies p->clause[i], then p->clause[i] becomes redundant. 325 Otherwise the p->clause[i] has to stay. */ 326 if ((p->clause[i] & clause) != clause) 327 i2++; 328 } 329 330 /* Look for clauses that are obviously true. I.e. 331 op0 == 5 || op0 != 5. */ 332 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++) 333 { 334 condition *cc1; 335 if (!(clause & (1 << c1))) 336 continue; 337 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition]; 338 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT 339 and thus there is no point for looking for them. 
*/ 340 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT) 341 continue; 342 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++) 343 if (clause & (1 << c2)) 344 { 345 condition *cc1 = 346 &(*conditions)[c1 - predicate_first_dynamic_condition]; 347 condition *cc2 = 348 &(*conditions)[c2 - predicate_first_dynamic_condition]; 349 if (cc1->operand_num == cc2->operand_num 350 && cc1->val == cc2->val 351 && cc2->code != IS_NOT_CONSTANT 352 && cc2->code != CHANGED 353 && cc1->code == invert_tree_comparison (cc2->code, 354 HONOR_NANS (cc1->val))) 355 return; 356 } 357 } 358 359 360 /* We run out of variants. Be conservative in positive direction. */ 361 if (i2 == MAX_CLAUSES) 362 return; 363 /* Keep clauses in decreasing order. This makes equivalence testing easy. */ 364 p->clause[i2 + 1] = 0; 365 if (insert_here >= 0) 366 for (; i2 > insert_here; i2--) 367 p->clause[i2] = p->clause[i2 - 1]; 368 else 369 insert_here = i2; 370 p->clause[insert_here] = clause; 371 } 372 373 374 /* Return P & P2. */ 375 376 static struct predicate 377 and_predicates (conditions conditions, 378 struct predicate *p, struct predicate *p2) 379 { 380 struct predicate out = *p; 381 int i; 382 383 /* Avoid busy work. */ 384 if (false_predicate_p (p2) || true_predicate_p (p)) 385 return *p2; 386 if (false_predicate_p (p) || true_predicate_p (p2)) 387 return *p; 388 389 /* See how far predicates match. */ 390 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++) 391 { 392 gcc_checking_assert (i < MAX_CLAUSES); 393 } 394 395 /* Combine the predicates rest. */ 396 for (; p2->clause[i]; i++) 397 { 398 gcc_checking_assert (i < MAX_CLAUSES); 399 add_clause (conditions, &out, p2->clause[i]); 400 } 401 return out; 402 } 403 404 405 /* Return true if predicates are obviously equal. 
*/ 406 407 static inline bool 408 predicates_equal_p (struct predicate *p, struct predicate *p2) 409 { 410 int i; 411 for (i = 0; p->clause[i]; i++) 412 { 413 gcc_checking_assert (i < MAX_CLAUSES); 414 gcc_checking_assert (p->clause[i] > p->clause[i + 1]); 415 gcc_checking_assert (!p2->clause[i] 416 || p2->clause[i] > p2->clause[i + 1]); 417 if (p->clause[i] != p2->clause[i]) 418 return false; 419 } 420 return !p2->clause[i]; 421 } 422 423 424 /* Return P | P2. */ 425 426 static struct predicate 427 or_predicates (conditions conditions, 428 struct predicate *p, struct predicate *p2) 429 { 430 struct predicate out = true_predicate (); 431 int i, j; 432 433 /* Avoid busy work. */ 434 if (false_predicate_p (p2) || true_predicate_p (p)) 435 return *p; 436 if (false_predicate_p (p) || true_predicate_p (p2)) 437 return *p2; 438 if (predicates_equal_p (p, p2)) 439 return *p; 440 441 /* OK, combine the predicates. */ 442 for (i = 0; p->clause[i]; i++) 443 for (j = 0; p2->clause[j]; j++) 444 { 445 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES); 446 add_clause (conditions, &out, p->clause[i] | p2->clause[j]); 447 } 448 return out; 449 } 450 451 452 /* Having partial truth assignment in POSSIBLE_TRUTHS, return false 453 if predicate P is known to be false. */ 454 455 static bool 456 evaluate_predicate (struct predicate *p, clause_t possible_truths) 457 { 458 int i; 459 460 /* True remains true. */ 461 if (true_predicate_p (p)) 462 return true; 463 464 gcc_assert (!(possible_truths & (1 << predicate_false_condition))); 465 466 /* See if we can find clause we can disprove. */ 467 for (i = 0; p->clause[i]; i++) 468 { 469 gcc_checking_assert (i < MAX_CLAUSES); 470 if (!(p->clause[i] & possible_truths)) 471 return false; 472 } 473 return true; 474 } 475 476 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated 477 instruction will be recomputed per invocation of the inlined call. 
*/ 478 479 static int 480 predicate_probability (conditions conds, 481 struct predicate *p, clause_t possible_truths, 482 vec<inline_param_summary> inline_param_summary) 483 { 484 int i; 485 int combined_prob = REG_BR_PROB_BASE; 486 487 /* True remains true. */ 488 if (true_predicate_p (p)) 489 return REG_BR_PROB_BASE; 490 491 if (false_predicate_p (p)) 492 return 0; 493 494 gcc_assert (!(possible_truths & (1 << predicate_false_condition))); 495 496 /* See if we can find clause we can disprove. */ 497 for (i = 0; p->clause[i]; i++) 498 { 499 gcc_checking_assert (i < MAX_CLAUSES); 500 if (!(p->clause[i] & possible_truths)) 501 return 0; 502 else 503 { 504 int this_prob = 0; 505 int i2; 506 if (!inline_param_summary.exists ()) 507 return REG_BR_PROB_BASE; 508 for (i2 = 0; i2 < NUM_CONDITIONS; i2++) 509 if ((p->clause[i] & possible_truths) & (1 << i2)) 510 { 511 if (i2 >= predicate_first_dynamic_condition) 512 { 513 condition *c = 514 &(*conds)[i2 - predicate_first_dynamic_condition]; 515 if (c->code == CHANGED 516 && (c->operand_num < 517 (int) inline_param_summary.length ())) 518 { 519 int iprob = 520 inline_param_summary[c->operand_num].change_prob; 521 this_prob = MAX (this_prob, iprob); 522 } 523 else 524 this_prob = REG_BR_PROB_BASE; 525 } 526 else 527 this_prob = REG_BR_PROB_BASE; 528 } 529 combined_prob = MIN (this_prob, combined_prob); 530 if (!combined_prob) 531 return 0; 532 } 533 } 534 return combined_prob; 535 } 536 537 538 /* Dump conditional COND. */ 539 540 static void 541 dump_condition (FILE *f, conditions conditions, int cond) 542 { 543 condition *c; 544 if (cond == predicate_false_condition) 545 fprintf (f, "false"); 546 else if (cond == predicate_not_inlined_condition) 547 fprintf (f, "not inlined"); 548 else 549 { 550 c = &(*conditions)[cond - predicate_first_dynamic_condition]; 551 fprintf (f, "op%i", c->operand_num); 552 if (c->agg_contents) 553 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]", 554 c->by_ref ? 
"ref " : "", c->offset); 555 if (c->code == IS_NOT_CONSTANT) 556 { 557 fprintf (f, " not constant"); 558 return; 559 } 560 if (c->code == CHANGED) 561 { 562 fprintf (f, " changed"); 563 return; 564 } 565 fprintf (f, " %s ", op_symbol_code (c->code)); 566 print_generic_expr (f, c->val, 1); 567 } 568 } 569 570 571 /* Dump clause CLAUSE. */ 572 573 static void 574 dump_clause (FILE *f, conditions conds, clause_t clause) 575 { 576 int i; 577 bool found = false; 578 fprintf (f, "("); 579 if (!clause) 580 fprintf (f, "true"); 581 for (i = 0; i < NUM_CONDITIONS; i++) 582 if (clause & (1 << i)) 583 { 584 if (found) 585 fprintf (f, " || "); 586 found = true; 587 dump_condition (f, conds, i); 588 } 589 fprintf (f, ")"); 590 } 591 592 593 /* Dump predicate PREDICATE. */ 594 595 static void 596 dump_predicate (FILE *f, conditions conds, struct predicate *pred) 597 { 598 int i; 599 if (true_predicate_p (pred)) 600 dump_clause (f, conds, 0); 601 else 602 for (i = 0; pred->clause[i]; i++) 603 { 604 if (i) 605 fprintf (f, " && "); 606 dump_clause (f, conds, pred->clause[i]); 607 } 608 fprintf (f, "\n"); 609 } 610 611 612 /* Dump inline hints. 
*/ 613 void 614 dump_inline_hints (FILE *f, inline_hints hints) 615 { 616 if (!hints) 617 return; 618 fprintf (f, "inline hints:"); 619 if (hints & INLINE_HINT_indirect_call) 620 { 621 hints &= ~INLINE_HINT_indirect_call; 622 fprintf (f, " indirect_call"); 623 } 624 if (hints & INLINE_HINT_loop_iterations) 625 { 626 hints &= ~INLINE_HINT_loop_iterations; 627 fprintf (f, " loop_iterations"); 628 } 629 if (hints & INLINE_HINT_loop_stride) 630 { 631 hints &= ~INLINE_HINT_loop_stride; 632 fprintf (f, " loop_stride"); 633 } 634 if (hints & INLINE_HINT_same_scc) 635 { 636 hints &= ~INLINE_HINT_same_scc; 637 fprintf (f, " same_scc"); 638 } 639 if (hints & INLINE_HINT_in_scc) 640 { 641 hints &= ~INLINE_HINT_in_scc; 642 fprintf (f, " in_scc"); 643 } 644 if (hints & INLINE_HINT_cross_module) 645 { 646 hints &= ~INLINE_HINT_cross_module; 647 fprintf (f, " cross_module"); 648 } 649 if (hints & INLINE_HINT_declared_inline) 650 { 651 hints &= ~INLINE_HINT_declared_inline; 652 fprintf (f, " declared_inline"); 653 } 654 if (hints & INLINE_HINT_array_index) 655 { 656 hints &= ~INLINE_HINT_array_index; 657 fprintf (f, " array_index"); 658 } 659 if (hints & INLINE_HINT_known_hot) 660 { 661 hints &= ~INLINE_HINT_known_hot; 662 fprintf (f, " known_hot"); 663 } 664 gcc_assert (!hints); 665 } 666 667 668 /* Record SIZE and TIME under condition PRED into the inline summary. */ 669 670 static void 671 account_size_time (struct inline_summary *summary, int size, int time, 672 struct predicate *pred) 673 { 674 size_time_entry *e; 675 bool found = false; 676 int i; 677 678 if (false_predicate_p (pred)) 679 return; 680 681 /* We need to create initial empty unconitional clause, but otherwie 682 we don't need to account empty times and sizes. */ 683 if (!size && !time && summary->entry) 684 return; 685 686 /* Watch overflow that might result from insane profiles. 
*/ 687 if (time > MAX_TIME * INLINE_TIME_SCALE) 688 time = MAX_TIME * INLINE_TIME_SCALE; 689 gcc_assert (time >= 0); 690 691 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++) 692 if (predicates_equal_p (&e->predicate, pred)) 693 { 694 found = true; 695 break; 696 } 697 if (i == 256) 698 { 699 i = 0; 700 found = true; 701 e = &(*summary->entry)[0]; 702 gcc_assert (!e->predicate.clause[0]); 703 if (dump_file && (dump_flags & TDF_DETAILS)) 704 fprintf (dump_file, 705 "\t\tReached limit on number of entries, " 706 "ignoring the predicate."); 707 } 708 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size)) 709 { 710 fprintf (dump_file, 711 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:", 712 ((double) size) / INLINE_SIZE_SCALE, 713 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new "); 714 dump_predicate (dump_file, summary->conds, pred); 715 } 716 if (!found) 717 { 718 struct size_time_entry new_entry; 719 new_entry.size = size; 720 new_entry.time = time; 721 new_entry.predicate = *pred; 722 vec_safe_push (summary->entry, new_entry); 723 } 724 else 725 { 726 e->size += size; 727 e->time += time; 728 if (e->time > MAX_TIME * INLINE_TIME_SCALE) 729 e->time = MAX_TIME * INLINE_TIME_SCALE; 730 } 731 } 732 733 /* We proved E to be unreachable, redirect it to __bultin_unreachable. */ 734 735 static struct cgraph_edge * 736 redirect_to_unreachable (struct cgraph_edge *e) 737 { 738 struct cgraph_node *callee = !e->inline_failed ? 
e->callee : NULL; 739 struct cgraph_node *target = cgraph_node::get_create 740 (builtin_decl_implicit (BUILT_IN_UNREACHABLE)); 741 742 if (e->speculative) 743 e = e->resolve_speculation (target->decl); 744 else if (!e->callee) 745 e->make_direct (target); 746 else 747 e->redirect_callee (target); 748 struct inline_edge_summary *es = inline_edge_summary (e); 749 e->inline_failed = CIF_UNREACHABLE; 750 e->frequency = 0; 751 e->count = 0; 752 es->call_stmt_size = 0; 753 es->call_stmt_time = 0; 754 if (callee) 755 callee->remove_symbol_and_inline_clones (); 756 return e; 757 } 758 759 /* Set predicate for edge E. */ 760 761 static void 762 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate) 763 { 764 /* If the edge is determined to be never executed, redirect it 765 to BUILTIN_UNREACHABLE to save inliner from inlining into it. */ 766 if (predicate && false_predicate_p (predicate) 767 /* When handling speculative edges, we need to do the redirection 768 just once. Do it always on the direct edge, so we do not 769 attempt to resolve speculation while duplicating the edge. */ 770 && (!e->speculative || e->callee)) 771 e = redirect_to_unreachable (e); 772 773 struct inline_edge_summary *es = inline_edge_summary (e); 774 if (predicate && !true_predicate_p (predicate)) 775 { 776 if (!es->predicate) 777 es->predicate = edge_predicate_pool.allocate (); 778 *es->predicate = *predicate; 779 } 780 else 781 { 782 if (es->predicate) 783 edge_predicate_pool.remove (es->predicate); 784 es->predicate = NULL; 785 } 786 } 787 788 /* Set predicate for hint *P. 
*/ 789 790 static void 791 set_hint_predicate (struct predicate **p, struct predicate new_predicate) 792 { 793 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate)) 794 { 795 if (*p) 796 edge_predicate_pool.remove (*p); 797 *p = NULL; 798 } 799 else 800 { 801 if (!*p) 802 *p = edge_predicate_pool.allocate (); 803 **p = new_predicate; 804 } 805 } 806 807 808 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values. 809 KNOWN_AGGS is a vector of aggreggate jump functions for each parameter. 810 Return clause of possible truths. When INLINE_P is true, assume that we are 811 inlining. 812 813 ERROR_MARK means compile time invariant. */ 814 815 static clause_t 816 evaluate_conditions_for_known_args (struct cgraph_node *node, 817 bool inline_p, 818 vec<tree> known_vals, 819 vec<ipa_agg_jump_function_p> 820 known_aggs) 821 { 822 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition; 823 struct inline_summary *info = inline_summaries->get (node); 824 int i; 825 struct condition *c; 826 827 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++) 828 { 829 tree val; 830 tree res; 831 832 /* We allow call stmt to have fewer arguments than the callee function 833 (especially for K&R style programs). So bound check here (we assume 834 known_aggs vector, if non-NULL, has the same length as 835 known_vals). 
*/ 836 gcc_checking_assert (!known_aggs.exists () 837 || (known_vals.length () == known_aggs.length ())); 838 if (c->operand_num >= (int) known_vals.length ()) 839 { 840 clause |= 1 << (i + predicate_first_dynamic_condition); 841 continue; 842 } 843 844 if (c->agg_contents) 845 { 846 struct ipa_agg_jump_function *agg; 847 848 if (c->code == CHANGED 849 && !c->by_ref 850 && (known_vals[c->operand_num] == error_mark_node)) 851 continue; 852 853 if (known_aggs.exists ()) 854 { 855 agg = known_aggs[c->operand_num]; 856 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref); 857 } 858 else 859 val = NULL_TREE; 860 } 861 else 862 { 863 val = known_vals[c->operand_num]; 864 if (val == error_mark_node && c->code != CHANGED) 865 val = NULL_TREE; 866 } 867 868 if (!val) 869 { 870 clause |= 1 << (i + predicate_first_dynamic_condition); 871 continue; 872 } 873 if (c->code == CHANGED) 874 continue; 875 876 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size) 877 { 878 clause |= 1 << (i + predicate_first_dynamic_condition); 879 continue; 880 } 881 if (c->code == IS_NOT_CONSTANT) 882 continue; 883 884 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val); 885 res = val 886 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val) 887 : NULL; 888 889 if (res && integer_zerop (res)) 890 continue; 891 892 clause |= 1 << (i + predicate_first_dynamic_condition); 893 } 894 return clause; 895 } 896 897 898 /* Work out what conditions might be true at invocation of E. 
*/ 899 900 static void 901 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p, 902 clause_t *clause_ptr, 903 vec<tree> *known_vals_ptr, 904 vec<ipa_polymorphic_call_context> 905 *known_contexts_ptr, 906 vec<ipa_agg_jump_function_p> *known_aggs_ptr) 907 { 908 struct cgraph_node *callee = e->callee->ultimate_alias_target (); 909 struct inline_summary *info = inline_summaries->get (callee); 910 vec<tree> known_vals = vNULL; 911 vec<ipa_agg_jump_function_p> known_aggs = vNULL; 912 913 if (clause_ptr) 914 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition; 915 if (known_vals_ptr) 916 known_vals_ptr->create (0); 917 if (known_contexts_ptr) 918 known_contexts_ptr->create (0); 919 920 if (ipa_node_params_sum 921 && !e->call_stmt_cannot_inline_p 922 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr)) 923 { 924 struct ipa_node_params *parms_info; 925 struct ipa_edge_args *args = IPA_EDGE_REF (e); 926 struct inline_edge_summary *es = inline_edge_summary (e); 927 int i, count = ipa_get_cs_argument_count (args); 928 929 if (e->caller->global.inlined_to) 930 parms_info = IPA_NODE_REF (e->caller->global.inlined_to); 931 else 932 parms_info = IPA_NODE_REF (e->caller); 933 934 if (count && (info->conds || known_vals_ptr)) 935 known_vals.safe_grow_cleared (count); 936 if (count && (info->conds || known_aggs_ptr)) 937 known_aggs.safe_grow_cleared (count); 938 if (count && known_contexts_ptr) 939 known_contexts_ptr->safe_grow_cleared (count); 940 941 for (i = 0; i < count; i++) 942 { 943 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i); 944 tree cst = ipa_value_from_jfunc (parms_info, jf); 945 946 if (!cst && e->call_stmt 947 && i < (int)gimple_call_num_args (e->call_stmt)) 948 { 949 cst = gimple_call_arg (e->call_stmt, i); 950 if (!is_gimple_min_invariant (cst)) 951 cst = NULL; 952 } 953 if (cst) 954 { 955 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO); 956 if (known_vals.exists ()) 957 known_vals[i] = cst; 958 } 959 
else if (inline_p && !es->param[i].change_prob) 960 known_vals[i] = error_mark_node; 961 962 if (known_contexts_ptr) 963 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e, 964 i, jf); 965 /* TODO: When IPA-CP starts propagating and merging aggregate jump 966 functions, use its knowledge of the caller too, just like the 967 scalar case above. */ 968 known_aggs[i] = &jf->agg; 969 } 970 } 971 else if (e->call_stmt && !e->call_stmt_cannot_inline_p 972 && ((clause_ptr && info->conds) || known_vals_ptr)) 973 { 974 int i, count = (int)gimple_call_num_args (e->call_stmt); 975 976 if (count && (info->conds || known_vals_ptr)) 977 known_vals.safe_grow_cleared (count); 978 for (i = 0; i < count; i++) 979 { 980 tree cst = gimple_call_arg (e->call_stmt, i); 981 if (!is_gimple_min_invariant (cst)) 982 cst = NULL; 983 if (cst) 984 known_vals[i] = cst; 985 } 986 } 987 988 if (clause_ptr) 989 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p, 990 known_vals, known_aggs); 991 992 if (known_vals_ptr) 993 *known_vals_ptr = known_vals; 994 else 995 known_vals.release (); 996 997 if (known_aggs_ptr) 998 *known_aggs_ptr = known_aggs; 999 else 1000 known_aggs.release (); 1001 } 1002 1003 1004 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. 
*/ 1005 1006 static void 1007 inline_summary_alloc (void) 1008 { 1009 if (!edge_removal_hook_holder) 1010 edge_removal_hook_holder = 1011 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL); 1012 if (!edge_duplication_hook_holder) 1013 edge_duplication_hook_holder = 1014 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL); 1015 1016 if (!inline_summaries) 1017 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab); 1018 1019 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid) 1020 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1); 1021 } 1022 1023 /* We are called multiple time for given function; clear 1024 data from previous run so they are not cumulated. */ 1025 1026 static void 1027 reset_inline_edge_summary (struct cgraph_edge *e) 1028 { 1029 if (e->uid < (int) inline_edge_summary_vec.length ()) 1030 { 1031 struct inline_edge_summary *es = inline_edge_summary (e); 1032 1033 es->call_stmt_size = es->call_stmt_time = 0; 1034 if (es->predicate) 1035 edge_predicate_pool.remove (es->predicate); 1036 es->predicate = NULL; 1037 es->param.release (); 1038 } 1039 } 1040 1041 /* We are called multiple time for given function; clear 1042 data from previous run so they are not cumulated. 
*/ 1043 1044 static void 1045 reset_inline_summary (struct cgraph_node *node, 1046 inline_summary *info) 1047 { 1048 struct cgraph_edge *e; 1049 1050 info->self_size = info->self_time = 0; 1051 info->estimated_stack_size = 0; 1052 info->estimated_self_stack_size = 0; 1053 info->stack_frame_offset = 0; 1054 info->size = 0; 1055 info->time = 0; 1056 info->growth = 0; 1057 info->scc_no = 0; 1058 if (info->loop_iterations) 1059 { 1060 edge_predicate_pool.remove (info->loop_iterations); 1061 info->loop_iterations = NULL; 1062 } 1063 if (info->loop_stride) 1064 { 1065 edge_predicate_pool.remove (info->loop_stride); 1066 info->loop_stride = NULL; 1067 } 1068 if (info->array_index) 1069 { 1070 edge_predicate_pool.remove (info->array_index); 1071 info->array_index = NULL; 1072 } 1073 vec_free (info->conds); 1074 vec_free (info->entry); 1075 for (e = node->callees; e; e = e->next_callee) 1076 reset_inline_edge_summary (e); 1077 for (e = node->indirect_calls; e; e = e->next_callee) 1078 reset_inline_edge_summary (e); 1079 } 1080 1081 /* Hook that is called by cgraph.c when a node is removed. */ 1082 1083 void 1084 inline_summary_t::remove (cgraph_node *node, inline_summary *info) 1085 { 1086 reset_inline_summary (node, info); 1087 } 1088 1089 /* Remap predicate P of former function to be predicate of duplicated function. 1090 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node, 1091 INFO is inline summary of the duplicated node. 
*/

static struct predicate
remap_predicate_after_duplication (struct predicate *p,
				   clause_t possible_truths,
				   struct inline_summary *info)
{
  struct predicate new_predicate = true_predicate ();
  int j;
  /* A clause with no condition in POSSIBLE_TRUTHS can never become true,
     which makes the whole conjunction false.  Otherwise prune each clause
     down to the conditions that are still possible.  */
  for (j = 0; p->clause[j]; j++)
    if (!(possible_truths & p->clause[j]))
      {
	new_predicate = false_predicate ();
	break;
      }
    else
      add_clause (info->conds, &new_predicate,
		  possible_truths & p->clause[j]);
  return new_predicate;
}

/* Same as remap_predicate_after_duplication but handle hint predicate *P.
   Additionally care about allocating new memory slot for updated predicate
   and set it to NULL when it becomes true or false (and thus uninteresting).
 */

static void
remap_hint_predicate_after_duplication (struct predicate **p,
					clause_t possible_truths,
					struct inline_summary *info)
{
  struct predicate new_predicate;

  if (!*p)
    return;

  new_predicate = remap_predicate_after_duplication (*p,
						     possible_truths, info);
  /* We do not want to free previous predicate; it is used by node origin.  */
  *p = NULL;
  set_hint_predicate (p, new_predicate);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */
void
inline_summary_t::duplicate (cgraph_node *src,
			     cgraph_node *dst,
			     inline_summary *,
			     inline_summary *info)
{
  inline_summary_alloc ();
  /* Start from a bitwise copy of SRC's summary; vectors and predicates
     referenced by pointer are re-copied or remapped below.  */
  memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
  /* TODO: as an optimization, we may avoid copying conditions
     that are known to be false or true.  */
  info->conds = vec_safe_copy (info->conds);

  /* When there are any replacements in the function body, see if we can figure
     out that something was optimized out.  */
  if (ipa_node_params_sum && dst->clone.tree_map)
    {
      vec<size_time_entry, va_gc> *entry = info->entry;
      /* Use SRC parm info since it may not be copied yet.  */
      struct ipa_node_params *parms_info = IPA_NODE_REF (src);
      vec<tree> known_vals = vNULL;
      int count = ipa_get_param_count (parms_info);
      int i, j;
      clause_t possible_truths;
      struct predicate true_pred = true_predicate ();
      size_time_entry *e;
      int optimized_out_size = 0;
      bool inlined_to_p = false;
      struct cgraph_edge *edge, *next;

      info->entry = 0;
      /* Collect the constants the clone substitutes for its parameters so we
	 can evaluate which conditions became statically known.  */
      known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
	{
	  struct ipa_replace_map *r;

	  for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
	    {
	      if (((!r->old_tree && r->parm_num == i)
		   || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
		  && r->replace_p && !r->ref_p)
		{
		  known_vals[i] = r->new_tree;
		  break;
		}
	    }
	}
      possible_truths = evaluate_conditions_for_known_args (dst, false,
							    known_vals,
							    vNULL);
      known_vals.release ();

      account_size_time (info, 0, 0, &true_pred);

      /* Remap size_time vectors.
	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as an optimization, we can also eliminate conditions known
	 to be true.  */
      for (i = 0; vec_safe_iterate (entry, i, &e); i++)
	{
	  struct predicate new_predicate;
	  new_predicate = remap_predicate_after_duplication (&e->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate))
	    optimized_out_size += e->size;
	  else
	    account_size_time (info, e->size, e->time, &new_predicate);
	}

      /* Remap edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->callees; edge; edge = next)
	{
	  struct predicate new_predicate;
	  struct inline_edge_summary *es = inline_edge_summary (edge);
	  next = edge->next_callee;

	  if (!edge->inline_failed)
	    inlined_to_p = true;
	  if (!es->predicate)
	    continue;
	  new_predicate = remap_predicate_after_duplication (es->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate)
	      && !false_predicate_p (es->predicate))
	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
	  edge_set_predicate (edge, &new_predicate);
	}

      /* Remap indirect edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->indirect_calls; edge; edge = next)
	{
	  struct predicate new_predicate;
	  struct inline_edge_summary *es = inline_edge_summary (edge);
	  next = edge->next_callee;

	  gcc_checking_assert (edge->inline_failed);
	  if (!es->predicate)
	    continue;
	  new_predicate = remap_predicate_after_duplication (es->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate)
	      && !false_predicate_p (es->predicate))
	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
	  edge_set_predicate (edge, &new_predicate);
	}
      remap_hint_predicate_after_duplication (&info->loop_iterations,
					      possible_truths, info);
      remap_hint_predicate_after_duplication (&info->loop_stride,
					      possible_truths, info);
      remap_hint_predicate_after_duplication (&info->array_index,
					      possible_truths, info);

      /* If inliner or someone after inliner will ever start producing
	 non-trivial clones, we will get trouble with lack of information
	 about updating self sizes, because size vectors already contains
	 sizes of the callees.  */
      gcc_assert (!inlined_to_p || !optimized_out_size);
    }
  else
    {
      /* No replacements: plain copy.  The hint predicates must still get
	 their own pool slots; a temporary NULL prevents set_hint_predicate
	 from touching the origin's slot.  */
      info->entry = vec_safe_copy (info->entry);
      if (info->loop_iterations)
	{
	  predicate p = *info->loop_iterations;
	  info->loop_iterations = NULL;
	  set_hint_predicate (&info->loop_iterations, p);
	}
      if (info->loop_stride)
	{
	  predicate p = *info->loop_stride;
	  info->loop_stride = NULL;
	  set_hint_predicate (&info->loop_stride, p);
	}
      if (info->array_index)
	{
	  predicate p = *info->array_index;
	  info->array_index = NULL;
	  set_hint_predicate (&info->array_index, p);
	}
    }
  if (!dst->global.inlined_to)
    inline_update_overall_summary (dst);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */

static void
inline_edge_duplication_hook (struct cgraph_edge *src,
			      struct cgraph_edge *dst,
			      ATTRIBUTE_UNUSED void *data)
{
  struct inline_edge_summary *info;
  struct inline_edge_summary *srcinfo;
  inline_summary_alloc ();
  info = inline_edge_summary (dst);
  srcinfo = inline_edge_summary (src);
  memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
  /* The memcpy copied the predicate pointer; clear it first so
     edge_set_predicate allocates a fresh slot for DST.  */
  info->predicate = NULL;
  edge_set_predicate (dst, srcinfo->predicate);
  info->param = srcinfo->param.copy ();
  /* When an indirect edge was resolved into a direct one, drop the extra
     cost accounted for indirect calls.  */
  if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
    {
      info->call_stmt_size -= (eni_size_weights.indirect_call_cost
			       - eni_size_weights.call_cost);
      info->call_stmt_time -= (eni_time_weights.indirect_call_cost
			       - eni_time_weights.call_cost);
    }
}


/* Keep edge cache consistent across edge removal.  */

static void
inline_edge_removal_hook (struct cgraph_edge *edge,
			  void *data ATTRIBUTE_UNUSED)
{
  if (edge_growth_cache.exists ())
    reset_edge_growth_cache (edge);
  reset_inline_edge_summary (edge);
}


/* Initialize growth caches.
*/

void
initialize_growth_caches (void)
{
  if (symtab->edges_max_uid)
    edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
}


/* Free growth caches.  */

void
free_growth_caches (void)
{
  edge_growth_cache.release ();
}


/* Dump edge summaries associated to NODE and recursively to all clones.
   Indent by INDENT.  */

static void
dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
			  struct inline_summary *info)
{
  struct cgraph_edge *edge;
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (edge);
      struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
      int i;

      fprintf (f,
	       "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
	       " time: %2i callee size:%2i stack:%2i",
	       indent, "", callee->name (), callee->order,
	       !edge->inline_failed
	       ? "inlined" : cgraph_inline_failed_string (edge->inline_failed),
	       indent, "", es->loop_depth, edge->frequency,
	       es->call_stmt_size, es->call_stmt_time,
	       (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
	       (int) inline_summaries->get (callee)->estimated_stack_size);

      if (es->predicate)
	{
	  fprintf (f, " predicate: ");
	  dump_predicate (f, info->conds, es->predicate);
	}
      else
	fprintf (f, "\n");
      if (es->param.exists ())
	for (i = 0; i < (int) es->param.length (); i++)
	  {
	    int prob = es->param[i].change_prob;

	    /* change_prob of 0 is reserved for compile time invariants
	       (see param_change_prob).  */
	    if (!prob)
	      fprintf (f, "%*s op%i is compile time invariant\n",
		       indent + 2, "", i);
	    else if (prob != REG_BR_PROB_BASE)
	      fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
		       prob * 100.0 / REG_BR_PROB_BASE);
	  }
      /* For already-inlined edges recurse into the callee's own edges.  */
      if (!edge->inline_failed)
	{
	  fprintf (f, "%*sStack frame offset %i, callee self size %i,"
		   " callee size %i\n",
		   indent + 2, "",
		   (int) inline_summaries->get (callee)->stack_frame_offset,
		   (int) inline_summaries->get (callee)->estimated_self_stack_size,
		   (int) inline_summaries->get (callee)->estimated_stack_size);
	  dump_inline_edge_summary (f, indent + 2, callee, info);
	}
    }
  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (edge);
      fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
	       " time: %2i",
	       indent, "",
	       es->loop_depth,
	       edge->frequency, es->call_stmt_size, es->call_stmt_time);
      if (es->predicate)
	{
	  fprintf (f, "predicate: ");
	  dump_predicate (f, info->conds, es->predicate);
	}
      else
	fprintf (f, "\n");
    }
}


/* Dump inline summary of NODE (if it has a definition) to F, including
   hints, the size/time entries with their predicates and all call edges.  */

void
dump_inline_summary (FILE *f, struct cgraph_node *node)
{
  if (node->definition)
    {
      struct inline_summary *s = inline_summaries->get (node);
      size_time_entry *e;
      int i;
      fprintf (f, "Inline summary for %s/%i", node->name (),
	       node->order);
      if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
	fprintf (f, " always_inline");
      if (s->inlinable)
	fprintf (f, " inlinable");
      if (s->contains_cilk_spawn)
	fprintf (f, " contains_cilk_spawn");
      fprintf (f, "\n self time: %i\n", s->self_time);
      fprintf (f, " global time: %i\n", s->time);
      fprintf (f, " self size: %i\n", s->self_size);
      fprintf (f, " global size: %i\n", s->size);
      fprintf (f, " min size: %i\n", s->min_size);
      fprintf (f, " self stack: %i\n",
	       (int) s->estimated_self_stack_size);
      fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
      if (s->growth)
	fprintf (f, " estimated growth:%i\n", (int) s->growth);
      if (s->scc_no)
	fprintf (f, " In SCC: %i\n", (int) s->scc_no);
      for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
	{
	  fprintf (f, " size:%f, time:%f, predicate:",
		   (double) e->size / INLINE_SIZE_SCALE,
		   (double) e->time / INLINE_TIME_SCALE);
	  dump_predicate (f, s->conds, &e->predicate);
	}
      if (s->loop_iterations)
	{
	  fprintf (f, " loop iterations:");
	  dump_predicate (f, s->conds, s->loop_iterations);
	}
      if (s->loop_stride)
	{
	  fprintf (f, " loop stride:");
	  dump_predicate (f, s->conds, s->loop_stride);
	}
      if (s->array_index)
	{
	  fprintf (f, " array index:");
	  dump_predicate (f, s->conds, s->array_index);
	}
      fprintf (f, " calls:\n");
      dump_inline_edge_summary (f, 4, node, s);
      fprintf (f, "\n");
    }
}

DEBUG_FUNCTION void
debug_inline_summary (struct cgraph_node *node)
{
  dump_inline_summary (stderr, node);
}

/* Dump summaries of all functions that are not inline clones to F.  */

void
dump_inline_summaries (FILE *f)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      dump_inline_summary (f, node);
}

/* Give initial
   reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->local.redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else if (e->call_stmt_cannot_inline_p)
    e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
  else if (cfun && fn_contains_cilk_spawn_p (cfun))
    /* We can't inline if the function is spawning a function.  */
    e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
}

/* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
   boolean variable pointed to by DATA.  */

static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
	       void *data)
{
  bool *b = (bool *) data;
  *b = true;
  /* Returning true stops the walk; one modification is enough.  */
  return true;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  If non-NULL, the size of the memory load (or the SSA_NAME of the
   PARM_DECL) will be stored to *SIZE_P in that case too.  */

static tree
unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  /* SSA_NAME referring to parm default def?  */
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    {
      if (size_p)
	*size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
      return SSA_NAME_VAR (op);
    }
  /* Non-SSA parm reference?  */
  if (TREE_CODE (op) == PARM_DECL)
    {
      bool modified = false;

      /* A non-SSA parameter may have been stored to; walk the virtual
	 definitions reaching STMT to verify it is still unmodified.  */
      ao_ref refd;
      ao_ref_init (&refd, op);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
			  NULL);
      if (!modified)
	{
	  if (size_p)
	    *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
	  return op;
	}
    }
  return NULL_TREE;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  Also traverse chains of SSA register assignments.  If non-NULL,
   the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
   stored to *SIZE_P in that case too.  */

static tree
unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);
  if (res)
    return res;

  /* Follow single-rhs copies back to their source.  */
  if (TREE_CODE (op) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (op)
      && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
    return unmodified_parm (SSA_NAME_DEF_STMT (op),
			    gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
			    size_p);
  return NULL_TREE;
}

/* If OP refers to a value of a function parameter or value loaded from an
   aggregate passed to a parameter (either by value or reference), return TRUE
   and store the number of the parameter to *INDEX_P, the access size into
   *SIZE_P, and information whether and how it has been loaded from an
   aggregate into *AGGPOS.  INFO describes the function parameters, STMT is the
   statement in which OP is used or loaded.
*/

static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
				  gimple *stmt, tree op, int *index_p,
				  HOST_WIDE_INT *size_p,
				  struct agg_position_info *aggpos)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);

  gcc_checking_assert (aggpos);
  if (res)
    {
      *index_p = ipa_get_param_decl_index (fbi->info, res);
      if (*index_p < 0)
	return false;
      aggpos->agg_contents = false;
      aggpos->by_ref = false;
      return true;
    }

  if (TREE_CODE (op) == SSA_NAME)
    {
      if (SSA_NAME_IS_DEFAULT_DEF (op)
	  || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
	return false;
      stmt = SSA_NAME_DEF_STMT (op);
      op = gimple_assign_rhs1 (stmt);
      /* Non-reference rhs: retry the whole analysis on the copy source.  */
      if (!REFERENCE_CLASS_P (op))
	return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
						 aggpos);
    }

  aggpos->agg_contents = true;
  return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
				 stmt, op, index_p, &aggpos->offset,
				 size_p, &aggpos->by_ref);
}

/* See if statement might disappear after inlining.
   0 - means not eliminated
   1 - half of statements goes away
   2 - for sure it is eliminated.
   We are not terribly sophisticated, basically looking for simple abstraction
   penalty wrappers.  */

static int
eliminated_by_inlining_prob (gimple *stmt)
{
  enum gimple_code code = gimple_code (stmt);
  enum tree_code rhs_code;

  if (!optimize)
    return 0;

  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return 0;

      rhs_code = gimple_assign_rhs_code (stmt);

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are often free after
	 inlining due to SRA and further combining.
	 Assume that half of statements goes away.  */
      if (CONVERT_EXPR_CODE_P (rhs_code)
	  || rhs_code == VIEW_CONVERT_EXPR
	  || rhs_code == ADDR_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = get_base_address (rhs);
	  tree inner_lhs = get_base_address (lhs);
	  bool rhs_free = false;
	  bool lhs_free = false;

	  if (!inner_rhs)
	    inner_rhs = rhs;
	  if (!inner_lhs)
	    inner_lhs = lhs;

	  /* Reads of parameter are expected to be free.  */
	  if (unmodified_parm (stmt, inner_rhs, NULL))
	    rhs_free = true;
	  /* Match expressions of form &this->field.  Those will most likely
	     combine with something upstream after inlining.  */
	  else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
	    {
	      tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
	      if (TREE_CODE (op) == PARM_DECL)
		rhs_free = true;
	      else if (TREE_CODE (op) == MEM_REF
		       && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
		rhs_free = true;
	    }

	  /* When parameter is not SSA register because its address is taken
	     and it is just copied into one, the statement will be completely
	     free after inlining (we will copy propagate backward).  */
	  if (rhs_free && is_gimple_reg (lhs))
	    return 2;

	  /* Reads of parameters passed by reference
	     expected to be free (i.e. optimized out after inlining).  */
	  if (TREE_CODE (inner_rhs) == MEM_REF
	      && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
	    rhs_free = true;

	  /* Copying parameter passed by reference into gimple register is
	     probably also going to copy propagate, but we can't be quite
	     sure.  */
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;

	  /* Writes to parameters, parameters passed by value and return value
	     (either directly or passed via invisible reference) are free.

	     TODO: We ought to handle testcase like
	     struct a {int a,b;};
	     struct a
	     retrurnsturct (void)
	     {
	     struct a a ={1,2};
	     return a;
	     }

	     This translate into:

	     retrurnsturct ()
	     {
	     int a$b;
	     int a$a;
	     struct a a;
	     struct a D.2739;

	     <bb 2>:
	     D.2739.a = 1;
	     D.2739.b = 2;
	     return D.2739;

	     }
	     For that we either need to copy ipa-split logic detecting writes
	     to return value.  */
	  if (TREE_CODE (inner_lhs) == PARM_DECL
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == MEM_REF
		  && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
		      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
			  && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
			  && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
						      (inner_lhs,
						       0))) == RESULT_DECL))))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return 1;
	}
      return 0;
    default:
      return 0;
    }
}


/* If BB ends by a conditional we can turn into predicates, attach corresponding
   predicates to the CFG edges.  */

static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				   struct inline_summary *summary,
				   basic_block bb)
{
  gimple *last;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  enum tree_code code, inverted_code;
  edge e;
  edge_iterator ei;
  gimple *set_stmt;
  tree op2;

  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return;
  if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
    return;
  op = gimple_cond_lhs (last);
  /* TODO: handle conditionals like
     var = op0 < 4;
     if (var != 0).  */
  if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
    {
      code = gimple_cond_code (last);
      inverted_code = invert_tree_comparison (code, HONOR_NANS (op));

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
				      ? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
	     comparisons that are not EQ/NE instead of returning proper
	     unordered one.  Be sure it is not confused with NON_CONSTANT.  */
	  if (this_code != ERROR_MARK)
	    {
	      struct predicate p
		= add_condition (summary, index, size, &aggpos, this_code,
				 unshare_expr_without_location
				 (gimple_cond_rhs (last)));
	      e->aux = edge_predicate_pool.allocate ();
	      *(struct predicate *) e->aux = p;
	    }
	}
    }

  if (TREE_CODE (op) != SSA_NAME)
    return;
  /* Special case
     if (builtin_constant_p (op))
     constant_code
     else
     nonconstant_code.
     Here we can predicate nonconstant_code.  We can't
     really handle constant_code since we have no predicate
     for this and also the constant code is not known to be
     optimized away when inliner doesn't see operand is constant.
     Other optimizers might think otherwise.  */
  if (gimple_cond_code (last) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (last)))
    return;
  set_stmt = SSA_NAME_DEF_STMT (op);
  if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
      || gimple_call_num_args (set_stmt) != 1)
    return;
  op2 = gimple_call_arg (set_stmt, 0);
  if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
					 &aggpos))
    return;
  FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
    {
      struct predicate p = add_condition (summary, index, size, &aggpos,
					  IS_NOT_CONSTANT, NULL_TREE);
      e->aux = edge_predicate_pool.allocate ();
      *(struct predicate *) e->aux = p;
    }
}


/* If BB ends by a switch we can turn into predicates, attach corresponding
   predicates to the CFG edges.  */

static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				     struct inline_summary *summary,
				     basic_block bb)
{
  gimple *lastg;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  edge e;
  edge_iterator ei;
  size_t n;
  size_t case_idx;

  lastg = last_stmt (bb);
  if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
    return;
  gswitch *last = as_a <gswitch *> (lastg);
  op = gimple_switch_index (last);
  if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
    return;

  /* Start each outgoing edge as "never executed"; each matching case label
     OR-s its range condition in below.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      e->aux = edge_predicate_pool.allocate ();
      *(struct predicate *) e->aux = false_predicate ();
    }
  n = gimple_switch_num_labels (last);
  for (case_idx = 0; case_idx < n; ++case_idx)
    {
      tree cl = gimple_switch_label (last, case_idx);
      tree min, max;
      struct predicate p;

      e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* For default we might want to construct predicate that none
	 of cases is met, but it is bit hard to do not having negations
	 of conditionals handy.  */
      if (!min && !max)
	p = true_predicate ();
      else if (!max)
	p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
			   unshare_expr_without_location (min));
      else
	{
	  struct predicate p1, p2;
	  p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
			      unshare_expr_without_location (min));
	  p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
			      unshare_expr_without_location (max));
	  p = and_predicates (summary->conds, &p1, &p2);
	}
      *(struct predicate *) e->aux
	= or_predicates (summary->conds, &p, (struct predicate *) e->aux);
    }
}


/* For each BB in NODE attach to its AUX pointer predicate under
   which it is executable.  */

static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
		       struct cgraph_node *node,
		       struct inline_summary *summary)
{
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  bool done = false;
  basic_block bb;

  FOR_EACH_BB_FN (bb, my_function)
    {
      set_cond_stmt_execution_predicate (fbi, summary, bb);
      set_switch_stmt_execution_predicate (fbi, summary, bb);
    }

  /* Entry block is always executable.  */
  ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = edge_predicate_pool.allocate ();
  *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = true_predicate ();

  /* A simple dataflow propagation of predicates forward in the CFG.
     TODO: work in reverse postorder.  */
  while (!done)
    {
      done = true;
      FOR_EACH_BB_FN (bb, my_function)
	{
	  struct predicate p = false_predicate ();
	  edge e;
	  edge_iterator ei;
	  /* BB is reachable under the OR over its predecessors of
	     (predecessor's predicate AND the edge's predicate).  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      if (e->src->aux)
		{
		  struct predicate this_bb_predicate
		    = *(struct predicate *) e->src->aux;
		  if (e->aux)
		    this_bb_predicate
		      = and_predicates (summary->conds, &this_bb_predicate,
					(struct predicate *) e->aux);
		  p = or_predicates (summary->conds, &p, &this_bb_predicate);
		  if (true_predicate_p (&p))
		    break;
		}
	    }
	  if (false_predicate_p (&p))
	    gcc_assert (!bb->aux);
	  else
	    {
	      if (!bb->aux)
		{
		  done = false;
		  bb->aux = edge_predicate_pool.allocate ();
		  *((struct predicate *) bb->aux) = p;
		}
	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
		{
		  /* This OR operation is needed to ensure monotone data flow
		     in the case we hit the limit on number of clauses and the
		     and/or operations above give approximate answers.  */
		  p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
		  if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
		    {
		      done = false;
		      *((struct predicate *) bb->aux) = p;
		    }
		}
	    }
	}
    }
}


/* We keep info about constantness of SSA names.  */

typedef struct predicate predicate_t;
/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.  */

static struct predicate
will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
				    struct inline_summary *summary,
				    tree expr,
				    vec<predicate_t> nonconstant_names)
{
  tree parm;
  int index;
  HOST_WIDE_INT size;

  /* Unary wrappers do not affect constantness of the operand.  */
  while (UNARY_CLASS_P (expr))
    expr = TREE_OPERAND (expr, 0);

  parm = unmodified_parm (NULL, expr, &size);
  if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
    return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
  if (is_gimple_min_invariant (expr))
    return false_predicate ();
  if (TREE_CODE (expr) == SSA_NAME)
    return nonconstant_names[SSA_NAME_VERSION (expr)];
  if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
    {
      struct predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      struct predicate p2;
      if (true_predicate_p (&p1))
	return p1;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      return or_predicates (summary->conds, &p1, &p2);
    }
  else if (TREE_CODE (expr) == COND_EXPR)
    {
      struct predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      struct predicate p2;
      if (true_predicate_p (&p1))
	return p1;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      if (true_predicate_p (&p2))
	return p2;
      p1 = or_predicates (summary->conds, &p1, &p2);
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 2),
					       nonconstant_names);
      return or_predicates (summary->conds, &p1, &p2);
    }
  else
    {
      debug_tree (expr);
      gcc_unreachable ();
    }
  return false_predicate ();
}


/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.  */

static struct predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
			       struct inline_summary *summary,
			       gimple *stmt,
			       vec<predicate_t> nonconstant_names)
{
  struct predicate p = true_predicate ();
  ssa_op_iter iter;
  tree use;
  struct predicate op_non_const;
  bool is_load;
  int base_index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;

  /* What statements might be optimized away
     when their arguments are constant.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && (gimple_code (stmt) != GIMPLE_CALL
	  || !(gimple_call_flags (stmt) & ECF_CONST)))
    return p;

  /* Stores will stay anyway.  */
  if (gimple_store_p (stmt))
    return p;

  is_load = gimple_assign_load_p (stmt);

  /* Loads can be optimized when the value is known.  */
  if (is_load)
    {
      tree op;
      gcc_assert (gimple_assign_single_p (stmt));
      op = gimple_assign_rhs1 (stmt);
      if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
					     &aggpos))
	return p;
    }
  else
    base_index = -1;

  /* See if we understand all operands before we start
     adding conditionals.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree parm = unmodified_parm (stmt, use, NULL);
      /* For arguments we can build a condition.  */
      if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
	continue;
      if (TREE_CODE (use) != SSA_NAME)
	return p;
      /* If we know when operand is constant,
	 we still can say something useful.  */
      if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
	continue;
      return p;
    }

  if (is_load)
    op_non_const =
      add_condition (summary, base_index, size, &aggpos, CHANGED, NULL);
  else
    op_non_const = false_predicate ();
  /* The result is nonconstant if any operand is nonconstant.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      HOST_WIDE_INT size;
      tree parm = unmodified_parm (stmt, use, &size);
      int index;

      if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
	{
	  if (index != base_index)
	    p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
	  else
	    continue;
	}
      else
	p = nonconstant_names[SSA_NAME_VERSION (use)];
      op_non_const = or_predicates (summary->conds, &p, &op_non_const);
    }
  /* Record the predicate for the SSA name this statement defines.  */
  if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
      && gimple_op (stmt, 0)
      && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
    nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
      = op_non_const;
  return op_non_const;
}

struct record_modified_bb_info
{
  bitmap bb_set;	/* Indices of blocks where the value may be set.  */
  gimple *stmt;		/* The statement whose own vdef is ignored.  */
};

/* Callback of walk_aliased_vdefs.  Records basic blocks where the value may be
   set except for info->stmt.  */

static bool
record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
  struct record_modified_bb_info *info =
    (struct record_modified_bb_info *) data;
  if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
    return false;
  bitmap_set_bit (info->bb_set,
		  SSA_NAME_IS_DEFAULT_DEF (vdef)
		  ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
		  : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
  return false;
}

/* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
   will change since last invocation of STMT.

   Value 0 is reserved for compile time invariants.
For common parameters it is REG_BR_PROB_BASE.  For loop invariants it
   ought to be REG_BR_PROB_BASE / estimated_iters.  */

static int
param_change_prob (gimple *stmt, int i)
{
  tree op = gimple_call_arg (stmt, i);
  basic_block bb = gimple_bb (stmt);
  tree base;

  /* Global invariants never change.  */
  if (is_gimple_min_invariant (op))
    return 0;
  /* We would have to do non-trivial analysis to really work out what
     is the probability of value to change (i.e. when init statement
     is in a sibling loop of the call).

     We do a conservative estimate: when call is executed N times more often
     than the statement defining value, we take the frequency 1/N.  */
  if (TREE_CODE (op) == SSA_NAME)
    {
      int init_freq;

      if (!bb->frequency)
	return REG_BR_PROB_BASE;

      if (SSA_NAME_IS_DEFAULT_DEF (op))
	init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;

      if (!init_freq)
	init_freq = 1;
      if (init_freq < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }

  base = get_base_address (op);
  if (base)
    {
      ao_ref refd;
      int max;
      struct record_modified_bb_info info;
      bitmap_iterator bi;
      unsigned index;
      tree init = ctor_for_folding (base);

      /* A readable constructor means the memory is a compile time
	 constant.  */
      if (init != error_mark_node)
	return 0;
      if (!bb->frequency)
	return REG_BR_PROB_BASE;
      ao_ref_init (&refd, op);
      info.stmt = stmt;
      info.bb_set = BITMAP_ALLOC (NULL);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
			  NULL);
      if (bitmap_bit_p (info.bb_set, bb->index))
	{
	  BITMAP_FREE (info.bb_set);
	  return REG_BR_PROB_BASE;
	}

      /* Assume that every memory is initialized at entry.
	 TODO: Can we easily determine if value is always defined
	 and thus we may skip entry block?  */
      if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
	max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	max = 1;

      EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
	max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);

      BITMAP_FREE (info.bb_set);
      if (max < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }
  return REG_BR_PROB_BASE;
}

/* Find whether a basic block BB is the final block of a (half) diamond CFG
   sub-graph and if the predicate the condition depends on is known.  If so,
   return true and store the predicate in *P.  */

static bool
phi_result_unknown_predicate (struct ipa_node_params *info,
			      inline_summary *summary, basic_block bb,
			      struct predicate *p,
			      vec<predicate_t> nonconstant_names)
{
  edge e;
  edge_iterator ei;
  basic_block first_bb = NULL;
  gimple *stmt;

  if (single_pred_p (bb))
    {
      *p = false_predicate ();
      return true;
    }

  /* All predecessors must funnel back to a single conditional block,
     either directly or through single-statement forwarder blocks.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (single_succ_p (e->src))
	{
	  if (!single_pred_p (e->src))
	    return false;
	  if (!first_bb)
	    first_bb = single_pred (e->src);
	  else if (single_pred (e->src) != first_bb)
	    return false;
	}
      else
	{
	  if (!first_bb)
	    first_bb = e->src;
	  else if (e->src != first_bb)
	    return false;
	}
    }

  if (!first_bb)
    return false;

  stmt = last_stmt (first_bb);
  if (!stmt
      || gimple_code (stmt) != GIMPLE_COND
      || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
    return false;

  *p = will_be_nonconstant_expr_predicate (info, summary,
					   gimple_cond_lhs (stmt),
					   nonconstant_names);
  if (true_predicate_p (p))
    return false;
  else
    return true;
}

/* Given a PHI statement in a function described by inline properties SUMMARY
   and *P being the predicate describing whether the selected PHI argument is
   known, store a predicate for the result of the PHI statement into
   NONCONSTANT_NAMES, if possible.  */

static void
predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
			  struct predicate *p,
			  vec<predicate_t> nonconstant_names)
{
  unsigned i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg (phi, i)->def;
      if (!is_gimple_min_invariant (arg))
	{
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  *p = or_predicates (summary->conds, p,
			      &nonconstant_names[SSA_NAME_VERSION (arg)]);
	  /* Once the predicate degenerates to "always true" there is
	     nothing more to refine.  */
	  if (true_predicate_p (p))
	    return;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\t\tphi predicate: ");
      dump_predicate (dump_file, summary->conds, p);
    }
  nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}

/* Return predicate specifying when array index in access OP becomes non-constant.  */

static struct predicate
array_index_predicate (inline_summary *info,
		       vec< predicate_t> nonconstant_names, tree op)
{
  struct predicate p = false_predicate ();
  /* Walk the component reference chain and OR in the predicate of every
     SSA-name array index found along the way.  */
  while (handled_component_p (op))
    {
      if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
	{
	  if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
	    p = or_predicates (info->conds, &p,
			       &nonconstant_names[SSA_NAME_VERSION
						  (TREE_OPERAND (op, 1))]);
	}
      op = TREE_OPERAND (op, 0);
    }
  return p;
}

/* For a typical usage of __builtin_expect (a<b, 1), we
   may introduce an extra relation stmt:
   With the builtin, we have
     t1 = a <= b;
     t2 = (long int) t1;
     t3 = __builtin_expect (t2, 1);
     if (t3 != 0)
       goto ...
2375 Without the builtin, we have 2376 if (a<=b) 2377 goto... 2378 This affects the size/time estimation and may have 2379 an impact on the earlier inlining. 2380 Here find this pattern and fix it up later. */ 2381 2382 static gimple * 2383 find_foldable_builtin_expect (basic_block bb) 2384 { 2385 gimple_stmt_iterator bsi; 2386 2387 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) 2388 { 2389 gimple *stmt = gsi_stmt (bsi); 2390 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT) 2391 || (is_gimple_call (stmt) 2392 && gimple_call_internal_p (stmt) 2393 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)) 2394 { 2395 tree var = gimple_call_lhs (stmt); 2396 tree arg = gimple_call_arg (stmt, 0); 2397 use_operand_p use_p; 2398 gimple *use_stmt; 2399 bool match = false; 2400 bool done = false; 2401 2402 if (!var || !arg) 2403 continue; 2404 gcc_assert (TREE_CODE (var) == SSA_NAME); 2405 2406 while (TREE_CODE (arg) == SSA_NAME) 2407 { 2408 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg); 2409 if (!is_gimple_assign (stmt_tmp)) 2410 break; 2411 switch (gimple_assign_rhs_code (stmt_tmp)) 2412 { 2413 case LT_EXPR: 2414 case LE_EXPR: 2415 case GT_EXPR: 2416 case GE_EXPR: 2417 case EQ_EXPR: 2418 case NE_EXPR: 2419 match = true; 2420 done = true; 2421 break; 2422 CASE_CONVERT: 2423 break; 2424 default: 2425 done = true; 2426 break; 2427 } 2428 if (done) 2429 break; 2430 arg = gimple_assign_rhs1 (stmt_tmp); 2431 } 2432 2433 if (match && single_imm_use (var, &use_p, &use_stmt) 2434 && gimple_code (use_stmt) == GIMPLE_COND) 2435 return use_stmt; 2436 } 2437 } 2438 return NULL; 2439 } 2440 2441 /* Return true when the basic blocks contains only clobbers followed by RESX. 2442 Such BBs are kept around to make removal of dead stores possible with 2443 presence of EH and will be optimized out by optimize_clobbers later in the 2444 game. 2445 2446 NEED_EH is used to recurse in case the clobber has non-EH predecestors 2447 that can be clobber only, too.. 
When it is false, the RESX is not necessary
   on the end of basic block.  */

static bool
clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge_iterator ei;
  edge e;

  if (need_eh)
    {
      if (gsi_end_p (gsi))
	return false;
      if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
	return false;
      gsi_prev (&gsi);
    }
  else if (!single_succ_p (bb))
    return false;

  /* Walk backwards; anything but debug stmts, clobbers and labels
     disqualifies the block.  */
  for (; !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return false;
    }

  /* See if all predecessors are either throws or clobber only BBs.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH)
	&& !clobber_only_eh_bb_p (e->src, false))
      return false;

  return true;
}

/* Compute function body size parameters for NODE.
   When EARLY is true, we compute only simple summaries without
   non-trivial predicates to drive the early inliner.  */

static void
estimate_function_body_sizes (struct cgraph_node *node, bool early)
{
  gcov_type time = 0;
  /* Estimate static overhead for function prologue/epilogue and alignment.  */
  int size = 2;
  /* Benefits are scaled by probability of elimination that is in range
     <0,2>.  */
  basic_block bb;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  int freq;
  struct inline_summary *info = inline_summaries->get (node);
  struct predicate bb_predicate;
  struct ipa_func_body_info fbi;
  vec<predicate_t> nonconstant_names = vNULL;
  int nblocks, n;
  int *order;
  predicate array_index = true_predicate ();
  gimple *fix_builtin_expect_stmt;

  gcc_assert (my_function && my_function->cfg);
  gcc_assert (cfun == my_function);

  memset(&fbi, 0, sizeof(fbi));
  info->conds = NULL;
  info->entry = NULL;

  /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
     so we can produce proper inline hints.

     When optimizing and analyzing for early inliner, initialize node params
     so we can produce correct BB predicates.  */

  if (opt_for_fn (node->decl, optimize))
    {
      calculate_dominance_info (CDI_DOMINATORS);
      if (!early)
        loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
      else
	{
	  ipa_check_create_node_params ();
	  ipa_initialize_node_params (node);
	}

      if (ipa_node_params_sum)
	{
	  fbi.node = node;
	  fbi.info = IPA_NODE_REF (node);
	  fbi.bb_infos = vNULL;
	  fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
	  fbi.param_count = count_formal_params(node->decl);
	  nonconstant_names.safe_grow_cleared
	    (SSANAMES (my_function)->length ());
	}
    }

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function body size: %s\n",
	     node->name ());

  /* When we run into maximal number of entries, we assign everything to the
     constant truth case.  Be sure to have it in list.  */
  bb_predicate = true_predicate ();
  account_size_time (info, 0, 0, &bb_predicate);

  bb_predicate = not_inlined_predicate ();
  account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);

  if (fbi.info)
    compute_bb_predicates (&fbi, node, info);
  /* Walk blocks in reverse post-order so predicates of predecessors are
     available when a block is visited.  */
  order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  nblocks = pre_and_rev_post_order_compute (NULL, order, false);
  for (n = 0; n < nblocks; n++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      if (clobber_only_eh_bb_p (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "\n Ignoring BB %i;"
		     " it will be optimized away by cleanup_clobbers\n",
		     bb->index);
	  continue;
	}

      /* TODO: Obviously predicates can be propagated down across CFG.  */
      if (fbi.info)
	{
	  if (bb->aux)
	    bb_predicate = *(struct predicate *) bb->aux;
	  else
	    bb_predicate = false_predicate ();
	}
      else
	bb_predicate = true_predicate ();

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\n BB %i predicate:", bb->index);
	  dump_predicate (dump_file, info->conds, &bb_predicate);
	}

      if (fbi.info && nonconstant_names.exists ())
	{
	  struct predicate phi_predicate;
	  bool first_phi = true;

	  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	       gsi_next (&bsi))
	    {
	      if (first_phi
		  && !phi_result_unknown_predicate (fbi.info, info, bb,
						    &phi_predicate,
						    nonconstant_names))
		break;
	      first_phi = false;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "  ");
		  print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
		}
	      predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
					nonconstant_names);
	    }
	}

      fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);
	  int prob;
	  struct predicate will_be_nonconstant;

	  /* This relation stmt should be folded after we remove
	     builtin_expect call.  Adjust the cost here.  */
	  if (stmt == fix_builtin_expect_stmt)
	    {
	      this_size--;
	      this_time--;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	      fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
		       ((double) freq) / CGRAPH_FREQ_BASE, this_size,
		       this_time);
	    }

	  /* Track predicates of array indexes on both loaded and stored
	     sides to drive the array_index inline hint.  */
	  if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
	    {
	      struct predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_assign_rhs1 (stmt));
	      if (!false_predicate_p (&this_array_index))
		array_index =
		  and_predicates (info->conds, &array_index,
				  &this_array_index);
	    }
	  if (gimple_store_p (stmt) && nonconstant_names.exists ())
	    {
	      struct predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_get_lhs (stmt));
	      if (!false_predicate_p (&this_array_index))
		array_index =
		  and_predicates (info->conds, &array_index,
				  &this_array_index);
	    }


	  if (is_gimple_call (stmt)
	      && !gimple_call_internal_p (stmt))
	    {
	      struct cgraph_edge *edge = node->get_edge (stmt);
	      struct inline_edge_summary *es = inline_edge_summary (edge);

	      /* Special case: results of BUILT_IN_CONSTANT_P will be always
	         resolved as constant.  We however don't want to optimize
	         out the cgraph edges.  */
	      if (nonconstant_names.exists ()
		  && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
		  && gimple_call_lhs (stmt)
		  && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
		{
		  struct predicate false_p = false_predicate ();
		  nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
		    = false_p;
		}
	      if (ipa_node_params_sum)
		{
		  int count = gimple_call_num_args (stmt);
		  int i;

		  if (count)
		    es->param.safe_grow_cleared (count);
		  for (i = 0; i < count; i++)
		    {
		      int prob = param_change_prob (stmt, i);
		      gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
		      es->param[i].change_prob = prob;
		    }
		}

	      es->call_stmt_size = this_size;
	      es->call_stmt_time = this_time;
	      es->loop_depth = bb_loop_depth (bb);
	      edge_set_predicate (edge, &bb_predicate);
	    }

	  /* TODO: When conditional jump or switch is known to be constant, but
	     we did not translate it into the predicates, we really can account
	     just maximum of the possible paths.  */
	  if (fbi.info)
	    will_be_nonconstant
	      = will_be_nonconstant_predicate (&fbi, info,
					       stmt, nonconstant_names);
	  if (this_time || this_size)
	    {
	      struct predicate p;

	      this_time *= freq;

	      prob = eliminated_by_inlining_prob (stmt);
	      if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "\t\t50%% will be eliminated by inlining\n");
	      if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "\t\tWill be eliminated by inlining\n");

	      if (fbi.info)
		p = and_predicates (info->conds, &bb_predicate,
				    &will_be_nonconstant);
	      else
		p = true_predicate ();

	      if (!false_predicate_p (&p)
		  || (is_gimple_call (stmt)
		      && !false_predicate_p (&bb_predicate)))
		{
		  time += this_time;
		  size += this_size;
		  /* Saturate time to avoid overflows.  */
		  if (time > MAX_TIME * INLINE_TIME_SCALE)
		    time = MAX_TIME * INLINE_TIME_SCALE;
		}

	      /* We account everything but the calls.  Calls have their own
	         size/time info attached to cgraph edges.  This is necessary
	         in order to make the cost disappear after inlining.  */
	      if (!is_gimple_call (stmt))
		{
		  if (prob)
		    {
		      struct predicate ip = not_inlined_predicate ();
		      ip = and_predicates (info->conds, &ip, &p);
		      account_size_time (info, this_size * prob,
					 this_time * prob, &ip);
		    }
		  if (prob != 2)
		    account_size_time (info, this_size * (2 - prob),
				       this_time * (2 - prob), &p);
		}

	      gcc_assert (time >= 0);
	      gcc_assert (size >= 0);
	    }
	}
    }
  set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
  /* Round frequency-scaled time back to the base scale.  */
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  if (time > MAX_TIME)
    time = MAX_TIME;
  free (order);

  if (nonconstant_names.exists () && !early)
    {
      struct loop *loop;
      predicate loop_iterations = true_predicate ();
      predicate loop_stride = true_predicate ();

      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loops_dump (dump_file, NULL, 0);
      scev_initialize ();
      FOR_EACH_LOOP (loop, 0)
	{
	  vec<edge> exits;
	  edge ex;
	  unsigned int j;
	  struct tree_niter_desc niter_desc;
	  bb_predicate = *(struct predicate *) loop->header->aux;

	  exits = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (exits, j, ex)
	    if (number_of_iterations_exit (loop, ex, &niter_desc, false)
		&& !is_gimple_min_invariant (niter_desc.niter))
	    {
	      predicate will_be_nonconstant
		= will_be_nonconstant_expr_predicate (fbi.info, info,
						      niter_desc.niter,
						      nonconstant_names);
	      if (!true_predicate_p (&will_be_nonconstant))
		will_be_nonconstant = and_predicates (info->conds,
						      &bb_predicate,
						      &will_be_nonconstant);
	      if (!true_predicate_p (&will_be_nonconstant)
		  && !false_predicate_p (&will_be_nonconstant))
		/* This is slightly imprecise.  We may want to represent each
		   loop with independent predicate.  */
		loop_iterations =
		  and_predicates (info->conds, &loop_iterations,
				  &will_be_nonconstant);
	    }
	  exits.release ();
	}

      /* To avoid quadratic behavior we analyze stride predicates only
         with respect to the containing loop.  Thus we simply iterate
	 over all defs in the outermost loop body.  */
      for (loop = loops_for_fn (cfun)->tree_root->inner;
	   loop != NULL; loop = loop->next)
	{
	  basic_block *body = get_loop_body (loop);
	  for (unsigned i = 0; i < loop->num_nodes; i++)
	    {
	      gimple_stmt_iterator gsi;
	      bb_predicate = *(struct predicate *) body[i]->aux;
	      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
		   gsi_next (&gsi))
		{
		  gimple *stmt = gsi_stmt (gsi);

		  if (!is_gimple_assign (stmt))
		    continue;

		  tree def = gimple_assign_lhs (stmt);
		  if (TREE_CODE (def) != SSA_NAME)
		    continue;

		  affine_iv iv;
		  if (!simple_iv (loop_containing_stmt (stmt),
				  loop_containing_stmt (stmt),
				  def, &iv, true)
		      || is_gimple_min_invariant (iv.step))
		    continue;

		  predicate will_be_nonconstant
		    = will_be_nonconstant_expr_predicate (fbi.info, info,
							  iv.step,
							  nonconstant_names);
		  if (!true_predicate_p (&will_be_nonconstant))
		    will_be_nonconstant
		      = and_predicates (info->conds, &bb_predicate,
					&will_be_nonconstant);
		  if (!true_predicate_p (&will_be_nonconstant)
		      && !false_predicate_p (&will_be_nonconstant))
		    /* This is slightly imprecise.  We may want to represent
		       each loop with independent predicate.  */
		    loop_stride = and_predicates (info->conds, &loop_stride,
						  &will_be_nonconstant);
		}
	    }
	  free (body);
	}
      set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
			  loop_iterations);
      set_hint_predicate (&inline_summaries->get (node)->loop_stride,
			  loop_stride);
      scev_finalize ();
    }
  /* Release per-BB and per-edge predicates stored in the aux fields.  */
  FOR_ALL_BB_FN (bb, my_function)
    {
      edge e;
      edge_iterator ei;

      if (bb->aux)
	edge_predicate_pool.remove ((predicate *)bb->aux);
      bb->aux = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux)
	    edge_predicate_pool.remove ((predicate *) e->aux);
	  e->aux = NULL;
	}
    }
  inline_summaries->get (node)->self_time = time;
  inline_summaries->get (node)->self_size = size;
  nonconstant_names.release ();
  ipa_release_body_info (&fbi);
  if (opt_for_fn (node->decl, optimize))
    {
      if (!early)
	loop_optimizer_finalize ();
      else if (!ipa_edge_args_vector)
	ipa_free_all_node_params ();
      free_dominance_info (CDI_DOMINATORS);
    }
  if (dump_file)
    {
      fprintf (dump_file, "\n");
      dump_inline_summary (dump_file, node);
    }
}


/* Compute parameters of functions used by inliner.
   EARLY is true when we compute parameters for the early inliner */

void
compute_inline_parameters (struct cgraph_node *node, bool early)
{
  HOST_WIDE_INT self_stack_size;
  struct cgraph_edge *e;
  struct inline_summary *info;

  gcc_assert (!node->global.inlined_to);

  inline_summary_alloc ();

  info = inline_summaries->get (node);
  reset_inline_summary (node, info);

  /* FIXME: Thunks are inlinable, but tree-inline don't know how to do that.
     Once this happens, we will need to more carefully predict call
     statement size.  */
  if (node->thunk.thunk_p)
    {
      struct inline_edge_summary *es = inline_edge_summary (node->callees);
      struct predicate t = true_predicate ();

      info->inlinable = 0;
      node->callees->call_stmt_cannot_inline_p = true;
      node->local.can_change_signature = false;
      es->call_stmt_time = 1;
      es->call_stmt_size = 1;
      account_size_time (info, 0, 0, &t);
      return;
    }

  /* Even is_gimple_min_invariant relies on current_function_decl.  */
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  /* Estimate the stack size for the function if we're optimizing.  */
  self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
  info->estimated_self_stack_size = self_stack_size;
  info->estimated_stack_size = self_stack_size;
  info->stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  if (!opt_for_fn (node->decl, optimize)
      && !lookup_attribute ("always_inline",
			    DECL_ATTRIBUTES (node->decl)))
    info->inlinable = false;
  else
    info->inlinable = tree_inlinable_function_p (node->decl);

  info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);

  /* Type attributes can use parameter indices to describe them.  */
  if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
    node->local.can_change_signature = false;
  else
    {
      /* Otherwise, inlinable functions always can change signature.  */
      if (info->inlinable)
	node->local.can_change_signature = true;
      else
	{
	  /* Functions calling builtin_apply can not change signature.  */
	  for (e = node->callees; e; e = e->next_callee)
	    {
	      tree cdecl = e->callee->decl;
	      if (DECL_BUILT_IN (cdecl)
		  && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
		  && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
		      || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
		break;
	    }
	  node->local.can_change_signature = !e;
	}
    }

  /* Functions called by instrumentation thunk can't change signature
     because instrumentation thunk modification is not supported.  */
  if (node->local.can_change_signature)
    for (e = node->callers; e; e = e->next_caller)
      if (e->caller->thunk.thunk_p
	  && e->caller->thunk.add_pointer_bounds_args)
	{
	  node->local.can_change_signature = false;
	  break;
	}

  estimate_function_body_sizes (node, early);

  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->comdat_local_p ())
      break;
  node->calls_comdat_local = (e != NULL);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  info->time = info->self_time;
  info->size = info->self_size;
  info->stack_frame_offset = 0;
  info->estimated_stack_size = info->estimated_self_stack_size;
  if (flag_checking)
    {
      inline_update_overall_summary (node);
      gcc_assert (info->time == info->self_time
		  && info->size == info->self_size);
    }

  pop_cfun ();
}


/* Compute parameters of functions used by inliner using
   current_function_decl.
*/ 3017 3018 static unsigned int 3019 compute_inline_parameters_for_current (void) 3020 { 3021 compute_inline_parameters (cgraph_node::get (current_function_decl), true); 3022 return 0; 3023 } 3024 3025 namespace { 3026 3027 const pass_data pass_data_inline_parameters = 3028 { 3029 GIMPLE_PASS, /* type */ 3030 "inline_param", /* name */ 3031 OPTGROUP_INLINE, /* optinfo_flags */ 3032 TV_INLINE_PARAMETERS, /* tv_id */ 3033 0, /* properties_required */ 3034 0, /* properties_provided */ 3035 0, /* properties_destroyed */ 3036 0, /* todo_flags_start */ 3037 0, /* todo_flags_finish */ 3038 }; 3039 3040 class pass_inline_parameters : public gimple_opt_pass 3041 { 3042 public: 3043 pass_inline_parameters (gcc::context *ctxt) 3044 : gimple_opt_pass (pass_data_inline_parameters, ctxt) 3045 {} 3046 3047 /* opt_pass methods: */ 3048 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); } 3049 virtual unsigned int execute (function *) 3050 { 3051 return compute_inline_parameters_for_current (); 3052 } 3053 3054 }; // class pass_inline_parameters 3055 3056 } // anon namespace 3057 3058 gimple_opt_pass * 3059 make_pass_inline_parameters (gcc::context *ctxt) 3060 { 3061 return new pass_inline_parameters (ctxt); 3062 } 3063 3064 3065 /* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS, 3066 KNOWN_CONTEXTS and KNOWN_AGGS. 
*/ 3067 3068 static bool 3069 estimate_edge_devirt_benefit (struct cgraph_edge *ie, 3070 int *size, int *time, 3071 vec<tree> known_vals, 3072 vec<ipa_polymorphic_call_context> known_contexts, 3073 vec<ipa_agg_jump_function_p> known_aggs) 3074 { 3075 tree target; 3076 struct cgraph_node *callee; 3077 struct inline_summary *isummary; 3078 enum availability avail; 3079 bool speculative; 3080 3081 if (!known_vals.exists () && !known_contexts.exists ()) 3082 return false; 3083 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining)) 3084 return false; 3085 3086 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts, 3087 known_aggs, &speculative); 3088 if (!target || speculative) 3089 return false; 3090 3091 /* Account for difference in cost between indirect and direct calls. */ 3092 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost); 3093 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost); 3094 gcc_checking_assert (*time >= 0); 3095 gcc_checking_assert (*size >= 0); 3096 3097 callee = cgraph_node::get (target); 3098 if (!callee || !callee->definition) 3099 return false; 3100 callee = callee->function_symbol (&avail); 3101 if (avail < AVAIL_AVAILABLE) 3102 return false; 3103 isummary = inline_summaries->get (callee); 3104 return isummary->inlinable; 3105 } 3106 3107 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to 3108 handle edge E with probability PROB. 3109 Set HINTS if edge may be devirtualized. 3110 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call 3111 site. 
*/

static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
			     int *time,
			     int prob,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     inline_hints *hints)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  int call_size = es->call_stmt_size;
  int call_time = es->call_stmt_time;
  int cur_size;
  if (!e->callee
      && estimate_edge_devirt_benefit (e, &call_size, &call_time,
				       known_vals, known_contexts, known_aggs)
      && hints && e->maybe_hot_p ())
    *hints |= INLINE_HINT_indirect_call;
  cur_size = call_size * INLINE_SIZE_SCALE;
  *size += cur_size;
  if (min_size)
    *min_size += cur_size;
  /* Scale the time by edge frequency and probability, saturating at the
     maximum representable time.  */
  *time += apply_probability ((gcov_type) call_time, prob)
    * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
  if (*time > MAX_TIME * INLINE_TIME_SCALE)
    *time = MAX_TIME * INLINE_TIME_SCALE;
}



/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.  */

static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
			      int *min_size, int *time,
			      inline_hints *hints,
			      clause_t possible_truths,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
	continue;

      struct inline_edge_summary *es = inline_edge_summary (e);

      /* Do not care about zero sized builtins.  */
      if (e->inline_failed && !es->call_stmt_size)
	{
	  gcc_checking_assert (!es->call_stmt_time);
	  continue;
	}
      if (!es->predicate
	  || evaluate_predicate (es->predicate, possible_truths))
	{
	  if (e->inline_failed)
	    {
	      /* Predicates of calls shall not use NOT_CHANGED codes,
	         so we do not need to compute probabilities.  */
	      estimate_edge_size_and_time (e, size,
					   es->predicate ? NULL : min_size,
					   time, REG_BR_PROB_BASE,
					   known_vals, known_contexts,
					   known_aggs, hints);
	    }
	  else
	    /* Edge was already inlined; recurse into the inlined body to
	       pick up its calls.  */
	    estimate_calls_size_and_time (e->callee, size, min_size, time,
					  hints,
					  possible_truths,
					  known_vals, known_contexts,
					  known_aggs);
	}
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
	continue;

      struct inline_edge_summary *es = inline_edge_summary (e);
      if (!es->predicate
	  || evaluate_predicate (es->predicate, possible_truths))
	estimate_edge_size_and_time (e, size,
				     es->predicate ? NULL : min_size,
				     time, REG_BR_PROB_BASE,
				     known_vals, known_contexts, known_aggs,
				     hints);
    }
}


/* Estimate size and time needed to execute NODE assuming
   POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   information about NODE's arguments.  If non-NULL use also probability
   information present in INLINE_PARAM_SUMMARY vector.
   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent on the call context and
   can be used for fast estimates.  Return the values in RET_SIZE,
   RET_MIN_SIZE, RET_TIME and RET_HINTS.
*/

static void
estimate_node_size_and_time (struct cgraph_node *node,
			     clause_t possible_truths,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     int *ret_size, int *ret_min_size, int *ret_time,
			     inline_hints *ret_hints,
			     vec<inline_param_summary>
			     inline_param_summary)
{
  struct inline_summary *info = inline_summaries->get (node);
  size_time_entry *e;
  int size = 0;
  int time = 0;
  int min_size = 0;
  inline_hints hints = 0;
  int i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bool found = false;
      fprintf (dump_file, "   Estimating body: %s/%i\n"
	       "   Known to be false: ", node->name (),
	       node->order);

      /* List every condition that POSSIBLE_TRUTHS rules out.  */
      for (i = predicate_not_inlined_condition;
	   i < (predicate_first_dynamic_condition
		+ (int) vec_safe_length (info->conds)); i++)
	if (!(possible_truths & (1 << i)))
	  {
	    if (found)
	      fprintf (dump_file, ", ");
	    found = true;
	    dump_condition (dump_file, info->conds, i);
	  }
    }

  /* Sum the size/time entries whose predicate may be true in this
     context.  With INLINE_PARAM_SUMMARY available, scale each entry's
     time by the probability that its predicate actually holds.  */
  for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
    if (evaluate_predicate (&e->predicate, possible_truths))
      {
	size += e->size;
	gcc_checking_assert (e->time >= 0);
	gcc_checking_assert (time >= 0);
	if (!inline_param_summary.exists ())
	  time += e->time;
	else
	  {
	    int prob = predicate_probability (info->conds,
					      &e->predicate,
					      possible_truths,
					      inline_param_summary);
	    gcc_checking_assert (prob >= 0);
	    gcc_checking_assert (prob <= REG_BR_PROB_BASE);
	    time += apply_probability ((gcov_type) e->time, prob);
	  }
	if (time > MAX_TIME * INLINE_TIME_SCALE)
	  time = MAX_TIME * INLINE_TIME_SCALE;
	gcc_checking_assert (time >= 0);

      }
  /* The first size/time entry is always unconditional; its size is the
     context-independent minimum used for fast estimates.  */
  gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
  min_size = (*info->entry)[0].size;
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* Hints: a hint predicate known to be false in this context presumably
     means the corresponding value (loop bound, stride, array index)
     becomes determinable after inlining — record the hint.  */
  if (info->loop_iterations
      && !evaluate_predicate (info->loop_iterations, possible_truths))
    hints |= INLINE_HINT_loop_iterations;
  if (info->loop_stride
      && !evaluate_predicate (info->loop_stride, possible_truths))
    hints |= INLINE_HINT_loop_stride;
  if (info->array_index
      && !evaluate_predicate (info->array_index, possible_truths))
    hints |= INLINE_HINT_array_index;
  if (info->scc_no)
    hints |= INLINE_HINT_in_scc;
  if (DECL_DECLARED_INLINE_P (node->decl))
    hints |= INLINE_HINT_declared_inline;

  estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
				known_vals, known_contexts, known_aggs);
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);
  /* Convert accumulated scaled values back to external units.  */
  time = RDIV (time, INLINE_TIME_SCALE);
  size = RDIV (size, INLINE_SIZE_SCALE);
  min_size = RDIV (min_size, INLINE_SIZE_SCALE);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n   size:%i time:%i\n", (int) size, (int) time);
  if (ret_time)
    *ret_time = time;
  if (ret_size)
    *ret_size = size;
  if (ret_min_size)
    *ret_min_size = min_size;
  if (ret_hints)
    *ret_hints = hints;
  return;
}


/* Estimate size and time needed to execute callee of EDGE assuming that
   parameters known to be constant at caller of EDGE are propagated.
   KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
   and types for parameters.
*/

void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
				   vec<tree> known_vals,
				   vec<ipa_polymorphic_call_context>
				   known_contexts,
				   vec<ipa_agg_jump_function_p> known_aggs,
				   int *ret_size, int *ret_time,
				   inline_hints *hints)
{
  clause_t clause;

  /* Determine which conditions the known arguments satisfy, then
     estimate in that context.  No caller probability summary is
     available here (vNULL) and min_size is not requested.  */
  clause = evaluate_conditions_for_known_args (node, false, known_vals,
					       known_aggs);
  estimate_node_size_and_time (node, clause, known_vals, known_contexts,
			       known_aggs, ret_size, NULL, ret_time, hints, vNULL);
}

/* Translate all conditions from callee representation into caller
   representation and symbolically evaluate predicate P into new predicate.

   INFO is inline_summary of function we are adding predicate into, CALLEE_INFO
   is summary of function predicate P is from.  OPERAND_MAP is array mapping
   callee formal IDs to the caller formal IDs.  POSSIBLE_TRUTHS is a clause of
   all callee conditions that may be true in caller context.  TOPLEV_PREDICATE
   is predicate under which callee is executed.  OFFSET_MAP is an array of
   offsets that need to be added to conditions, negative offset means that
   conditions relying on values passed by reference have to be discarded
   because they might not be preserved (and should be considered offset zero
   for other purposes).  */

static struct predicate
remap_predicate (struct inline_summary *info,
		 struct inline_summary *callee_info,
		 struct predicate *p,
		 vec<int> operand_map,
		 vec<int> offset_map,
		 clause_t possible_truths, struct predicate *toplev_predicate)
{
  int i;
  struct predicate out = true_predicate ();

  /* True predicate is easy.  */
  if (true_predicate_p (p))
    return *toplev_predicate;
  for (i = 0; p->clause[i]; i++)
    {
      clause_t clause = p->clause[i];
      int cond;
      struct predicate clause_predicate = false_predicate ();

      gcc_assert (i < MAX_CLAUSES);

      for (cond = 0; cond < NUM_CONDITIONS; cond++)
	/* Do we have condition we can't disprove?  */
	if (clause & possible_truths & (1 << cond))
	  {
	    struct predicate cond_predicate;
	    /* Work out if the condition can translate to predicate in the
	       inlined function.  */
	    if (cond >= predicate_first_dynamic_condition)
	      {
		struct condition *c;

		c = &(*callee_info->conds)[cond
					   -
					   predicate_first_dynamic_condition];
		/* See if we can remap condition operand to caller's operand.
		   Otherwise give up (conservatively assume true).  */
		if (!operand_map.exists ()
		    || (int) operand_map.length () <= c->operand_num
		    || operand_map[c->operand_num] == -1
		    /* TODO: For non-aggregate conditions, adding an offset is
		       basically an arithmetic jump function processing which
		       we should support in future.  */
		    || ((!c->agg_contents || !c->by_ref)
			&& offset_map[c->operand_num] > 0)
		    || (c->agg_contents && c->by_ref
			&& offset_map[c->operand_num] < 0))
		  cond_predicate = true_predicate ();
		else
		  {
		    struct agg_position_info ap;
		    HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
		    if (offset_delta < 0)
		      {
			gcc_checking_assert (!c->agg_contents || !c->by_ref);
			offset_delta = 0;
		      }
		    gcc_assert (!c->agg_contents
				|| c->by_ref || offset_delta == 0);
		    ap.offset = c->offset + offset_delta;
		    ap.agg_contents = c->agg_contents;
		    ap.by_ref = c->by_ref;
		    cond_predicate = add_condition (info,
						    operand_map[c->operand_num],
						    c->size, &ap, c->code,
						    c->val);
		  }
	      }
	    /* Fixed conditions remains same, construct single
	       condition predicate.  */
	    else
	      {
		cond_predicate.clause[0] = 1 << cond;
		cond_predicate.clause[1] = 0;
	      }
	    clause_predicate = or_predicates (info->conds, &clause_predicate,
					      &cond_predicate);
	  }
      out = and_predicates (info->conds, &out, &clause_predicate);
    }
  return and_predicates (info->conds, &out, toplev_predicate);
}


/* Update summary information of inline clones after inlining.
   Compute peak stack usage.  */

static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
  struct cgraph_edge *e;
  struct inline_summary *callee_info = inline_summaries->get (node);
  struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
  HOST_WIDE_INT peak;

  /* The inlined callee's frame is placed right after the caller's own
     frame; track the peak across the whole inline tree.  */
  callee_info->stack_frame_offset
    = caller_info->stack_frame_offset
    + caller_info->estimated_self_stack_size;
  peak = callee_info->stack_frame_offset
    + callee_info->estimated_self_stack_size;
  if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
    inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
  ipa_propagate_frequency (node);
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	inline_update_callee_summaries (e->callee, depth);
      inline_edge_summary (e)->loop_depth += depth;
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    inline_edge_summary (e)->loop_depth += depth;
}

/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
   changes with probability PROB1 and C is known to be passthrough
   of an argument of B that changes with probability PROB2, the probability
   of change is now PROB1*PROB2.
*/ 3473 3474 static void 3475 remap_edge_change_prob (struct cgraph_edge *inlined_edge, 3476 struct cgraph_edge *edge) 3477 { 3478 if (ipa_node_params_sum) 3479 { 3480 int i; 3481 struct ipa_edge_args *args = IPA_EDGE_REF (edge); 3482 struct inline_edge_summary *es = inline_edge_summary (edge); 3483 struct inline_edge_summary *inlined_es 3484 = inline_edge_summary (inlined_edge); 3485 3486 for (i = 0; i < ipa_get_cs_argument_count (args); i++) 3487 { 3488 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i); 3489 if (jfunc->type == IPA_JF_PASS_THROUGH 3490 && (ipa_get_jf_pass_through_formal_id (jfunc) 3491 < (int) inlined_es->param.length ())) 3492 { 3493 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc); 3494 int prob1 = es->param[i].change_prob; 3495 int prob2 = inlined_es->param[jf_formal_id].change_prob; 3496 int prob = combine_probabilities (prob1, prob2); 3497 3498 if (prob1 && prob2 && !prob) 3499 prob = 1; 3500 3501 es->param[i].change_prob = prob; 3502 } 3503 } 3504 } 3505 } 3506 3507 /* Update edge summaries of NODE after INLINED_EDGE has been inlined. 3508 3509 Remap predicates of callees of NODE. Rest of arguments match 3510 remap_predicate. 3511 3512 Also update change probabilities. 
*/ 3513 3514 static void 3515 remap_edge_summaries (struct cgraph_edge *inlined_edge, 3516 struct cgraph_node *node, 3517 struct inline_summary *info, 3518 struct inline_summary *callee_info, 3519 vec<int> operand_map, 3520 vec<int> offset_map, 3521 clause_t possible_truths, 3522 struct predicate *toplev_predicate) 3523 { 3524 struct cgraph_edge *e, *next; 3525 for (e = node->callees; e; e = next) 3526 { 3527 struct inline_edge_summary *es = inline_edge_summary (e); 3528 struct predicate p; 3529 next = e->next_callee; 3530 3531 if (e->inline_failed) 3532 { 3533 remap_edge_change_prob (inlined_edge, e); 3534 3535 if (es->predicate) 3536 { 3537 p = remap_predicate (info, callee_info, 3538 es->predicate, operand_map, offset_map, 3539 possible_truths, toplev_predicate); 3540 edge_set_predicate (e, &p); 3541 } 3542 else 3543 edge_set_predicate (e, toplev_predicate); 3544 } 3545 else 3546 remap_edge_summaries (inlined_edge, e->callee, info, callee_info, 3547 operand_map, offset_map, possible_truths, 3548 toplev_predicate); 3549 } 3550 for (e = node->indirect_calls; e; e = next) 3551 { 3552 struct inline_edge_summary *es = inline_edge_summary (e); 3553 struct predicate p; 3554 next = e->next_callee; 3555 3556 remap_edge_change_prob (inlined_edge, e); 3557 if (es->predicate) 3558 { 3559 p = remap_predicate (info, callee_info, 3560 es->predicate, operand_map, offset_map, 3561 possible_truths, toplev_predicate); 3562 edge_set_predicate (e, &p); 3563 } 3564 else 3565 edge_set_predicate (e, toplev_predicate); 3566 } 3567 } 3568 3569 /* Same as remap_predicate, but set result into hint *HINT. 
*/ 3570 3571 static void 3572 remap_hint_predicate (struct inline_summary *info, 3573 struct inline_summary *callee_info, 3574 struct predicate **hint, 3575 vec<int> operand_map, 3576 vec<int> offset_map, 3577 clause_t possible_truths, 3578 struct predicate *toplev_predicate) 3579 { 3580 predicate p; 3581 3582 if (!*hint) 3583 return; 3584 p = remap_predicate (info, callee_info, 3585 *hint, 3586 operand_map, offset_map, 3587 possible_truths, toplev_predicate); 3588 if (!false_predicate_p (&p) && !true_predicate_p (&p)) 3589 { 3590 if (!*hint) 3591 set_hint_predicate (hint, p); 3592 else 3593 **hint = and_predicates (info->conds, *hint, &p); 3594 } 3595 } 3596 3597 /* We inlined EDGE. Update summary of the function we inlined into. */ 3598 3599 void 3600 inline_merge_summary (struct cgraph_edge *edge) 3601 { 3602 struct inline_summary *callee_info = inline_summaries->get (edge->callee); 3603 struct cgraph_node *to = (edge->caller->global.inlined_to 3604 ? edge->caller->global.inlined_to : edge->caller); 3605 struct inline_summary *info = inline_summaries->get (to); 3606 clause_t clause = 0; /* not_inline is known to be false. 
*/ 3607 size_time_entry *e; 3608 vec<int> operand_map = vNULL; 3609 vec<int> offset_map = vNULL; 3610 int i; 3611 struct predicate toplev_predicate; 3612 struct predicate true_p = true_predicate (); 3613 struct inline_edge_summary *es = inline_edge_summary (edge); 3614 3615 if (es->predicate) 3616 toplev_predicate = *es->predicate; 3617 else 3618 toplev_predicate = true_predicate (); 3619 3620 if (callee_info->conds) 3621 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL); 3622 if (ipa_node_params_sum && callee_info->conds) 3623 { 3624 struct ipa_edge_args *args = IPA_EDGE_REF (edge); 3625 int count = ipa_get_cs_argument_count (args); 3626 int i; 3627 3628 if (count) 3629 { 3630 operand_map.safe_grow_cleared (count); 3631 offset_map.safe_grow_cleared (count); 3632 } 3633 for (i = 0; i < count; i++) 3634 { 3635 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i); 3636 int map = -1; 3637 3638 /* TODO: handle non-NOPs when merging. */ 3639 if (jfunc->type == IPA_JF_PASS_THROUGH) 3640 { 3641 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR) 3642 map = ipa_get_jf_pass_through_formal_id (jfunc); 3643 if (!ipa_get_jf_pass_through_agg_preserved (jfunc)) 3644 offset_map[i] = -1; 3645 } 3646 else if (jfunc->type == IPA_JF_ANCESTOR) 3647 { 3648 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc); 3649 if (offset >= 0 && offset < INT_MAX) 3650 { 3651 map = ipa_get_jf_ancestor_formal_id (jfunc); 3652 if (!ipa_get_jf_ancestor_agg_preserved (jfunc)) 3653 offset = -1; 3654 offset_map[i] = offset; 3655 } 3656 } 3657 operand_map[i] = map; 3658 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to))); 3659 } 3660 } 3661 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++) 3662 { 3663 struct predicate p = remap_predicate (info, callee_info, 3664 &e->predicate, operand_map, 3665 offset_map, clause, 3666 &toplev_predicate); 3667 if (!false_predicate_p (&p)) 3668 { 3669 gcov_type add_time = ((gcov_type) e->time * edge->frequency 3670 
+ CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE; 3671 int prob = predicate_probability (callee_info->conds, 3672 &e->predicate, 3673 clause, es->param); 3674 add_time = apply_probability ((gcov_type) add_time, prob); 3675 if (add_time > MAX_TIME * INLINE_TIME_SCALE) 3676 add_time = MAX_TIME * INLINE_TIME_SCALE; 3677 if (prob != REG_BR_PROB_BASE 3678 && dump_file && (dump_flags & TDF_DETAILS)) 3679 { 3680 fprintf (dump_file, "\t\tScaling time by probability:%f\n", 3681 (double) prob / REG_BR_PROB_BASE); 3682 } 3683 account_size_time (info, e->size, add_time, &p); 3684 } 3685 } 3686 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map, 3687 offset_map, clause, &toplev_predicate); 3688 remap_hint_predicate (info, callee_info, 3689 &callee_info->loop_iterations, 3690 operand_map, offset_map, clause, &toplev_predicate); 3691 remap_hint_predicate (info, callee_info, 3692 &callee_info->loop_stride, 3693 operand_map, offset_map, clause, &toplev_predicate); 3694 remap_hint_predicate (info, callee_info, 3695 &callee_info->array_index, 3696 operand_map, offset_map, clause, &toplev_predicate); 3697 3698 inline_update_callee_summaries (edge->callee, 3699 inline_edge_summary (edge)->loop_depth); 3700 3701 /* We do not maintain predicates of inlined edges, free it. */ 3702 edge_set_predicate (edge, &true_p); 3703 /* Similarly remove param summaries. */ 3704 es->param.release (); 3705 operand_map.release (); 3706 offset_map.release (); 3707 } 3708 3709 /* For performance reasons inline_merge_summary is not updating overall size 3710 and time. Recompute it. 
*/ 3711 3712 void 3713 inline_update_overall_summary (struct cgraph_node *node) 3714 { 3715 struct inline_summary *info = inline_summaries->get (node); 3716 size_time_entry *e; 3717 int i; 3718 3719 info->size = 0; 3720 info->time = 0; 3721 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++) 3722 { 3723 info->size += e->size, info->time += e->time; 3724 if (info->time > MAX_TIME * INLINE_TIME_SCALE) 3725 info->time = MAX_TIME * INLINE_TIME_SCALE; 3726 } 3727 estimate_calls_size_and_time (node, &info->size, &info->min_size, 3728 &info->time, NULL, 3729 ~(clause_t) (1 << predicate_false_condition), 3730 vNULL, vNULL, vNULL); 3731 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE; 3732 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE; 3733 } 3734 3735 /* Return hints derrived from EDGE. */ 3736 int 3737 simple_edge_hints (struct cgraph_edge *edge) 3738 { 3739 int hints = 0; 3740 struct cgraph_node *to = (edge->caller->global.inlined_to 3741 ? edge->caller->global.inlined_to : edge->caller); 3742 struct cgraph_node *callee = edge->callee->ultimate_alias_target (); 3743 if (inline_summaries->get (to)->scc_no 3744 && inline_summaries->get (to)->scc_no 3745 == inline_summaries->get (callee)->scc_no 3746 && !edge->recursive_p ()) 3747 hints |= INLINE_HINT_same_scc; 3748 3749 if (callee->lto_file_data && edge->caller->lto_file_data 3750 && edge->caller->lto_file_data != callee->lto_file_data 3751 && !callee->merged_comdat && !callee->icf_merged) 3752 hints |= INLINE_HINT_cross_module; 3753 3754 return hints; 3755 } 3756 3757 /* Estimate the time cost for the caller when inlining EDGE. 3758 Only to be called via estimate_edge_time, that handles the 3759 caching mechanism. 3760 3761 When caching, also update the cache entry. Compute both time and 3762 size, since we always need both metrics eventually. 
*/

int
do_estimate_edge_time (struct cgraph_edge *edge)
{
  int time;
  int size;
  inline_hints hints;
  struct cgraph_node *callee;
  clause_t clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  struct inline_edge_summary *es = inline_edge_summary (edge);
  int min_size;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
			       known_aggs, &size, &min_size, &time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if (edge->count && edge->maybe_hot_p ()
      && (edge->count * 2
	  > (edge->caller->global.inlined_to
	     ? edge->caller->global.inlined_to->count : edge->caller->count)))
    hints |= INLINE_HINT_known_hot;

  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache.exists ())
    {
      inline_summaries->get (edge->callee)->min_size = min_size;
      if ((int) edge_growth_cache.length () <= edge->uid)
	edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
      /* Cached values are biased by one so that 0 can mean "entry not
	 computed yet"; TIME and SIZE are asserted non-negative above, so
	 the bias is always applied here and removed on lookup.  */
      edge_growth_cache[edge->uid].time = time + (time >= 0);

      edge_growth_cache[edge->uid].size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      edge_growth_cache[edge->uid].hints = hints + 1;
    }
  return time;
}


/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache[edge->uid].size;
      gcc_checking_assert (size);
      /* Undo the +1 bias used to distinguish unset cache entries.  */
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
			       known_aggs, &size, NULL, NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}


/* Estimate the inlining hints for EDGE.
   Only to be called via estimate_edge_hints, that handles the caching
   mechanism.
*/ 3860 3861 inline_hints 3862 do_estimate_edge_hints (struct cgraph_edge *edge) 3863 { 3864 inline_hints hints; 3865 struct cgraph_node *callee; 3866 clause_t clause; 3867 vec<tree> known_vals; 3868 vec<ipa_polymorphic_call_context> known_contexts; 3869 vec<ipa_agg_jump_function_p> known_aggs; 3870 3871 /* When we do caching, use do_estimate_edge_time to populate the entry. */ 3872 3873 if (edge_growth_cache.exists ()) 3874 { 3875 do_estimate_edge_time (edge); 3876 hints = edge_growth_cache[edge->uid].hints; 3877 gcc_checking_assert (hints); 3878 return hints - 1; 3879 } 3880 3881 callee = edge->callee->ultimate_alias_target (); 3882 3883 /* Early inliner runs without caching, go ahead and do the dirty work. */ 3884 gcc_checking_assert (edge->inline_failed); 3885 evaluate_properties_for_edge (edge, true, 3886 &clause, &known_vals, &known_contexts, 3887 &known_aggs); 3888 estimate_node_size_and_time (callee, clause, known_vals, known_contexts, 3889 known_aggs, NULL, NULL, NULL, &hints, vNULL); 3890 known_vals.release (); 3891 known_contexts.release (); 3892 known_aggs.release (); 3893 hints |= simple_edge_hints (edge); 3894 return hints; 3895 } 3896 3897 3898 /* Estimate self time of the function NODE after inlining EDGE. */ 3899 3900 int 3901 estimate_time_after_inlining (struct cgraph_node *node, 3902 struct cgraph_edge *edge) 3903 { 3904 struct inline_edge_summary *es = inline_edge_summary (edge); 3905 if (!es->predicate || !false_predicate_p (es->predicate)) 3906 { 3907 gcov_type time = 3908 inline_summaries->get (node)->time + estimate_edge_time (edge); 3909 if (time < 0) 3910 time = 0; 3911 if (time > MAX_TIME) 3912 time = MAX_TIME; 3913 return time; 3914 } 3915 return inline_summaries->get (node)->time; 3916 } 3917 3918 3919 /* Estimate the size of NODE after inlining EDGE which should be an 3920 edge to either NODE or a call inlined into NODE. 
*/

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  struct inline_edge_summary *es = inline_edge_summary (edge);
  /* If the edge is known never executed, inlining it changes nothing.  */
  if (!es->predicate || !false_predicate_p (es->predicate))
    {
      int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return inline_summaries->get (node)->size;
}


/* Data gathered across all callers by do_estimate_growth_1.  */

struct growth_data
{
  /* Node whose inlining into all callers is being estimated.  */
  struct cgraph_node *node;
  /* Set when some caller is the node itself (recursion).  */
  bool self_recursive;
  /* Set when some edge can never be inlined.  */
  bool uninlinable;
  /* Accumulated unit growth.  */
  int growth;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	{
	  d->uninlinable = true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
    }
  return false;
}


/* Estimate the growth caused by inlining NODE into all callees.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct inline_summary *info = inline_summaries->get (node);

  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      /* If the function disappears after inlining everywhere, its body
	 size is saved.  */
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}

/* Verify if there are fewer than MAX_CALLERS.  */

static bool
check_callers (cgraph_node *node, int *max_callers)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      (*max_callers)--;
      if (!*max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  /* Aliases share callers with the node; count them too.  */
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
      return true;

  return false;
}


/* Make cheap estimation if growth of NODE is likely positive knowing
   EDGE_GROWTH of one particular edge.
   We assume that most of other edges will have similar growth
   and skip computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  /* Bound on how many callers could be inlined into before the saved
     body size is outweighed by the per-edge growth.  */
  max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play unsafe with
     COMDATs.  We can allow that since we know functions
     in consideration are small (and thus risk is small) and
     moreover grow estimates already accounts that COMDAT
     functions may or may not disappear when eliminated from
     current unit.  With good probability making aggressive
     choice in all units is going to make overall program
     smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  return estimate_growth (node) > 0;
}


/* This function performs intraprocedural analysis in NODE that is required to
   inline indirect calls.  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}


/* Note function body size.
*/

void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
	     node->name (), node->order);
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_inline_parameters (node, false);
  /* When not optimizing, mark all edges uninlinable so that only edge
     redirection (not real inlining) happens later.  */
  if (!optimize)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
    }

  pop_cfun ();
}


/* Called when new function is inserted to callgraph late.  */

void
inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
{
  inline_analyze_function (node);
}

/* Compute inline summaries (body size, time, parameters) for all defined
   functions; entry point of the whole-callgraph analysis.  */

void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (DECL_STRUCT_FUNCTION (node->decl))
      node->local.versionable = tree_versionable_function_p (node->decl);

  /* When not optimizing, do not bother to analyze.  Inlining is still done
     because edge redirection needs to happen there.  */
  if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
    return;

  if (!inline_summaries)
    inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);

  inline_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();
  inline_free_summary ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      inline_analyze_function (node);
}


/* Read predicate from IB.  */

static struct predicate
read_predicate (struct lto_input_block *ib)
{
  struct predicate out;
  clause_t clause;
  int k = 0;

  /* Clauses are streamed as a zero-terminated sequence.  */
  do
    {
      gcc_assert (k <= MAX_CLAUSES);
      clause = out.clause[k++] = streamer_read_uhwi (ib);
    }
  while (clause);

  /* Zero-initialize the remaining clauses in OUT.  */
  while (k <= MAX_CLAUSES)
    out.clause[k++] = 0;

  return out;
}


/* Read inline edge summary for edge E from IB.  */

static void
read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  struct predicate p;
  int length, i;

  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);
  p = read_predicate (ib);
  edge_set_predicate (e, &p);
  /* Per-parameter change probabilities, if any were streamed.  */
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}


/* Stream in inline summaries from the section.
*/

/* Stream in inline summaries for all functions recorded in the section
   DATA (of length LEN) of FILE_DATA.  Must mirror the layout produced
   by inline_write_summary.  */

static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
		     size_t len)
{
  const struct lto_function_header *header =
    (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  struct data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
		      file_data->mode_table);

  data_in =
    lto_data_in_create (file_data, (const char *) data + string_offset,
			header->string_size, vNULL);
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      struct inline_summary *info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
								index));
      info = inline_summaries->get (node);

      /* NOTE(review): the writer emits these three fields with
	 streamer_write_hwi but they are read back with
	 streamer_read_uhwi -- this relies on the two varint encodings
	 agreeing for the values actually streamed; verify.  */
      info->estimated_stack_size
	= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
      info->size = info->self_size = streamer_read_uhwi (&ib);
      info->time = info->self_time = streamer_read_uhwi (&ib);

      bp = streamer_read_bitpack (&ib);
      info->inlinable = bp_unpack_value (&bp, 1);
      info->contains_cilk_spawn = bp_unpack_value (&bp, 1);

      /* Conditions referenced by the predicates below.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->conds);
      for (j = 0; j < count2; j++)
	{
	  struct condition c;
	  c.operand_num = streamer_read_uhwi (&ib);
	  c.size = streamer_read_uhwi (&ib);
	  c.code = (enum tree_code) streamer_read_uhwi (&ib);
	  c.val = stream_read_tree (&ib, data_in);
	  bp = streamer_read_bitpack (&ib);
	  c.agg_contents = bp_unpack_value (&bp, 1);
	  c.by_ref = bp_unpack_value (&bp, 1);
	  /* Offset is only streamed for aggregate-content conditions.  */
	  if (c.agg_contents)
	    c.offset = streamer_read_uhwi (&ib);
	  vec_safe_push (info->conds, c);
	}
      /* Size/time table, each entry guarded by a predicate.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->entry);
      for (j = 0; j < count2; j++)
	{
	  struct size_time_entry e;	/* Shadows the edge pointer E above.  */

	  e.size = streamer_read_uhwi (&ib);
	  e.time = streamer_read_uhwi (&ib);
	  e.predicate = read_predicate (&ib);

	  vec_safe_push (info->entry, e);
	}

      /* Hint predicates, in the same order the writer emits them.  */
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_iterations, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_stride, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->array_index, p);
      for (e = node->callees; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
      for (e = node->indirect_calls; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
    }

  lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
			 len);
  lto_data_in_delete (data_in);
}


/* Read inline summary.  Jump functions are shared among ipa-cp
   and inliner, so when ipa-cp is active, we don't need to write them
   twice.  */

void
inline_read_summary (void)
{
  struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
  struct lto_file_decl_data *file_data;
  unsigned int j = 0;

  inline_summary_alloc ();

  while ((file_data = file_data_vec[j++]))
    {
      size_t len;
      const char *data = lto_get_section_data (file_data,
					       LTO_section_inline_summary,
					       NULL, &len);
      if (data)
	inline_read_section (file_data, data, len);
      else
	/* Fatal error here.  We do not want to support compiling ltrans units
	   with different version of compiler or different flags than the WPA
	   unit, so this should never happen.  */
	fatal_error (input_location,
		     "ipa inline summary is missing in input file");
    }
  if (optimize)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
	ipa_prop_read_jump_functions ();
    }

  gcc_assert (inline_summaries);
  inline_summaries->enable_insertion_hook ();
}


/* Write predicate P to OB.  Streamed as the non-zero clauses followed
   by a zero terminator; a NULL P streams as just the terminator.  */

static void
write_predicate (struct output_block *ob, struct predicate *p)
{
  int j;
  if (p)
    for (j = 0; p->clause[j]; j++)
      {
	gcc_assert (j < MAX_CLAUSES);
	streamer_write_uhwi (ob, p->clause[j]);
      }
  streamer_write_uhwi (ob, 0);
}


/* Write inline summary for edge E to OB.  Mirrored by
   read_inline_edge_summary.  */

static void
write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  int i;

  streamer_write_uhwi (ob, es->call_stmt_size);
  streamer_write_uhwi (ob, es->call_stmt_time);
  streamer_write_uhwi (ob, es->loop_depth);
  write_predicate (ob, es->predicate);
  streamer_write_uhwi (ob, es->param.length ());
  for (i = 0; i < (int) es->param.length (); i++)
    streamer_write_uhwi (ob, es->param[i].change_prob);
}


/* Write inline summary for node in SET.
   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
   active, we don't need to write them twice.
*/ 4397 4398 void 4399 inline_write_summary (void) 4400 { 4401 struct cgraph_node *node; 4402 struct output_block *ob = create_output_block (LTO_section_inline_summary); 4403 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder; 4404 unsigned int count = 0; 4405 int i; 4406 4407 for (i = 0; i < lto_symtab_encoder_size (encoder); i++) 4408 { 4409 symtab_node *snode = lto_symtab_encoder_deref (encoder, i); 4410 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode); 4411 if (cnode && cnode->definition && !cnode->alias) 4412 count++; 4413 } 4414 streamer_write_uhwi (ob, count); 4415 4416 for (i = 0; i < lto_symtab_encoder_size (encoder); i++) 4417 { 4418 symtab_node *snode = lto_symtab_encoder_deref (encoder, i); 4419 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode); 4420 if (cnode && (node = cnode)->definition && !node->alias) 4421 { 4422 struct inline_summary *info = inline_summaries->get (node); 4423 struct bitpack_d bp; 4424 struct cgraph_edge *edge; 4425 int i; 4426 size_time_entry *e; 4427 struct condition *c; 4428 4429 streamer_write_uhwi (ob, 4430 lto_symtab_encoder_encode (encoder, 4431 4432 node)); 4433 streamer_write_hwi (ob, info->estimated_self_stack_size); 4434 streamer_write_hwi (ob, info->self_size); 4435 streamer_write_hwi (ob, info->self_time); 4436 bp = bitpack_create (ob->main_stream); 4437 bp_pack_value (&bp, info->inlinable, 1); 4438 bp_pack_value (&bp, info->contains_cilk_spawn, 1); 4439 streamer_write_bitpack (&bp); 4440 streamer_write_uhwi (ob, vec_safe_length (info->conds)); 4441 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++) 4442 { 4443 streamer_write_uhwi (ob, c->operand_num); 4444 streamer_write_uhwi (ob, c->size); 4445 streamer_write_uhwi (ob, c->code); 4446 stream_write_tree (ob, c->val, true); 4447 bp = bitpack_create (ob->main_stream); 4448 bp_pack_value (&bp, c->agg_contents, 1); 4449 bp_pack_value (&bp, c->by_ref, 1); 4450 streamer_write_bitpack (&bp); 4451 if (c->agg_contents) 4452 streamer_write_uhwi (ob, 
c->offset); 4453 } 4454 streamer_write_uhwi (ob, vec_safe_length (info->entry)); 4455 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++) 4456 { 4457 streamer_write_uhwi (ob, e->size); 4458 streamer_write_uhwi (ob, e->time); 4459 write_predicate (ob, &e->predicate); 4460 } 4461 write_predicate (ob, info->loop_iterations); 4462 write_predicate (ob, info->loop_stride); 4463 write_predicate (ob, info->array_index); 4464 for (edge = node->callees; edge; edge = edge->next_callee) 4465 write_inline_edge_summary (ob, edge); 4466 for (edge = node->indirect_calls; edge; edge = edge->next_callee) 4467 write_inline_edge_summary (ob, edge); 4468 } 4469 } 4470 streamer_write_char_stream (ob->main_stream, 0); 4471 produce_asm (ob, NULL); 4472 destroy_output_block (ob); 4473 4474 if (optimize && !flag_ipa_cp) 4475 ipa_prop_write_jump_functions (); 4476 } 4477 4478 4479 /* Release inline summary. */ 4480 4481 void 4482 inline_free_summary (void) 4483 { 4484 struct cgraph_node *node; 4485 if (edge_removal_hook_holder) 4486 symtab->remove_edge_removal_hook (edge_removal_hook_holder); 4487 edge_removal_hook_holder = NULL; 4488 if (edge_duplication_hook_holder) 4489 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder); 4490 edge_duplication_hook_holder = NULL; 4491 if (!inline_edge_summary_vec.exists ()) 4492 return; 4493 FOR_EACH_DEFINED_FUNCTION (node) 4494 if (!node->alias) 4495 reset_inline_summary (node, inline_summaries->get (node)); 4496 inline_summaries->release (); 4497 inline_summaries = NULL; 4498 inline_edge_summary_vec.release (); 4499 edge_predicate_pool.release (); 4500 } 4501