xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/ipa-inline-analysis.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Inlining decision heuristics.
2    Copyright (C) 2003-2017 Free Software Foundation, Inc.
3    Contributed by Jan Hubicka
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* Analysis used by the inliner and other passes limiting code size growth.
22 
23    We estimate for each function
24      - function body size
25      - average function execution time
26      - inlining size benefit (that is how much of function body size
27        and its call sequence is expected to disappear by inlining)
28      - inlining time benefit
29      - function frame size
30    For each call
31      - call statement size and time
32 
33    inline_summary data structures store above information locally (i.e.
34    parameters of the function itself) and globally (i.e. parameters of
35    the function created by applying all the inline decisions already
36    present in the callgraph).
37 
38    We provide access to the inline_summary data structure and
39    basic logic updating the parameters when inlining is performed.
40 
41    The summaries are context sensitive.  Context means
42      1) partial assignment of known constant values of operands
43      2) whether function is inlined into the call or not.
44    It is easy to add more variants.  To represent function size and time
45    that depends on context (i.e. it is known to be optimized away when
46    context is known either by inlining or from IP-CP and cloning),
47    we use predicates. Predicates are logical formulas in
48    conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49    specifying what conditions must be true. Conditions are simple test
50    of the form described above.
51 
52    In order to make predicate (possibly) true, all of its clauses must
53    be (possibly) true. To make clause (possibly) true, one of conditions
54    it mentions must be (possibly) true.  There are fixed bounds on
55    number of clauses and conditions and all the manipulation functions
56    are conservative in positive direction. I.e. we may lose precision
57    by thinking that predicate may be true even when it is not.
58 
59    estimate_edge_size and estimate_edge_growth can be used to query
60    function size/time in the given context.  inline_merge_summary merges
61    properties of caller and callee after inlining.
62 
63    Finally pass_inline_parameters is exported.  This is used to drive
64    computation of function parameters used by the early inliner. IPA
65    inlined performs analysis via its analyze_function method. */
66 
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "alloc-pool.h"
74 #include "tree-pass.h"
75 #include "ssa.h"
76 #include "tree-streamer.h"
77 #include "cgraph.h"
78 #include "diagnostic.h"
79 #include "fold-const.h"
80 #include "print-tree.h"
81 #include "tree-inline.h"
82 #include "gimple-pretty-print.h"
83 #include "params.h"
84 #include "cfganal.h"
85 #include "gimple-iterator.h"
86 #include "tree-cfg.h"
87 #include "tree-ssa-loop-niter.h"
88 #include "tree-ssa-loop.h"
89 #include "symbol-summary.h"
90 #include "ipa-prop.h"
91 #include "ipa-inline.h"
92 #include "cfgloop.h"
93 #include "tree-scalar-evolution.h"
94 #include "ipa-utils.h"
95 #include "cilk.h"
96 #include "cfgexpand.h"
97 #include "gimplify.h"
98 
99 /* Estimate runtime of function can easilly run into huge numbers with many
100    nested loops.  Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
101    integer.  For anything larger we use gcov_type.  */
102 #define MAX_TIME 500000
103 
104 /* Number of bits in integer, but we really want to be stable across different
105    hosts.  */
106 #define NUM_CONDITIONS 32
107 
108 enum predicate_conditions
109 {
110   predicate_false_condition = 0,
111   predicate_not_inlined_condition = 1,
112   predicate_first_dynamic_condition = 2
113 };
114 
115 /* Special condition code we use to represent test that operand is compile time
116    constant.  */
117 #define IS_NOT_CONSTANT ERROR_MARK
118 /* Special condition code we use to represent test that operand is not changed
119    across invocation of the function.  When operand IS_NOT_CONSTANT it is always
120    CHANGED, however i.e. loop invariants can be NOT_CHANGED given percentage
121    of executions even when they are not compile time constants.  */
122 #define CHANGED IDENTIFIER_NODE
123 
124 /* Holders of ipa cgraph hooks: */
125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
126 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
127 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
128 static void inline_edge_duplication_hook (struct cgraph_edge *,
129 					  struct cgraph_edge *, void *);
130 
131 /* VECtor holding inline summaries.
132    In GGC memory because conditions might point to constant trees.  */
133 function_summary <inline_summary *> *inline_summaries;
134 vec<inline_edge_summary_t> inline_edge_summary_vec;
135 
136 /* Cached node/edge growths.  */
137 vec<edge_growth_cache_entry> edge_growth_cache;
138 
139 /* Edge predicates goes here.  */
140 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
141 
142 /* Return true predicate (tautology).
143    We represent it by empty list of clauses.  */
144 
145 static inline struct predicate
146 true_predicate (void)
147 {
148   struct predicate p;
149   p.clause[0] = 0;
150   return p;
151 }
152 
153 
154 /* Return predicate testing single condition number COND.  */
155 
156 static inline struct predicate
157 single_cond_predicate (int cond)
158 {
159   struct predicate p;
160   p.clause[0] = 1 << cond;
161   p.clause[1] = 0;
162   return p;
163 }
164 
165 
166 /* Return false predicate.  First clause require false condition.  */
167 
168 static inline struct predicate
169 false_predicate (void)
170 {
171   return single_cond_predicate (predicate_false_condition);
172 }
173 
174 
175 /* Return true if P is (true).  */
176 
177 static inline bool
178 true_predicate_p (struct predicate *p)
179 {
180   return !p->clause[0];
181 }
182 
183 
184 /* Return true if P is (false).  */
185 
186 static inline bool
187 false_predicate_p (struct predicate *p)
188 {
189   if (p->clause[0] == (1 << predicate_false_condition))
190     {
191       gcc_checking_assert (!p->clause[1]
192 			   && p->clause[0] == 1 << predicate_false_condition);
193       return true;
194     }
195   return false;
196 }
197 
198 
199 /* Return predicate that is set true when function is not inlined.  */
200 
201 static inline struct predicate
202 not_inlined_predicate (void)
203 {
204   return single_cond_predicate (predicate_not_inlined_condition);
205 }
206 
207 /* Simple description of whether a memory load or a condition refers to a load
208    from an aggregate and if so, how and where from in the aggregate.
209    Individual fields have the same meaning like fields with the same name in
210    struct condition.  */
211 
212 struct agg_position_info
213 {
214   HOST_WIDE_INT offset;
215   bool agg_contents;
216   bool by_ref;
217 };
218 
219 /* Add condition to condition list SUMMARY. OPERAND_NUM, SIZE, CODE and VAL
220    correspond to fields of condition structure.  AGGPOS describes whether the
221    used operand is loaded from an aggregate and where in the aggregate it is.
222    It can be NULL, which means this not a load from an aggregate.  */
223 
224 static struct predicate
225 add_condition (struct inline_summary *summary, int operand_num,
226 	       HOST_WIDE_INT size, struct agg_position_info *aggpos,
227 	       enum tree_code code, tree val)
228 {
229   int i;
230   struct condition *c;
231   struct condition new_cond;
232   HOST_WIDE_INT offset;
233   bool agg_contents, by_ref;
234 
235   if (aggpos)
236     {
237       offset = aggpos->offset;
238       agg_contents = aggpos->agg_contents;
239       by_ref = aggpos->by_ref;
240     }
241   else
242     {
243       offset = 0;
244       agg_contents = false;
245       by_ref = false;
246     }
247 
248   gcc_checking_assert (operand_num >= 0);
249   for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
250     {
251       if (c->operand_num == operand_num
252 	  && c->size == size
253 	  && c->code == code
254 	  && c->val == val
255 	  && c->agg_contents == agg_contents
256 	  && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
257 	return single_cond_predicate (i + predicate_first_dynamic_condition);
258     }
259   /* Too many conditions.  Give up and return constant true.  */
260   if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
261     return true_predicate ();
262 
263   new_cond.operand_num = operand_num;
264   new_cond.code = code;
265   new_cond.val = val;
266   new_cond.agg_contents = agg_contents;
267   new_cond.by_ref = by_ref;
268   new_cond.offset = offset;
269   new_cond.size = size;
270   vec_safe_push (summary->conds, new_cond);
271   return single_cond_predicate (i + predicate_first_dynamic_condition);
272 }
273 
274 
275 /* Add clause CLAUSE into the predicate P.  */
276 
277 static inline void
278 add_clause (conditions conditions, struct predicate *p, clause_t clause)
279 {
280   int i;
281   int i2;
282   int insert_here = -1;
283   int c1, c2;
284 
285   /* True clause.  */
286   if (!clause)
287     return;
288 
289   /* False clause makes the whole predicate false.  Kill the other variants.  */
290   if (clause == (1 << predicate_false_condition))
291     {
292       p->clause[0] = (1 << predicate_false_condition);
293       p->clause[1] = 0;
294       return;
295     }
296   if (false_predicate_p (p))
297     return;
298 
299   /* No one should be silly enough to add false into nontrivial clauses.  */
300   gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
301 
302   /* Look where to insert the clause.  At the same time prune out
303      clauses of P that are implied by the new clause and thus
304      redundant.  */
305   for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
306     {
307       p->clause[i2] = p->clause[i];
308 
309       if (!p->clause[i])
310 	break;
311 
312       /* If p->clause[i] implies clause, there is nothing to add.  */
313       if ((p->clause[i] & clause) == p->clause[i])
314 	{
315 	  /* We had nothing to add, none of clauses should've become
316 	     redundant.  */
317 	  gcc_checking_assert (i == i2);
318 	  return;
319 	}
320 
321       if (p->clause[i] < clause && insert_here < 0)
322 	insert_here = i2;
323 
324       /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
325          Otherwise the p->clause[i] has to stay.  */
326       if ((p->clause[i] & clause) != clause)
327 	i2++;
328     }
329 
330   /* Look for clauses that are obviously true.  I.e.
331      op0 == 5 || op0 != 5.  */
332   for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
333     {
334       condition *cc1;
335       if (!(clause & (1 << c1)))
336 	continue;
337       cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
338       /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
339          and thus there is no point for looking for them.  */
340       if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
341 	continue;
342       for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
343 	if (clause & (1 << c2))
344 	  {
345 	    condition *cc1 =
346 	      &(*conditions)[c1 - predicate_first_dynamic_condition];
347 	    condition *cc2 =
348 	      &(*conditions)[c2 - predicate_first_dynamic_condition];
349 	    if (cc1->operand_num == cc2->operand_num
350 		&& cc1->val == cc2->val
351 		&& cc2->code != IS_NOT_CONSTANT
352 		&& cc2->code != CHANGED
353 		&& cc1->code == invert_tree_comparison (cc2->code,
354 							HONOR_NANS (cc1->val)))
355 	      return;
356 	  }
357     }
358 
359 
360   /* We run out of variants.  Be conservative in positive direction.  */
361   if (i2 == MAX_CLAUSES)
362     return;
363   /* Keep clauses in decreasing order. This makes equivalence testing easy.  */
364   p->clause[i2 + 1] = 0;
365   if (insert_here >= 0)
366     for (; i2 > insert_here; i2--)
367       p->clause[i2] = p->clause[i2 - 1];
368   else
369     insert_here = i2;
370   p->clause[insert_here] = clause;
371 }
372 
373 
374 /* Return P & P2.  */
375 
376 static struct predicate
377 and_predicates (conditions conditions,
378 		struct predicate *p, struct predicate *p2)
379 {
380   struct predicate out = *p;
381   int i;
382 
383   /* Avoid busy work.  */
384   if (false_predicate_p (p2) || true_predicate_p (p))
385     return *p2;
386   if (false_predicate_p (p) || true_predicate_p (p2))
387     return *p;
388 
389   /* See how far predicates match.  */
390   for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
391     {
392       gcc_checking_assert (i < MAX_CLAUSES);
393     }
394 
395   /* Combine the predicates rest.  */
396   for (; p2->clause[i]; i++)
397     {
398       gcc_checking_assert (i < MAX_CLAUSES);
399       add_clause (conditions, &out, p2->clause[i]);
400     }
401   return out;
402 }
403 
404 
405 /* Return true if predicates are obviously equal.  */
406 
407 static inline bool
408 predicates_equal_p (struct predicate *p, struct predicate *p2)
409 {
410   int i;
411   for (i = 0; p->clause[i]; i++)
412     {
413       gcc_checking_assert (i < MAX_CLAUSES);
414       gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
415       gcc_checking_assert (!p2->clause[i]
416 			   || p2->clause[i] > p2->clause[i + 1]);
417       if (p->clause[i] != p2->clause[i])
418 	return false;
419     }
420   return !p2->clause[i];
421 }
422 
423 
424 /* Return P | P2.  */
425 
426 static struct predicate
427 or_predicates (conditions conditions,
428 	       struct predicate *p, struct predicate *p2)
429 {
430   struct predicate out = true_predicate ();
431   int i, j;
432 
433   /* Avoid busy work.  */
434   if (false_predicate_p (p2) || true_predicate_p (p))
435     return *p;
436   if (false_predicate_p (p) || true_predicate_p (p2))
437     return *p2;
438   if (predicates_equal_p (p, p2))
439     return *p;
440 
441   /* OK, combine the predicates.  */
442   for (i = 0; p->clause[i]; i++)
443     for (j = 0; p2->clause[j]; j++)
444       {
445 	gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
446 	add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
447       }
448   return out;
449 }
450 
451 
452 /* Having partial truth assignment in POSSIBLE_TRUTHS, return false
453    if predicate P is known to be false.  */
454 
455 static bool
456 evaluate_predicate (struct predicate *p, clause_t possible_truths)
457 {
458   int i;
459 
460   /* True remains true.  */
461   if (true_predicate_p (p))
462     return true;
463 
464   gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
465 
466   /* See if we can find clause we can disprove.  */
467   for (i = 0; p->clause[i]; i++)
468     {
469       gcc_checking_assert (i < MAX_CLAUSES);
470       if (!(p->clause[i] & possible_truths))
471 	return false;
472     }
473   return true;
474 }
475 
476 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
477    instruction will be recomputed per invocation of the inlined call.  */
478 
479 static int
480 predicate_probability (conditions conds,
481 		       struct predicate *p, clause_t possible_truths,
482 		       vec<inline_param_summary> inline_param_summary)
483 {
484   int i;
485   int combined_prob = REG_BR_PROB_BASE;
486 
487   /* True remains true.  */
488   if (true_predicate_p (p))
489     return REG_BR_PROB_BASE;
490 
491   if (false_predicate_p (p))
492     return 0;
493 
494   gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
495 
496   /* See if we can find clause we can disprove.  */
497   for (i = 0; p->clause[i]; i++)
498     {
499       gcc_checking_assert (i < MAX_CLAUSES);
500       if (!(p->clause[i] & possible_truths))
501 	return 0;
502       else
503 	{
504 	  int this_prob = 0;
505 	  int i2;
506 	  if (!inline_param_summary.exists ())
507 	    return REG_BR_PROB_BASE;
508 	  for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
509 	    if ((p->clause[i] & possible_truths) & (1 << i2))
510 	      {
511 		if (i2 >= predicate_first_dynamic_condition)
512 		  {
513 		    condition *c =
514 		      &(*conds)[i2 - predicate_first_dynamic_condition];
515 		    if (c->code == CHANGED
516 			&& (c->operand_num <
517 			    (int) inline_param_summary.length ()))
518 		      {
519 			int iprob =
520 			  inline_param_summary[c->operand_num].change_prob;
521 			this_prob = MAX (this_prob, iprob);
522 		      }
523 		    else
524 		      this_prob = REG_BR_PROB_BASE;
525 		  }
526 		else
527 		  this_prob = REG_BR_PROB_BASE;
528 	      }
529 	  combined_prob = MIN (this_prob, combined_prob);
530 	  if (!combined_prob)
531 	    return 0;
532 	}
533     }
534   return combined_prob;
535 }
536 
537 
538 /* Dump conditional COND.  */
539 
540 static void
541 dump_condition (FILE *f, conditions conditions, int cond)
542 {
543   condition *c;
544   if (cond == predicate_false_condition)
545     fprintf (f, "false");
546   else if (cond == predicate_not_inlined_condition)
547     fprintf (f, "not inlined");
548   else
549     {
550       c = &(*conditions)[cond - predicate_first_dynamic_condition];
551       fprintf (f, "op%i", c->operand_num);
552       if (c->agg_contents)
553 	fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
554 		 c->by_ref ? "ref " : "", c->offset);
555       if (c->code == IS_NOT_CONSTANT)
556 	{
557 	  fprintf (f, " not constant");
558 	  return;
559 	}
560       if (c->code == CHANGED)
561 	{
562 	  fprintf (f, " changed");
563 	  return;
564 	}
565       fprintf (f, " %s ", op_symbol_code (c->code));
566       print_generic_expr (f, c->val, 1);
567     }
568 }
569 
570 
571 /* Dump clause CLAUSE.  */
572 
573 static void
574 dump_clause (FILE *f, conditions conds, clause_t clause)
575 {
576   int i;
577   bool found = false;
578   fprintf (f, "(");
579   if (!clause)
580     fprintf (f, "true");
581   for (i = 0; i < NUM_CONDITIONS; i++)
582     if (clause & (1 << i))
583       {
584 	if (found)
585 	  fprintf (f, " || ");
586 	found = true;
587 	dump_condition (f, conds, i);
588       }
589   fprintf (f, ")");
590 }
591 
592 
593 /* Dump predicate PREDICATE.  */
594 
595 static void
596 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
597 {
598   int i;
599   if (true_predicate_p (pred))
600     dump_clause (f, conds, 0);
601   else
602     for (i = 0; pred->clause[i]; i++)
603       {
604 	if (i)
605 	  fprintf (f, " && ");
606 	dump_clause (f, conds, pred->clause[i]);
607       }
608   fprintf (f, "\n");
609 }
610 
611 
612 /* Dump inline hints.  */
613 void
614 dump_inline_hints (FILE *f, inline_hints hints)
615 {
616   if (!hints)
617     return;
618   fprintf (f, "inline hints:");
619   if (hints & INLINE_HINT_indirect_call)
620     {
621       hints &= ~INLINE_HINT_indirect_call;
622       fprintf (f, " indirect_call");
623     }
624   if (hints & INLINE_HINT_loop_iterations)
625     {
626       hints &= ~INLINE_HINT_loop_iterations;
627       fprintf (f, " loop_iterations");
628     }
629   if (hints & INLINE_HINT_loop_stride)
630     {
631       hints &= ~INLINE_HINT_loop_stride;
632       fprintf (f, " loop_stride");
633     }
634   if (hints & INLINE_HINT_same_scc)
635     {
636       hints &= ~INLINE_HINT_same_scc;
637       fprintf (f, " same_scc");
638     }
639   if (hints & INLINE_HINT_in_scc)
640     {
641       hints &= ~INLINE_HINT_in_scc;
642       fprintf (f, " in_scc");
643     }
644   if (hints & INLINE_HINT_cross_module)
645     {
646       hints &= ~INLINE_HINT_cross_module;
647       fprintf (f, " cross_module");
648     }
649   if (hints & INLINE_HINT_declared_inline)
650     {
651       hints &= ~INLINE_HINT_declared_inline;
652       fprintf (f, " declared_inline");
653     }
654   if (hints & INLINE_HINT_array_index)
655     {
656       hints &= ~INLINE_HINT_array_index;
657       fprintf (f, " array_index");
658     }
659   if (hints & INLINE_HINT_known_hot)
660     {
661       hints &= ~INLINE_HINT_known_hot;
662       fprintf (f, " known_hot");
663     }
664   gcc_assert (!hints);
665 }
666 
667 
668 /* Record SIZE and TIME under condition PRED into the inline summary.  */
669 
670 static void
671 account_size_time (struct inline_summary *summary, int size, int time,
672 		   struct predicate *pred)
673 {
674   size_time_entry *e;
675   bool found = false;
676   int i;
677 
678   if (false_predicate_p (pred))
679     return;
680 
681   /* We need to create initial empty unconitional clause, but otherwie
682      we don't need to account empty times and sizes.  */
683   if (!size && !time && summary->entry)
684     return;
685 
686   /* Watch overflow that might result from insane profiles.  */
687   if (time > MAX_TIME * INLINE_TIME_SCALE)
688     time = MAX_TIME * INLINE_TIME_SCALE;
689   gcc_assert (time >= 0);
690 
691   for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
692     if (predicates_equal_p (&e->predicate, pred))
693       {
694 	found = true;
695 	break;
696       }
697   if (i == 256)
698     {
699       i = 0;
700       found = true;
701       e = &(*summary->entry)[0];
702       gcc_assert (!e->predicate.clause[0]);
703       if (dump_file && (dump_flags & TDF_DETAILS))
704 	fprintf (dump_file,
705 		 "\t\tReached limit on number of entries, "
706 		 "ignoring the predicate.");
707     }
708   if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
709     {
710       fprintf (dump_file,
711 	       "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
712 	       ((double) size) / INLINE_SIZE_SCALE,
713 	       ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
714       dump_predicate (dump_file, summary->conds, pred);
715     }
716   if (!found)
717     {
718       struct size_time_entry new_entry;
719       new_entry.size = size;
720       new_entry.time = time;
721       new_entry.predicate = *pred;
722       vec_safe_push (summary->entry, new_entry);
723     }
724   else
725     {
726       e->size += size;
727       e->time += time;
728       if (e->time > MAX_TIME * INLINE_TIME_SCALE)
729 	e->time = MAX_TIME * INLINE_TIME_SCALE;
730     }
731 }
732 
733 /* We proved E to be unreachable, redirect it to __bultin_unreachable.  */
734 
735 static struct cgraph_edge *
736 redirect_to_unreachable (struct cgraph_edge *e)
737 {
738   struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
739   struct cgraph_node *target = cgraph_node::get_create
740 		      (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
741 
742   if (e->speculative)
743     e = e->resolve_speculation (target->decl);
744   else if (!e->callee)
745     e->make_direct (target);
746   else
747     e->redirect_callee (target);
748   struct inline_edge_summary *es = inline_edge_summary (e);
749   e->inline_failed = CIF_UNREACHABLE;
750   e->frequency = 0;
751   e->count = 0;
752   es->call_stmt_size = 0;
753   es->call_stmt_time = 0;
754   if (callee)
755     callee->remove_symbol_and_inline_clones ();
756   return e;
757 }
758 
759 /* Set predicate for edge E.  */
760 
761 static void
762 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
763 {
764   /* If the edge is determined to be never executed, redirect it
765      to BUILTIN_UNREACHABLE to save inliner from inlining into it.  */
766   if (predicate && false_predicate_p (predicate)
767       /* When handling speculative edges, we need to do the redirection
768          just once.  Do it always on the direct edge, so we do not
769 	 attempt to resolve speculation while duplicating the edge.  */
770       && (!e->speculative || e->callee))
771     e = redirect_to_unreachable (e);
772 
773   struct inline_edge_summary *es = inline_edge_summary (e);
774   if (predicate && !true_predicate_p (predicate))
775     {
776       if (!es->predicate)
777 	es->predicate = edge_predicate_pool.allocate ();
778       *es->predicate = *predicate;
779     }
780   else
781     {
782       if (es->predicate)
783 	edge_predicate_pool.remove (es->predicate);
784       es->predicate = NULL;
785     }
786 }
787 
788 /* Set predicate for hint *P.  */
789 
790 static void
791 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
792 {
793   if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
794     {
795       if (*p)
796 	edge_predicate_pool.remove (*p);
797       *p = NULL;
798     }
799   else
800     {
801       if (!*p)
802 	*p = edge_predicate_pool.allocate ();
803       **p = new_predicate;
804     }
805 }
806 
807 
808 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
809    KNOWN_AGGS is a vector of aggreggate jump functions for each parameter.
810    Return clause of possible truths. When INLINE_P is true, assume that we are
811    inlining.
812 
813    ERROR_MARK means compile time invariant.  */
814 
815 static clause_t
816 evaluate_conditions_for_known_args (struct cgraph_node *node,
817 				    bool inline_p,
818 				    vec<tree> known_vals,
819 				    vec<ipa_agg_jump_function_p>
820 				    known_aggs)
821 {
822   clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
823   struct inline_summary *info = inline_summaries->get (node);
824   int i;
825   struct condition *c;
826 
827   for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
828     {
829       tree val;
830       tree res;
831 
832       /* We allow call stmt to have fewer arguments than the callee function
833          (especially for K&R style programs).  So bound check here (we assume
834          known_aggs vector, if non-NULL, has the same length as
835          known_vals).  */
836       gcc_checking_assert (!known_aggs.exists ()
837 			   || (known_vals.length () == known_aggs.length ()));
838       if (c->operand_num >= (int) known_vals.length ())
839 	{
840 	  clause |= 1 << (i + predicate_first_dynamic_condition);
841 	  continue;
842 	}
843 
844       if (c->agg_contents)
845 	{
846 	  struct ipa_agg_jump_function *agg;
847 
848 	  if (c->code == CHANGED
849 	      && !c->by_ref
850 	      && (known_vals[c->operand_num] == error_mark_node))
851 	    continue;
852 
853 	  if (known_aggs.exists ())
854 	    {
855 	      agg = known_aggs[c->operand_num];
856 	      val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
857 						c->offset, c->by_ref);
858 	    }
859 	  else
860 	    val = NULL_TREE;
861 	}
862       else
863 	{
864 	  val = known_vals[c->operand_num];
865 	  if (val == error_mark_node && c->code != CHANGED)
866 	    val = NULL_TREE;
867 	}
868 
869       if (!val)
870 	{
871 	  clause |= 1 << (i + predicate_first_dynamic_condition);
872 	  continue;
873 	}
874       if (c->code == CHANGED)
875 	continue;
876 
877       if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
878 	{
879 	  clause |= 1 << (i + predicate_first_dynamic_condition);
880 	  continue;
881 	}
882       if (c->code == IS_NOT_CONSTANT)
883 	continue;
884 
885       val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
886       res = val
887 	? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
888 	: NULL;
889 
890       if (res && integer_zerop (res))
891 	continue;
892 
893       clause |= 1 << (i + predicate_first_dynamic_condition);
894     }
895   return clause;
896 }
897 
898 
899 /* Work out what conditions might be true at invocation of E.  */
900 
901 static void
902 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
903 			      clause_t *clause_ptr,
904 			      vec<tree> *known_vals_ptr,
905 			      vec<ipa_polymorphic_call_context>
906 			      *known_contexts_ptr,
907 			      vec<ipa_agg_jump_function_p> *known_aggs_ptr)
908 {
909   struct cgraph_node *callee = e->callee->ultimate_alias_target ();
910   struct inline_summary *info = inline_summaries->get (callee);
911   vec<tree> known_vals = vNULL;
912   vec<ipa_agg_jump_function_p> known_aggs = vNULL;
913 
914   if (clause_ptr)
915     *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
916   if (known_vals_ptr)
917     known_vals_ptr->create (0);
918   if (known_contexts_ptr)
919     known_contexts_ptr->create (0);
920 
921   if (ipa_node_params_sum
922       && !e->call_stmt_cannot_inline_p
923       && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
924     {
925       struct ipa_node_params *parms_info;
926       struct ipa_edge_args *args = IPA_EDGE_REF (e);
927       struct inline_edge_summary *es = inline_edge_summary (e);
928       int i, count = ipa_get_cs_argument_count (args);
929 
930       if (e->caller->global.inlined_to)
931 	parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
932       else
933 	parms_info = IPA_NODE_REF (e->caller);
934 
935       if (count && (info->conds || known_vals_ptr))
936 	known_vals.safe_grow_cleared (count);
937       if (count && (info->conds || known_aggs_ptr))
938 	known_aggs.safe_grow_cleared (count);
939       if (count && known_contexts_ptr)
940 	known_contexts_ptr->safe_grow_cleared (count);
941 
942       for (i = 0; i < count; i++)
943 	{
944 	  struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
945 	  tree cst = ipa_value_from_jfunc (parms_info, jf);
946 
947 	  if (!cst && e->call_stmt
948 	      && i < (int)gimple_call_num_args (e->call_stmt))
949 	    {
950 	      cst = gimple_call_arg (e->call_stmt, i);
951 	      if (!is_gimple_min_invariant (cst))
952 		cst = NULL;
953 	    }
954 	  if (cst)
955 	    {
956 	      gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
957 	      if (known_vals.exists ())
958 		known_vals[i] = cst;
959 	    }
960 	  else if (inline_p && !es->param[i].change_prob)
961 	    known_vals[i] = error_mark_node;
962 
963 	  if (known_contexts_ptr)
964 	    (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
965 							       i, jf);
966 	  /* TODO: When IPA-CP starts propagating and merging aggregate jump
967 	     functions, use its knowledge of the caller too, just like the
968 	     scalar case above.  */
969 	  known_aggs[i] = &jf->agg;
970 	}
971     }
972   else if (e->call_stmt && !e->call_stmt_cannot_inline_p
973 	   && ((clause_ptr && info->conds) || known_vals_ptr))
974     {
975       int i, count = (int)gimple_call_num_args (e->call_stmt);
976 
977       if (count && (info->conds || known_vals_ptr))
978 	known_vals.safe_grow_cleared (count);
979       for (i = 0; i < count; i++)
980 	{
981 	  tree cst = gimple_call_arg (e->call_stmt, i);
982 	  if (!is_gimple_min_invariant (cst))
983 	    cst = NULL;
984 	  if (cst)
985 	    known_vals[i] = cst;
986 	}
987     }
988 
989   if (clause_ptr)
990     *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
991 						      known_vals, known_aggs);
992 
993   if (known_vals_ptr)
994     *known_vals_ptr = known_vals;
995   else
996     known_vals.release ();
997 
998   if (known_aggs_ptr)
999     *known_aggs_ptr = known_aggs;
1000   else
1001     known_aggs.release ();
1002 }
1003 
1004 
1005 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1006 
1007 static void
1008 inline_summary_alloc (void)
1009 {
1010   if (!edge_removal_hook_holder)
1011     edge_removal_hook_holder =
1012       symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1013   if (!edge_duplication_hook_holder)
1014     edge_duplication_hook_holder =
1015       symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1016 
1017   if (!inline_summaries)
1018     inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1019 
1020   if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1021     inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1022 }
1023 
1024 /* We are called multiple time for given function; clear
1025    data from previous run so they are not cumulated.  */
1026 
1027 static void
1028 reset_inline_edge_summary (struct cgraph_edge *e)
1029 {
1030   if (e->uid < (int) inline_edge_summary_vec.length ())
1031     {
1032       struct inline_edge_summary *es = inline_edge_summary (e);
1033 
1034       es->call_stmt_size = es->call_stmt_time = 0;
1035       if (es->predicate)
1036 	edge_predicate_pool.remove (es->predicate);
1037       es->predicate = NULL;
1038       es->param.release ();
1039     }
1040 }
1041 
1042 /* We are called multiple time for given function; clear
1043    data from previous run so they are not cumulated.  */
1044 
1045 static void
1046 reset_inline_summary (struct cgraph_node *node,
1047 		      inline_summary *info)
1048 {
1049   struct cgraph_edge *e;
1050 
1051   info->self_size = info->self_time = 0;
1052   info->estimated_stack_size = 0;
1053   info->estimated_self_stack_size = 0;
1054   info->stack_frame_offset = 0;
1055   info->size = 0;
1056   info->time = 0;
1057   info->growth = 0;
1058   info->scc_no = 0;
1059   if (info->loop_iterations)
1060     {
1061       edge_predicate_pool.remove (info->loop_iterations);
1062       info->loop_iterations = NULL;
1063     }
1064   if (info->loop_stride)
1065     {
1066       edge_predicate_pool.remove (info->loop_stride);
1067       info->loop_stride = NULL;
1068     }
1069   if (info->array_index)
1070     {
1071       edge_predicate_pool.remove (info->array_index);
1072       info->array_index = NULL;
1073     }
1074   vec_free (info->conds);
1075   vec_free (info->entry);
1076   for (e = node->callees; e; e = e->next_callee)
1077     reset_inline_edge_summary (e);
1078   for (e = node->indirect_calls; e; e = e->next_callee)
1079     reset_inline_edge_summary (e);
1080   info->fp_expressions = false;
1081 }
1082 
1083 /* Hook that is called by cgraph.c when a node is removed.  */
1084 
1085 void
1086 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1087 {
1088   reset_inline_summary (node, info);
1089 }
1090 
1091 /* Remap predicate P of former function to be predicate of duplicated function.
1092    POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1093    INFO is inline summary of the duplicated node.  */
1094 
1095 static struct predicate
1096 remap_predicate_after_duplication (struct predicate *p,
1097 				   clause_t possible_truths,
1098 				   struct inline_summary *info)
1099 {
1100   struct predicate new_predicate = true_predicate ();
1101   int j;
1102   for (j = 0; p->clause[j]; j++)
1103     if (!(possible_truths & p->clause[j]))
1104       {
1105 	new_predicate = false_predicate ();
1106 	break;
1107       }
1108     else
1109       add_clause (info->conds, &new_predicate,
1110 		  possible_truths & p->clause[j]);
1111   return new_predicate;
1112 }
1113 
1114 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1115    Additionally care about allocating new memory slot for updated predicate
1116    and set it to NULL when it becomes true or false (and thus uninteresting).
1117  */
1118 
1119 static void
1120 remap_hint_predicate_after_duplication (struct predicate **p,
1121 					clause_t possible_truths,
1122 					struct inline_summary *info)
1123 {
1124   struct predicate new_predicate;
1125 
1126   if (!*p)
1127     return;
1128 
1129   new_predicate = remap_predicate_after_duplication (*p,
1130 						     possible_truths, info);
1131   /* We do not want to free previous predicate; it is used by node origin.  */
1132   *p = NULL;
1133   set_hint_predicate (p, new_predicate);
1134 }
1135 
1136 
1137 /* Hook that is called by cgraph.c when a node is duplicated.  */
1138 void
1139 inline_summary_t::duplicate (cgraph_node *src,
1140 			     cgraph_node *dst,
1141 			     inline_summary *,
1142 			     inline_summary *info)
1143 {
1144   inline_summary_alloc ();
1145   memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1146   /* TODO: as an optimization, we may avoid copying conditions
1147      that are known to be false or true.  */
1148   info->conds = vec_safe_copy (info->conds);
1149 
1150   /* When there are any replacements in the function body, see if we can figure
1151      out that something was optimized out.  */
1152   if (ipa_node_params_sum && dst->clone.tree_map)
1153     {
1154       vec<size_time_entry, va_gc> *entry = info->entry;
1155       /* Use SRC parm info since it may not be copied yet.  */
1156       struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1157       vec<tree> known_vals = vNULL;
1158       int count = ipa_get_param_count (parms_info);
1159       int i, j;
1160       clause_t possible_truths;
1161       struct predicate true_pred = true_predicate ();
1162       size_time_entry *e;
1163       int optimized_out_size = 0;
1164       bool inlined_to_p = false;
1165       struct cgraph_edge *edge, *next;
1166 
1167       info->entry = 0;
1168       known_vals.safe_grow_cleared (count);
1169       for (i = 0; i < count; i++)
1170 	{
1171 	  struct ipa_replace_map *r;
1172 
1173 	  for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1174 	    {
1175 	      if (((!r->old_tree && r->parm_num == i)
1176 		   || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1177 		   && r->replace_p && !r->ref_p)
1178 		{
1179 		  known_vals[i] = r->new_tree;
1180 		  break;
1181 		}
1182 	    }
1183 	}
1184       possible_truths = evaluate_conditions_for_known_args (dst, false,
1185 							    known_vals,
1186 							    vNULL);
1187       known_vals.release ();
1188 
1189       account_size_time (info, 0, 0, &true_pred);
1190 
1191       /* Remap size_time vectors.
1192          Simplify the predicate by prunning out alternatives that are known
1193          to be false.
1194          TODO: as on optimization, we can also eliminate conditions known
1195          to be true.  */
1196       for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1197 	{
1198 	  struct predicate new_predicate;
1199 	  new_predicate = remap_predicate_after_duplication (&e->predicate,
1200 							     possible_truths,
1201 							     info);
1202 	  if (false_predicate_p (&new_predicate))
1203 	    optimized_out_size += e->size;
1204 	  else
1205 	    account_size_time (info, e->size, e->time, &new_predicate);
1206 	}
1207 
1208       /* Remap edge predicates with the same simplification as above.
1209          Also copy constantness arrays.   */
1210       for (edge = dst->callees; edge; edge = next)
1211 	{
1212 	  struct predicate new_predicate;
1213 	  struct inline_edge_summary *es = inline_edge_summary (edge);
1214 	  next = edge->next_callee;
1215 
1216 	  if (!edge->inline_failed)
1217 	    inlined_to_p = true;
1218 	  if (!es->predicate)
1219 	    continue;
1220 	  new_predicate = remap_predicate_after_duplication (es->predicate,
1221 							     possible_truths,
1222 							     info);
1223 	  if (false_predicate_p (&new_predicate)
1224 	      && !false_predicate_p (es->predicate))
1225 	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1226 	  edge_set_predicate (edge, &new_predicate);
1227 	}
1228 
1229       /* Remap indirect edge predicates with the same simplificaiton as above.
1230          Also copy constantness arrays.   */
1231       for (edge = dst->indirect_calls; edge; edge = next)
1232 	{
1233 	  struct predicate new_predicate;
1234 	  struct inline_edge_summary *es = inline_edge_summary (edge);
1235 	  next = edge->next_callee;
1236 
1237 	  gcc_checking_assert (edge->inline_failed);
1238 	  if (!es->predicate)
1239 	    continue;
1240 	  new_predicate = remap_predicate_after_duplication (es->predicate,
1241 							     possible_truths,
1242 							     info);
1243 	  if (false_predicate_p (&new_predicate)
1244 	      && !false_predicate_p (es->predicate))
1245 	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1246 	  edge_set_predicate (edge, &new_predicate);
1247 	}
1248       remap_hint_predicate_after_duplication (&info->loop_iterations,
1249 					      possible_truths, info);
1250       remap_hint_predicate_after_duplication (&info->loop_stride,
1251 					      possible_truths, info);
1252       remap_hint_predicate_after_duplication (&info->array_index,
1253 					      possible_truths, info);
1254 
1255       /* If inliner or someone after inliner will ever start producing
1256          non-trivial clones, we will get trouble with lack of information
1257          about updating self sizes, because size vectors already contains
1258          sizes of the calees.  */
1259       gcc_assert (!inlined_to_p || !optimized_out_size);
1260     }
1261   else
1262     {
1263       info->entry = vec_safe_copy (info->entry);
1264       if (info->loop_iterations)
1265 	{
1266 	  predicate p = *info->loop_iterations;
1267 	  info->loop_iterations = NULL;
1268 	  set_hint_predicate (&info->loop_iterations, p);
1269 	}
1270       if (info->loop_stride)
1271 	{
1272 	  predicate p = *info->loop_stride;
1273 	  info->loop_stride = NULL;
1274 	  set_hint_predicate (&info->loop_stride, p);
1275 	}
1276       if (info->array_index)
1277 	{
1278 	  predicate p = *info->array_index;
1279 	  info->array_index = NULL;
1280 	  set_hint_predicate (&info->array_index, p);
1281 	}
1282     }
1283   if (!dst->global.inlined_to)
1284     inline_update_overall_summary (dst);
1285 }
1286 
1287 
1288 /* Hook that is called by cgraph.c when a node is duplicated.  */
1289 
1290 static void
1291 inline_edge_duplication_hook (struct cgraph_edge *src,
1292 			      struct cgraph_edge *dst,
1293 			      ATTRIBUTE_UNUSED void *data)
1294 {
1295   struct inline_edge_summary *info;
1296   struct inline_edge_summary *srcinfo;
1297   inline_summary_alloc ();
1298   info = inline_edge_summary (dst);
1299   srcinfo = inline_edge_summary (src);
1300   memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1301   info->predicate = NULL;
1302   edge_set_predicate (dst, srcinfo->predicate);
1303   info->param = srcinfo->param.copy ();
1304   if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1305     {
1306       info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1307 			       - eni_size_weights.call_cost);
1308       info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1309 			       - eni_time_weights.call_cost);
1310     }
1311 }
1312 
1313 
1314 /* Keep edge cache consistent across edge removal.  */
1315 
1316 static void
1317 inline_edge_removal_hook (struct cgraph_edge *edge,
1318 			  void *data ATTRIBUTE_UNUSED)
1319 {
1320   if (edge_growth_cache.exists ())
1321     reset_edge_growth_cache (edge);
1322   reset_inline_edge_summary (edge);
1323 }
1324 
1325 
1326 /* Initialize growth caches.  */
1327 
1328 void
1329 initialize_growth_caches (void)
1330 {
1331   if (symtab->edges_max_uid)
1332     edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1333 }
1334 
1335 
1336 /* Free growth caches.  */
1337 
1338 void
1339 free_growth_caches (void)
1340 {
1341   edge_growth_cache.release ();
1342 }
1343 
1344 
1345 /* Dump edge summaries associated to NODE and recursively to all clones.
1346    Indent by INDENT.  */
1347 
1348 static void
1349 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1350 			  struct inline_summary *info)
1351 {
1352   struct cgraph_edge *edge;
1353   for (edge = node->callees; edge; edge = edge->next_callee)
1354     {
1355       struct inline_edge_summary *es = inline_edge_summary (edge);
1356       struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1357       int i;
1358 
1359       fprintf (f,
1360 	       "%*s%s/%i %s\n%*s  loop depth:%2i freq:%4i size:%2i"
1361 	       " time: %2i callee size:%2i stack:%2i",
1362 	       indent, "", callee->name (), callee->order,
1363 	       !edge->inline_failed
1364 	       ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1365 	       indent, "", es->loop_depth, edge->frequency,
1366 	       es->call_stmt_size, es->call_stmt_time,
1367 	       (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1368 	       (int) inline_summaries->get (callee)->estimated_stack_size);
1369 
1370       if (es->predicate)
1371 	{
1372 	  fprintf (f, " predicate: ");
1373 	  dump_predicate (f, info->conds, es->predicate);
1374 	}
1375       else
1376 	fprintf (f, "\n");
1377       if (es->param.exists ())
1378 	for (i = 0; i < (int) es->param.length (); i++)
1379 	  {
1380 	    int prob = es->param[i].change_prob;
1381 
1382 	    if (!prob)
1383 	      fprintf (f, "%*s op%i is compile time invariant\n",
1384 		       indent + 2, "", i);
1385 	    else if (prob != REG_BR_PROB_BASE)
1386 	      fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1387 		       prob * 100.0 / REG_BR_PROB_BASE);
1388 	  }
1389       if (!edge->inline_failed)
1390 	{
1391 	  fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1392 		   " callee size %i\n",
1393 		   indent + 2, "",
1394 		   (int) inline_summaries->get (callee)->stack_frame_offset,
1395 		   (int) inline_summaries->get (callee)->estimated_self_stack_size,
1396 		   (int) inline_summaries->get (callee)->estimated_stack_size);
1397 	  dump_inline_edge_summary (f, indent + 2, callee, info);
1398 	}
1399     }
1400   for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1401     {
1402       struct inline_edge_summary *es = inline_edge_summary (edge);
1403       fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1404 	       " time: %2i",
1405 	       indent, "",
1406 	       es->loop_depth,
1407 	       edge->frequency, es->call_stmt_size, es->call_stmt_time);
1408       if (es->predicate)
1409 	{
1410 	  fprintf (f, "predicate: ");
1411 	  dump_predicate (f, info->conds, es->predicate);
1412 	}
1413       else
1414 	fprintf (f, "\n");
1415     }
1416 }
1417 
1418 
1419 void
1420 dump_inline_summary (FILE *f, struct cgraph_node *node)
1421 {
1422   if (node->definition)
1423     {
1424       struct inline_summary *s = inline_summaries->get (node);
1425       size_time_entry *e;
1426       int i;
1427       fprintf (f, "Inline summary for %s/%i", node->name (),
1428 	       node->order);
1429       if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1430 	fprintf (f, " always_inline");
1431       if (s->inlinable)
1432 	fprintf (f, " inlinable");
1433       if (s->contains_cilk_spawn)
1434 	fprintf (f, " contains_cilk_spawn");
1435       if (s->fp_expressions)
1436 	fprintf (f, " fp_expression");
1437       fprintf (f, "\n  self time:       %i\n", s->self_time);
1438       fprintf (f, "  global time:     %i\n", s->time);
1439       fprintf (f, "  self size:       %i\n", s->self_size);
1440       fprintf (f, "  global size:     %i\n", s->size);
1441       fprintf (f, "  min size:       %i\n", s->min_size);
1442       fprintf (f, "  self stack:      %i\n",
1443 	       (int) s->estimated_self_stack_size);
1444       fprintf (f, "  global stack:    %i\n", (int) s->estimated_stack_size);
1445       if (s->growth)
1446 	fprintf (f, "  estimated growth:%i\n", (int) s->growth);
1447       if (s->scc_no)
1448 	fprintf (f, "  In SCC:          %i\n", (int) s->scc_no);
1449       for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1450 	{
1451 	  fprintf (f, "    size:%f, time:%f, predicate:",
1452 		   (double) e->size / INLINE_SIZE_SCALE,
1453 		   (double) e->time / INLINE_TIME_SCALE);
1454 	  dump_predicate (f, s->conds, &e->predicate);
1455 	}
1456       if (s->loop_iterations)
1457 	{
1458 	  fprintf (f, "  loop iterations:");
1459 	  dump_predicate (f, s->conds, s->loop_iterations);
1460 	}
1461       if (s->loop_stride)
1462 	{
1463 	  fprintf (f, "  loop stride:");
1464 	  dump_predicate (f, s->conds, s->loop_stride);
1465 	}
1466       if (s->array_index)
1467 	{
1468 	  fprintf (f, "  array index:");
1469 	  dump_predicate (f, s->conds, s->array_index);
1470 	}
1471       fprintf (f, "  calls:\n");
1472       dump_inline_edge_summary (f, 4, node, s);
1473       fprintf (f, "\n");
1474     }
1475 }
1476 
1477 DEBUG_FUNCTION void
1478 debug_inline_summary (struct cgraph_node *node)
1479 {
1480   dump_inline_summary (stderr, node);
1481 }
1482 
1483 void
1484 dump_inline_summaries (FILE *f)
1485 {
1486   struct cgraph_node *node;
1487 
1488   FOR_EACH_DEFINED_FUNCTION (node)
1489     if (!node->global.inlined_to)
1490       dump_inline_summary (f, node);
1491 }
1492 
1493 /* Give initial reasons why inlining would fail on EDGE.  This gets either
1494    nullified or usually overwritten by more precise reasons later.  */
1495 
1496 void
1497 initialize_inline_failed (struct cgraph_edge *e)
1498 {
1499   struct cgraph_node *callee = e->callee;
1500 
1501   if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
1502       && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
1503     ;
1504   else if (e->indirect_unknown_callee)
1505     e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1506   else if (!callee->definition)
1507     e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1508   else if (callee->local.redefined_extern_inline)
1509     e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1510   else
1511     e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1512   gcc_checking_assert (!e->call_stmt_cannot_inline_p
1513 		       || cgraph_inline_failed_type (e->inline_failed)
1514 			    == CIF_FINAL_ERROR);
1515 }
1516 
1517 /* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
1518    boolean variable pointed to by DATA.  */
1519 
1520 static bool
1521 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1522 	       void *data)
1523 {
1524   bool *b = (bool *) data;
1525   *b = true;
1526   return true;
1527 }
1528 
1529 /* If OP refers to value of function parameter, return the corresponding
1530    parameter.  If non-NULL, the size of the memory load (or the SSA_NAME of the
1531    PARM_DECL) will be stored to *SIZE_P in that case too.  */
1532 
1533 static tree
1534 unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1535 {
1536   /* SSA_NAME referring to parm default def?  */
1537   if (TREE_CODE (op) == SSA_NAME
1538       && SSA_NAME_IS_DEFAULT_DEF (op)
1539       && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1540     {
1541       if (size_p)
1542 	*size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1543       return SSA_NAME_VAR (op);
1544     }
1545   /* Non-SSA parm reference?  */
1546   if (TREE_CODE (op) == PARM_DECL)
1547     {
1548       bool modified = false;
1549 
1550       ao_ref refd;
1551       ao_ref_init (&refd, op);
1552       walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1553 			  NULL);
1554       if (!modified)
1555 	{
1556 	  if (size_p)
1557 	    *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1558 	  return op;
1559 	}
1560     }
1561   return NULL_TREE;
1562 }
1563 
1564 /* If OP refers to value of function parameter, return the corresponding
1565    parameter.  Also traverse chains of SSA register assignments.  If non-NULL,
1566    the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1567    stored to *SIZE_P in that case too.  */
1568 
1569 static tree
1570 unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1571 {
1572   tree res = unmodified_parm_1 (stmt, op, size_p);
1573   if (res)
1574     return res;
1575 
1576   if (TREE_CODE (op) == SSA_NAME
1577       && !SSA_NAME_IS_DEFAULT_DEF (op)
1578       && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1579     return unmodified_parm (SSA_NAME_DEF_STMT (op),
1580 			    gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
1581 			    size_p);
1582   return NULL_TREE;
1583 }
1584 
1585 /* If OP refers to a value of a function parameter or value loaded from an
1586    aggregate passed to a parameter (either by value or reference), return TRUE
1587    and store the number of the parameter to *INDEX_P, the access size into
1588    *SIZE_P, and information whether and how it has been loaded from an
1589    aggregate into *AGGPOS.  INFO describes the function parameters, STMT is the
1590    statement in which OP is used or loaded.  */
1591 
1592 static bool
1593 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1594 				  gimple *stmt, tree op, int *index_p,
1595 				  HOST_WIDE_INT *size_p,
1596 				  struct agg_position_info *aggpos)
1597 {
1598   tree res = unmodified_parm_1 (stmt, op, size_p);
1599 
1600   gcc_checking_assert (aggpos);
1601   if (res)
1602     {
1603       *index_p = ipa_get_param_decl_index (fbi->info, res);
1604       if (*index_p < 0)
1605 	return false;
1606       aggpos->agg_contents = false;
1607       aggpos->by_ref = false;
1608       return true;
1609     }
1610 
1611   if (TREE_CODE (op) == SSA_NAME)
1612     {
1613       if (SSA_NAME_IS_DEFAULT_DEF (op)
1614 	  || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1615 	return false;
1616       stmt = SSA_NAME_DEF_STMT (op);
1617       op = gimple_assign_rhs1 (stmt);
1618       if (!REFERENCE_CLASS_P (op))
1619 	return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
1620 						 aggpos);
1621     }
1622 
1623   aggpos->agg_contents = true;
1624   return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1625 				 stmt, op, index_p, &aggpos->offset,
1626 				 size_p, &aggpos->by_ref);
1627 }
1628 
1629 /* See if statement might disappear after inlining.
1630    0 - means not eliminated
1631    1 - half of statements goes away
1632    2 - for sure it is eliminated.
1633    We are not terribly sophisticated, basically looking for simple abstraction
1634    penalty wrappers.  */
1635 
1636 static int
1637 eliminated_by_inlining_prob (gimple *stmt)
1638 {
1639   enum gimple_code code = gimple_code (stmt);
1640   enum tree_code rhs_code;
1641 
1642   if (!optimize)
1643     return 0;
1644 
1645   switch (code)
1646     {
1647     case GIMPLE_RETURN:
1648       return 2;
1649     case GIMPLE_ASSIGN:
1650       if (gimple_num_ops (stmt) != 2)
1651 	return 0;
1652 
1653       rhs_code = gimple_assign_rhs_code (stmt);
1654 
1655       /* Casts of parameters, loads from parameters passed by reference
1656          and stores to return value or parameters are often free after
1657          inlining dua to SRA and further combining.
1658          Assume that half of statements goes away.  */
1659       if (CONVERT_EXPR_CODE_P (rhs_code)
1660 	  || rhs_code == VIEW_CONVERT_EXPR
1661 	  || rhs_code == ADDR_EXPR
1662 	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1663 	{
1664 	  tree rhs = gimple_assign_rhs1 (stmt);
1665 	  tree lhs = gimple_assign_lhs (stmt);
1666 	  tree inner_rhs = get_base_address (rhs);
1667 	  tree inner_lhs = get_base_address (lhs);
1668 	  bool rhs_free = false;
1669 	  bool lhs_free = false;
1670 
1671 	  if (!inner_rhs)
1672 	    inner_rhs = rhs;
1673 	  if (!inner_lhs)
1674 	    inner_lhs = lhs;
1675 
1676 	  /* Reads of parameter are expected to be free.  */
1677 	  if (unmodified_parm (stmt, inner_rhs, NULL))
1678 	    rhs_free = true;
1679 	  /* Match expressions of form &this->field. Those will most likely
1680 	     combine with something upstream after inlining.  */
1681 	  else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1682 	    {
1683 	      tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1684 	      if (TREE_CODE (op) == PARM_DECL)
1685 		rhs_free = true;
1686 	      else if (TREE_CODE (op) == MEM_REF
1687 		       && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
1688 		rhs_free = true;
1689 	    }
1690 
1691 	  /* When parameter is not SSA register because its address is taken
1692 	     and it is just copied into one, the statement will be completely
1693 	     free after inlining (we will copy propagate backward).   */
1694 	  if (rhs_free && is_gimple_reg (lhs))
1695 	    return 2;
1696 
1697 	  /* Reads of parameters passed by reference
1698 	     expected to be free (i.e. optimized out after inlining).  */
1699 	  if (TREE_CODE (inner_rhs) == MEM_REF
1700 	      && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
1701 	    rhs_free = true;
1702 
1703 	  /* Copying parameter passed by reference into gimple register is
1704 	     probably also going to copy propagate, but we can't be quite
1705 	     sure.  */
1706 	  if (rhs_free && is_gimple_reg (lhs))
1707 	    lhs_free = true;
1708 
1709 	  /* Writes to parameters, parameters passed by value and return value
1710 	     (either dirrectly or passed via invisible reference) are free.
1711 
1712 	     TODO: We ought to handle testcase like
1713 	     struct a {int a,b;};
1714 	     struct a
1715 	     retrurnsturct (void)
1716 	     {
1717 	     struct a a ={1,2};
1718 	     return a;
1719 	     }
1720 
1721 	     This translate into:
1722 
1723 	     retrurnsturct ()
1724 	     {
1725 	     int a$b;
1726 	     int a$a;
1727 	     struct a a;
1728 	     struct a D.2739;
1729 
1730 	     <bb 2>:
1731 	     D.2739.a = 1;
1732 	     D.2739.b = 2;
1733 	     return D.2739;
1734 
1735 	     }
1736 	     For that we either need to copy ipa-split logic detecting writes
1737 	     to return value.  */
1738 	  if (TREE_CODE (inner_lhs) == PARM_DECL
1739 	      || TREE_CODE (inner_lhs) == RESULT_DECL
1740 	      || (TREE_CODE (inner_lhs) == MEM_REF
1741 		  && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
1742 		      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1743 			  && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1744 			  && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1745 						      (inner_lhs,
1746 						       0))) == RESULT_DECL))))
1747 	    lhs_free = true;
1748 	  if (lhs_free
1749 	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1750 	    rhs_free = true;
1751 	  if (lhs_free && rhs_free)
1752 	    return 1;
1753 	}
1754       return 0;
1755     default:
1756       return 0;
1757     }
1758 }
1759 
1760 
1761 /* If BB ends by a conditional we can turn into predicates, attach corresponding
1762    predicates to the CFG edges.   */
1763 
1764 static void
1765 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1766 				   struct inline_summary *summary,
1767 				   basic_block bb)
1768 {
1769   gimple *last;
1770   tree op;
1771   int index;
1772   HOST_WIDE_INT size;
1773   struct agg_position_info aggpos;
1774   enum tree_code code, inverted_code;
1775   edge e;
1776   edge_iterator ei;
1777   gimple *set_stmt;
1778   tree op2;
1779 
1780   last = last_stmt (bb);
1781   if (!last || gimple_code (last) != GIMPLE_COND)
1782     return;
1783   if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1784     return;
1785   op = gimple_cond_lhs (last);
1786   /* TODO: handle conditionals like
1787      var = op0 < 4;
1788      if (var != 0).  */
1789   if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1790     {
1791       code = gimple_cond_code (last);
1792       inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1793 
1794       FOR_EACH_EDGE (e, ei, bb->succs)
1795 	{
1796 	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1797 				      ? code : inverted_code);
1798 	  /* invert_tree_comparison will return ERROR_MARK on FP
1799 	     comparsions that are not EQ/NE instead of returning proper
1800 	     unordered one.  Be sure it is not confused with NON_CONSTANT.  */
1801 	  if (this_code != ERROR_MARK)
1802 	    {
1803 	      struct predicate p
1804 		= add_condition (summary, index, size, &aggpos, this_code,
1805 				 unshare_expr_without_location
1806 				 (gimple_cond_rhs (last)));
1807 	      e->aux = edge_predicate_pool.allocate ();
1808 	      *(struct predicate *) e->aux = p;
1809 	    }
1810 	}
1811     }
1812 
1813   if (TREE_CODE (op) != SSA_NAME)
1814     return;
1815   /* Special case
1816      if (builtin_constant_p (op))
1817      constant_code
1818      else
1819      nonconstant_code.
1820      Here we can predicate nonconstant_code.  We can't
1821      really handle constant_code since we have no predicate
1822      for this and also the constant code is not known to be
1823      optimized away when inliner doen't see operand is constant.
1824      Other optimizers might think otherwise.  */
1825   if (gimple_cond_code (last) != NE_EXPR
1826       || !integer_zerop (gimple_cond_rhs (last)))
1827     return;
1828   set_stmt = SSA_NAME_DEF_STMT (op);
1829   if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1830       || gimple_call_num_args (set_stmt) != 1)
1831     return;
1832   op2 = gimple_call_arg (set_stmt, 0);
1833   if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
1834 					 &aggpos))
1835     return;
1836   FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1837     {
1838       struct predicate p = add_condition (summary, index, size, &aggpos,
1839 					  IS_NOT_CONSTANT, NULL_TREE);
1840       e->aux = edge_predicate_pool.allocate ();
1841       *(struct predicate *) e->aux = p;
1842     }
1843 }
1844 
1845 
1846 /* If BB ends by a switch we can turn into predicates, attach corresponding
1847    predicates to the CFG edges.   */
1848 
1849 static void
1850 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1851 				     struct inline_summary *summary,
1852 				     basic_block bb)
1853 {
1854   gimple *lastg;
1855   tree op;
1856   int index;
1857   HOST_WIDE_INT size;
1858   struct agg_position_info aggpos;
1859   edge e;
1860   edge_iterator ei;
1861   size_t n;
1862   size_t case_idx;
1863 
1864   lastg = last_stmt (bb);
1865   if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1866     return;
1867   gswitch *last = as_a <gswitch *> (lastg);
1868   op = gimple_switch_index (last);
1869   if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1870     return;
1871 
1872   FOR_EACH_EDGE (e, ei, bb->succs)
1873     {
1874       e->aux = edge_predicate_pool.allocate ();
1875       *(struct predicate *) e->aux = false_predicate ();
1876     }
1877   n = gimple_switch_num_labels (last);
1878   for (case_idx = 0; case_idx < n; ++case_idx)
1879     {
1880       tree cl = gimple_switch_label (last, case_idx);
1881       tree min, max;
1882       struct predicate p;
1883 
1884       e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1885       min = CASE_LOW (cl);
1886       max = CASE_HIGH (cl);
1887 
1888       /* For default we might want to construct predicate that none
1889          of cases is met, but it is bit hard to do not having negations
1890          of conditionals handy.  */
1891       if (!min && !max)
1892 	p = true_predicate ();
1893       else if (!max)
1894 	p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
1895 			   unshare_expr_without_location (min));
1896       else
1897 	{
1898 	  struct predicate p1, p2;
1899 	  p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
1900 			      unshare_expr_without_location (min));
1901 	  p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
1902 			      unshare_expr_without_location (max));
1903 	  p = and_predicates (summary->conds, &p1, &p2);
1904 	}
1905       *(struct predicate *) e->aux
1906 	= or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1907     }
1908 }
1909 
1910 
1911 /* For each BB in NODE attach to its AUX pointer predicate under
1912    which it is executable.  */
1913 
1914 static void
1915 compute_bb_predicates (struct ipa_func_body_info *fbi,
1916 		       struct cgraph_node *node,
1917 		       struct inline_summary *summary)
1918 {
1919   struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1920   bool done = false;
1921   basic_block bb;
1922 
1923   FOR_EACH_BB_FN (bb, my_function)
1924     {
1925       set_cond_stmt_execution_predicate (fbi, summary, bb);
1926       set_switch_stmt_execution_predicate (fbi, summary, bb);
1927     }
1928 
1929   /* Entry block is always executable.  */
1930   ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1931     = edge_predicate_pool.allocate ();
1932   *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1933     = true_predicate ();
1934 
1935   /* A simple dataflow propagation of predicates forward in the CFG.
1936      TODO: work in reverse postorder.  */
1937   while (!done)
1938     {
1939       done = true;
1940       FOR_EACH_BB_FN (bb, my_function)
1941 	{
1942 	  struct predicate p = false_predicate ();
1943 	  edge e;
1944 	  edge_iterator ei;
1945 	  FOR_EACH_EDGE (e, ei, bb->preds)
1946 	    {
1947 	      if (e->src->aux)
1948 		{
1949 		  struct predicate this_bb_predicate
1950 		    = *(struct predicate *) e->src->aux;
1951 		  if (e->aux)
1952 		    this_bb_predicate
1953 		      = and_predicates (summary->conds, &this_bb_predicate,
1954 					(struct predicate *) e->aux);
1955 		  p = or_predicates (summary->conds, &p, &this_bb_predicate);
1956 		  if (true_predicate_p (&p))
1957 		    break;
1958 		}
1959 	    }
1960 	  if (false_predicate_p (&p))
1961 	    gcc_assert (!bb->aux);
1962 	  else
1963 	    {
1964 	      if (!bb->aux)
1965 		{
1966 		  done = false;
1967 		  bb->aux = edge_predicate_pool.allocate ();
1968 		  *((struct predicate *) bb->aux) = p;
1969 		}
1970 	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1971 		{
1972 		  /* This OR operation is needed to ensure monotonous data flow
1973 		     in the case we hit the limit on number of clauses and the
1974 		     and/or operations above give approximate answers.  */
1975 		  p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1976 	          if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1977 		    {
1978 		      done = false;
1979 		      *((struct predicate *) bb->aux) = p;
1980 		    }
1981 		}
1982 	    }
1983 	}
1984     }
1985 }
1986 
1987 
1988 /* We keep info about constantness of SSA names.  */
1989 
1990 typedef struct predicate predicate_t;
1991 /* Return predicate specifying when the STMT might have result that is not
1992    a compile time constant.  */
1993 
1994 static struct predicate
1995 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1996 				    struct inline_summary *summary,
1997 				    tree expr,
1998 				    vec<predicate_t> nonconstant_names)
1999 {
2000   tree parm;
2001   int index;
2002   HOST_WIDE_INT size;
2003 
2004   while (UNARY_CLASS_P (expr))
2005     expr = TREE_OPERAND (expr, 0);
2006 
2007   parm = unmodified_parm (NULL, expr, &size);
2008   if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2009     return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2010   if (is_gimple_min_invariant (expr))
2011     return false_predicate ();
2012   if (TREE_CODE (expr) == SSA_NAME)
2013     return nonconstant_names[SSA_NAME_VERSION (expr)];
2014   if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2015     {
2016       struct predicate p1 = will_be_nonconstant_expr_predicate
2017 	(info, summary, TREE_OPERAND (expr, 0),
2018 	 nonconstant_names);
2019       struct predicate p2;
2020       if (true_predicate_p (&p1))
2021 	return p1;
2022       p2 = will_be_nonconstant_expr_predicate (info, summary,
2023 					       TREE_OPERAND (expr, 1),
2024 					       nonconstant_names);
2025       return or_predicates (summary->conds, &p1, &p2);
2026     }
2027   else if (TREE_CODE (expr) == COND_EXPR)
2028     {
2029       struct predicate p1 = will_be_nonconstant_expr_predicate
2030 	(info, summary, TREE_OPERAND (expr, 0),
2031 	 nonconstant_names);
2032       struct predicate p2;
2033       if (true_predicate_p (&p1))
2034 	return p1;
2035       p2 = will_be_nonconstant_expr_predicate (info, summary,
2036 					       TREE_OPERAND (expr, 1),
2037 					       nonconstant_names);
2038       if (true_predicate_p (&p2))
2039 	return p2;
2040       p1 = or_predicates (summary->conds, &p1, &p2);
2041       p2 = will_be_nonconstant_expr_predicate (info, summary,
2042 					       TREE_OPERAND (expr, 2),
2043 					       nonconstant_names);
2044       return or_predicates (summary->conds, &p1, &p2);
2045     }
2046   else
2047     {
2048       debug_tree (expr);
2049       gcc_unreachable ();
2050     }
2051   return false_predicate ();
2052 }
2053 
2054 
2055 /* Return predicate specifying when the STMT might have result that is not
2056    a compile time constant.  */
2057 
2058 static struct predicate
2059 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2060 			       struct inline_summary *summary,
2061 			       gimple *stmt,
2062 			       vec<predicate_t> nonconstant_names)
2063 {
2064   struct predicate p = true_predicate ();
2065   ssa_op_iter iter;
2066   tree use;
2067   struct predicate op_non_const;
2068   bool is_load;
2069   int base_index;
2070   HOST_WIDE_INT size;
2071   struct agg_position_info aggpos;
2072 
2073   /* What statments might be optimized away
2074      when their arguments are constant.  */
2075   if (gimple_code (stmt) != GIMPLE_ASSIGN
2076       && gimple_code (stmt) != GIMPLE_COND
2077       && gimple_code (stmt) != GIMPLE_SWITCH
2078       && (gimple_code (stmt) != GIMPLE_CALL
2079 	  || !(gimple_call_flags (stmt) & ECF_CONST)))
2080     return p;
2081 
2082   /* Stores will stay anyway.  */
2083   if (gimple_store_p (stmt))
2084     return p;
2085 
2086   is_load = gimple_assign_load_p (stmt);
2087 
2088   /* Loads can be optimized when the value is known.  */
2089   if (is_load)
2090     {
2091       tree op;
2092       gcc_assert (gimple_assign_single_p (stmt));
2093       op = gimple_assign_rhs1 (stmt);
2094       if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
2095 					     &aggpos))
2096 	return p;
2097     }
2098   else
2099     base_index = -1;
2100 
2101   /* See if we understand all operands before we start
2102      adding conditionals.  */
2103   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2104     {
2105       tree parm = unmodified_parm (stmt, use, NULL);
2106       /* For arguments we can build a condition.  */
2107       if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2108 	continue;
2109       if (TREE_CODE (use) != SSA_NAME)
2110 	return p;
2111       /* If we know when operand is constant,
2112 	 we still can say something useful.  */
2113       if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2114 	continue;
2115       return p;
2116     }
2117 
2118   if (is_load)
2119     op_non_const =
2120       add_condition (summary, base_index, size, &aggpos, CHANGED, NULL);
2121   else
2122     op_non_const = false_predicate ();
2123   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2124     {
2125       HOST_WIDE_INT size;
2126       tree parm = unmodified_parm (stmt, use, &size);
2127       int index;
2128 
2129       if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2130 	{
2131 	  if (index != base_index)
2132 	    p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2133 	  else
2134 	    continue;
2135 	}
2136       else
2137 	p = nonconstant_names[SSA_NAME_VERSION (use)];
2138       op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2139     }
2140   if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2141       && gimple_op (stmt, 0)
2142       && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2143     nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2144       = op_non_const;
2145   return op_non_const;
2146 }
2147 
2148 struct record_modified_bb_info
2149 {
2150   bitmap bb_set;
2151   gimple *stmt;
2152 };
2153 
2154 /* Value is initialized in INIT_BB and used in USE_BB.  We want to copute
2155    probability how often it changes between USE_BB.
2156    INIT_BB->frequency/USE_BB->frequency is an estimate, but if INIT_BB
2157    is in different loop nest, we can do better.
2158    This is all just estimate.  In theory we look for minimal cut separating
2159    INIT_BB and USE_BB, but we only want to anticipate loop invariant motion
2160    anyway.  */
2161 
2162 static basic_block
2163 get_minimal_bb (basic_block init_bb, basic_block use_bb)
2164 {
2165   struct loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
2166   if (l && l->header->frequency < init_bb->frequency)
2167     return l->header;
2168   return init_bb;
2169 }
2170 
2171 /* Callback of walk_aliased_vdefs.  Records basic blocks where the value may be
2172    set except for info->stmt.  */
2173 
2174 static bool
2175 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2176 {
2177   struct record_modified_bb_info *info =
2178     (struct record_modified_bb_info *) data;
2179   if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2180     return false;
2181   bitmap_set_bit (info->bb_set,
2182 		  SSA_NAME_IS_DEFAULT_DEF (vdef)
2183 		  ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2184 		  : get_minimal_bb
2185 			 (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
2186 			  gimple_bb (info->stmt))->index);
2187   return false;
2188 }
2189 
2190 /* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
2191    will change since last invocation of STMT.
2192 
2193    Value 0 is reserved for compile time invariants.
2194    For common parameters it is REG_BR_PROB_BASE.  For loop invariants it
2195    ought to be REG_BR_PROB_BASE / estimated_iters.  */
2196 
2197 static int
2198 param_change_prob (gimple *stmt, int i)
2199 {
2200   tree op = gimple_call_arg (stmt, i);
2201   basic_block bb = gimple_bb (stmt);
2202 
2203   if (TREE_CODE (op) == WITH_SIZE_EXPR)
2204     op = TREE_OPERAND (op, 0);
2205 
2206   tree base = get_base_address (op);
2207 
2208   /* Global invariants never change.  */
2209   if (is_gimple_min_invariant (base))
2210     return 0;
2211 
2212   /* We would have to do non-trivial analysis to really work out what
2213      is the probability of value to change (i.e. when init statement
2214      is in a sibling loop of the call).
2215 
2216      We do an conservative estimate: when call is executed N times more often
2217      than the statement defining value, we take the frequency 1/N.  */
2218   if (TREE_CODE (base) == SSA_NAME)
2219     {
2220       int init_freq;
2221 
2222       if (!bb->frequency)
2223 	return REG_BR_PROB_BASE;
2224 
2225       if (SSA_NAME_IS_DEFAULT_DEF (base))
2226 	init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2227       else
2228 	init_freq = get_minimal_bb
2229 		      (gimple_bb (SSA_NAME_DEF_STMT (base)),
2230 		       gimple_bb (stmt))->frequency;
2231 
2232       if (!init_freq)
2233 	init_freq = 1;
2234       if (init_freq < bb->frequency)
2235 	return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2236       else
2237 	return REG_BR_PROB_BASE;
2238     }
2239   else
2240     {
2241       ao_ref refd;
2242       int max;
2243       struct record_modified_bb_info info;
2244       bitmap_iterator bi;
2245       unsigned index;
2246       tree init = ctor_for_folding (base);
2247 
2248       if (init != error_mark_node)
2249 	return 0;
2250       if (!bb->frequency)
2251 	return REG_BR_PROB_BASE;
2252       ao_ref_init (&refd, op);
2253       info.stmt = stmt;
2254       info.bb_set = BITMAP_ALLOC (NULL);
2255       walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2256 			  NULL);
2257       if (bitmap_bit_p (info.bb_set, bb->index))
2258 	{
2259 	  BITMAP_FREE (info.bb_set);
2260 	  return REG_BR_PROB_BASE;
2261 	}
2262 
2263       /* Assume that every memory is initialized at entry.
2264          TODO: Can we easilly determine if value is always defined
2265          and thus we may skip entry block?  */
2266       if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2267 	max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2268       else
2269 	max = 1;
2270 
2271       EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2272 	max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2273 
2274       BITMAP_FREE (info.bb_set);
2275       if (max < bb->frequency)
2276 	return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2277       else
2278 	return REG_BR_PROB_BASE;
2279     }
2280 }
2281 
2282 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2283    sub-graph and if the predicate the condition depends on is known.  If so,
2284    return true and store the pointer the predicate in *P.  */
2285 
2286 static bool
2287 phi_result_unknown_predicate (struct ipa_node_params *info,
2288 			      inline_summary *summary, basic_block bb,
2289 			      struct predicate *p,
2290 			      vec<predicate_t> nonconstant_names)
2291 {
2292   edge e;
2293   edge_iterator ei;
2294   basic_block first_bb = NULL;
2295   gimple *stmt;
2296 
2297   if (single_pred_p (bb))
2298     {
2299       *p = false_predicate ();
2300       return true;
2301     }
2302 
2303   FOR_EACH_EDGE (e, ei, bb->preds)
2304     {
2305       if (single_succ_p (e->src))
2306 	{
2307 	  if (!single_pred_p (e->src))
2308 	    return false;
2309 	  if (!first_bb)
2310 	    first_bb = single_pred (e->src);
2311 	  else if (single_pred (e->src) != first_bb)
2312 	    return false;
2313 	}
2314       else
2315 	{
2316 	  if (!first_bb)
2317 	    first_bb = e->src;
2318 	  else if (e->src != first_bb)
2319 	    return false;
2320 	}
2321     }
2322 
2323   if (!first_bb)
2324     return false;
2325 
2326   stmt = last_stmt (first_bb);
2327   if (!stmt
2328       || gimple_code (stmt) != GIMPLE_COND
2329       || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2330     return false;
2331 
2332   *p = will_be_nonconstant_expr_predicate (info, summary,
2333 					   gimple_cond_lhs (stmt),
2334 					   nonconstant_names);
2335   if (true_predicate_p (p))
2336     return false;
2337   else
2338     return true;
2339 }
2340 
2341 /* Given a PHI statement in a function described by inline properties SUMMARY
2342    and *P being the predicate describing whether the selected PHI argument is
2343    known, store a predicate for the result of the PHI statement into
2344    NONCONSTANT_NAMES, if possible.  */
2345 
2346 static void
2347 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2348 			  struct predicate *p,
2349 			  vec<predicate_t> nonconstant_names)
2350 {
2351   unsigned i;
2352 
2353   for (i = 0; i < gimple_phi_num_args (phi); i++)
2354     {
2355       tree arg = gimple_phi_arg (phi, i)->def;
2356       if (!is_gimple_min_invariant (arg))
2357 	{
2358 	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
2359 	  *p = or_predicates (summary->conds, p,
2360 			      &nonconstant_names[SSA_NAME_VERSION (arg)]);
2361 	  if (true_predicate_p (p))
2362 	    return;
2363 	}
2364     }
2365 
2366   if (dump_file && (dump_flags & TDF_DETAILS))
2367     {
2368       fprintf (dump_file, "\t\tphi predicate: ");
2369       dump_predicate (dump_file, summary->conds, p);
2370     }
2371   nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2372 }
2373 
2374 /* Return predicate specifying when array index in access OP becomes non-constant.  */
2375 
2376 static struct predicate
2377 array_index_predicate (inline_summary *info,
2378 		       vec< predicate_t> nonconstant_names, tree op)
2379 {
2380   struct predicate p = false_predicate ();
2381   while (handled_component_p (op))
2382     {
2383       if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2384 	{
2385 	  if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2386 	    p = or_predicates (info->conds, &p,
2387 			       &nonconstant_names[SSA_NAME_VERSION
2388 						  (TREE_OPERAND (op, 1))]);
2389 	}
2390       op = TREE_OPERAND (op, 0);
2391     }
2392   return p;
2393 }
2394 
2395 /* For a typical usage of __builtin_expect (a<b, 1), we
2396    may introduce an extra relation stmt:
2397    With the builtin, we have
2398      t1 = a <= b;
2399      t2 = (long int) t1;
2400      t3 = __builtin_expect (t2, 1);
2401      if (t3 != 0)
2402        goto ...
2403    Without the builtin, we have
2404      if (a<=b)
2405        goto...
2406    This affects the size/time estimation and may have
2407    an impact on the earlier inlining.
2408    Here find this pattern and fix it up later.  */
2409 
2410 static gimple *
2411 find_foldable_builtin_expect (basic_block bb)
2412 {
2413   gimple_stmt_iterator bsi;
2414 
2415   for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2416     {
2417       gimple *stmt = gsi_stmt (bsi);
2418       if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2419 	  || gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
2420         {
2421           tree var = gimple_call_lhs (stmt);
2422           tree arg = gimple_call_arg (stmt, 0);
2423           use_operand_p use_p;
2424 	  gimple *use_stmt;
2425           bool match = false;
2426           bool done = false;
2427 
2428           if (!var || !arg)
2429             continue;
2430           gcc_assert (TREE_CODE (var) == SSA_NAME);
2431 
2432           while (TREE_CODE (arg) == SSA_NAME)
2433             {
2434 	      gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
2435               if (!is_gimple_assign (stmt_tmp))
2436                 break;
2437               switch (gimple_assign_rhs_code (stmt_tmp))
2438                 {
2439                   case LT_EXPR:
2440                   case LE_EXPR:
2441                   case GT_EXPR:
2442                   case GE_EXPR:
2443                   case EQ_EXPR:
2444                   case NE_EXPR:
2445                     match = true;
2446                     done = true;
2447                     break;
2448                   CASE_CONVERT:
2449                     break;
2450                   default:
2451                     done = true;
2452                     break;
2453                 }
2454               if (done)
2455                 break;
2456               arg = gimple_assign_rhs1 (stmt_tmp);
2457             }
2458 
2459           if (match && single_imm_use (var, &use_p, &use_stmt)
2460               && gimple_code (use_stmt) == GIMPLE_COND)
2461             return use_stmt;
2462         }
2463     }
2464   return NULL;
2465 }
2466 
2467 /* Return true when the basic blocks contains only clobbers followed by RESX.
2468    Such BBs are kept around to make removal of dead stores possible with
2469    presence of EH and will be optimized out by optimize_clobbers later in the
2470    game.
2471 
2472    NEED_EH is used to recurse in case the clobber has non-EH predecestors
2473    that can be clobber only, too.. When it is false, the RESX is not necessary
2474    on the end of basic block.  */
2475 
2476 static bool
2477 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2478 {
2479   gimple_stmt_iterator gsi = gsi_last_bb (bb);
2480   edge_iterator ei;
2481   edge e;
2482 
2483   if (need_eh)
2484     {
2485       if (gsi_end_p (gsi))
2486 	return false;
2487       if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2488         return false;
2489       gsi_prev (&gsi);
2490     }
2491   else if (!single_succ_p (bb))
2492     return false;
2493 
2494   for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2495     {
2496       gimple *stmt = gsi_stmt (gsi);
2497       if (is_gimple_debug (stmt))
2498 	continue;
2499       if (gimple_clobber_p (stmt))
2500 	continue;
2501       if (gimple_code (stmt) == GIMPLE_LABEL)
2502 	break;
2503       return false;
2504     }
2505 
2506   /* See if all predecestors are either throws or clobber only BBs.  */
2507   FOR_EACH_EDGE (e, ei, bb->preds)
2508     if (!(e->flags & EDGE_EH)
2509 	&& !clobber_only_eh_bb_p (e->src, false))
2510       return false;
2511 
2512   return true;
2513 }
2514 
2515 /* Return true if STMT compute a floating point expression that may be affected
2516    by -ffast-math and similar flags.  */
2517 
2518 static bool
2519 fp_expression_p (gimple *stmt)
2520 {
2521   ssa_op_iter i;
2522   tree op;
2523 
2524   FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
2525     if (FLOAT_TYPE_P (TREE_TYPE (op)))
2526       return true;
2527   return false;
2528 }
2529 
2530 /* Compute function body size parameters for NODE.
2531    When EARLY is true, we compute only simple summaries without
2532    non-trivial predicates to drive the early inliner.  */
2533 
2534 static void
2535 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2536 {
2537   gcov_type time = 0;
2538   /* Estimate static overhead for function prologue/epilogue and alignment. */
2539   int size = 2;
2540   /* Benefits are scaled by probability of elimination that is in range
2541      <0,2>.  */
2542   basic_block bb;
2543   struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2544   int freq;
2545   struct inline_summary *info = inline_summaries->get (node);
2546   struct predicate bb_predicate;
2547   struct ipa_func_body_info fbi;
2548   vec<predicate_t> nonconstant_names = vNULL;
2549   int nblocks, n;
2550   int *order;
2551   predicate array_index = true_predicate ();
2552   gimple *fix_builtin_expect_stmt;
2553 
2554   gcc_assert (my_function && my_function->cfg);
2555   gcc_assert (cfun == my_function);
2556 
2557   memset(&fbi, 0, sizeof(fbi));
2558   info->conds = NULL;
2559   info->entry = NULL;
2560 
2561   /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2562      so we can produce proper inline hints.
2563 
2564      When optimizing and analyzing for early inliner, initialize node params
2565      so we can produce correct BB predicates.  */
2566 
2567   if (opt_for_fn (node->decl, optimize))
2568     {
2569       calculate_dominance_info (CDI_DOMINATORS);
2570       if (!early)
2571         loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2572       else
2573 	{
2574 	  ipa_check_create_node_params ();
2575 	  ipa_initialize_node_params (node);
2576 	}
2577 
2578       if (ipa_node_params_sum)
2579 	{
2580 	  fbi.node = node;
2581 	  fbi.info = IPA_NODE_REF (node);
2582 	  fbi.bb_infos = vNULL;
2583 	  fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2584 	  fbi.param_count = count_formal_params(node->decl);
2585 	  nonconstant_names.safe_grow_cleared
2586 	    (SSANAMES (my_function)->length ());
2587 	}
2588     }
2589 
2590   if (dump_file)
2591     fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2592 	     node->name ());
2593 
2594   /* When we run into maximal number of entries, we assign everything to the
2595      constant truth case.  Be sure to have it in list. */
2596   bb_predicate = true_predicate ();
2597   account_size_time (info, 0, 0, &bb_predicate);
2598 
2599   bb_predicate = not_inlined_predicate ();
2600   account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2601 
2602   if (fbi.info)
2603     compute_bb_predicates (&fbi, node, info);
2604   order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2605   nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2606   for (n = 0; n < nblocks; n++)
2607     {
2608       bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2609       freq = compute_call_stmt_bb_frequency (node->decl, bb);
2610       if (clobber_only_eh_bb_p (bb))
2611 	{
2612 	  if (dump_file && (dump_flags & TDF_DETAILS))
2613 	    fprintf (dump_file, "\n Ignoring BB %i;"
2614 		     " it will be optimized away by cleanup_clobbers\n",
2615 		     bb->index);
2616 	  continue;
2617 	}
2618 
2619       /* TODO: Obviously predicates can be propagated down across CFG.  */
2620       if (fbi.info)
2621 	{
2622 	  if (bb->aux)
2623 	    bb_predicate = *(struct predicate *) bb->aux;
2624 	  else
2625 	    bb_predicate = false_predicate ();
2626 	}
2627       else
2628 	bb_predicate = true_predicate ();
2629 
2630       if (dump_file && (dump_flags & TDF_DETAILS))
2631 	{
2632 	  fprintf (dump_file, "\n BB %i predicate:", bb->index);
2633 	  dump_predicate (dump_file, info->conds, &bb_predicate);
2634 	}
2635 
2636       if (fbi.info && nonconstant_names.exists ())
2637 	{
2638 	  struct predicate phi_predicate;
2639 	  bool first_phi = true;
2640 
2641 	  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2642 	       gsi_next (&bsi))
2643 	    {
2644 	      if (first_phi
2645 		  && !phi_result_unknown_predicate (fbi.info, info, bb,
2646 						    &phi_predicate,
2647 						    nonconstant_names))
2648 		break;
2649 	      first_phi = false;
2650 	      if (dump_file && (dump_flags & TDF_DETAILS))
2651 		{
2652 		  fprintf (dump_file, "  ");
2653 		  print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2654 		}
2655 	      predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2656 					nonconstant_names);
2657 	    }
2658 	}
2659 
2660       fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2661 
2662       for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2663 	   gsi_next (&bsi))
2664 	{
2665 	  gimple *stmt = gsi_stmt (bsi);
2666 	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
2667 	  int this_time = estimate_num_insns (stmt, &eni_time_weights);
2668 	  int prob;
2669 	  struct predicate will_be_nonconstant;
2670 
2671           /* This relation stmt should be folded after we remove
2672              buildin_expect call. Adjust the cost here.  */
2673 	  if (stmt == fix_builtin_expect_stmt)
2674             {
2675               this_size--;
2676               this_time--;
2677             }
2678 
2679 	  if (dump_file && (dump_flags & TDF_DETAILS))
2680 	    {
2681 	      fprintf (dump_file, "  ");
2682 	      print_gimple_stmt (dump_file, stmt, 0, 0);
2683 	      fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2684 		       ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2685 		       this_time);
2686 	    }
2687 
2688 	  if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2689 	    {
2690 	      struct predicate this_array_index;
2691 	      this_array_index =
2692 		array_index_predicate (info, nonconstant_names,
2693 				       gimple_assign_rhs1 (stmt));
2694 	      if (!false_predicate_p (&this_array_index))
2695 		array_index =
2696 		  and_predicates (info->conds, &array_index,
2697 				  &this_array_index);
2698 	    }
2699 	  if (gimple_store_p (stmt) && nonconstant_names.exists ())
2700 	    {
2701 	      struct predicate this_array_index;
2702 	      this_array_index =
2703 		array_index_predicate (info, nonconstant_names,
2704 				       gimple_get_lhs (stmt));
2705 	      if (!false_predicate_p (&this_array_index))
2706 		array_index =
2707 		  and_predicates (info->conds, &array_index,
2708 				  &this_array_index);
2709 	    }
2710 
2711 
2712 	  if (is_gimple_call (stmt)
2713 	      && !gimple_call_internal_p (stmt))
2714 	    {
2715 	      struct cgraph_edge *edge = node->get_edge (stmt);
2716 	      struct inline_edge_summary *es = inline_edge_summary (edge);
2717 
2718 	      /* Special case: results of BUILT_IN_CONSTANT_P will be always
2719 	         resolved as constant.  We however don't want to optimize
2720 	         out the cgraph edges.  */
2721 	      if (nonconstant_names.exists ()
2722 		  && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2723 		  && gimple_call_lhs (stmt)
2724 		  && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2725 		{
2726 		  struct predicate false_p = false_predicate ();
2727 		  nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2728 		    = false_p;
2729 		}
2730 	      if (ipa_node_params_sum)
2731 		{
2732 		  int count = gimple_call_num_args (stmt);
2733 		  int i;
2734 
2735 		  if (count)
2736 		    es->param.safe_grow_cleared (count);
2737 		  for (i = 0; i < count; i++)
2738 		    {
2739 		      int prob = param_change_prob (stmt, i);
2740 		      gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2741 		      es->param[i].change_prob = prob;
2742 		    }
2743 		}
2744 
2745 	      es->call_stmt_size = this_size;
2746 	      es->call_stmt_time = this_time;
2747 	      es->loop_depth = bb_loop_depth (bb);
2748 	      edge_set_predicate (edge, &bb_predicate);
2749 	    }
2750 
2751 	  /* TODO: When conditional jump or swithc is known to be constant, but
2752 	     we did not translate it into the predicates, we really can account
2753 	     just maximum of the possible paths.  */
2754 	  if (fbi.info)
2755 	    will_be_nonconstant
2756 	      = will_be_nonconstant_predicate (&fbi, info,
2757 					       stmt, nonconstant_names);
2758 	  if (this_time || this_size)
2759 	    {
2760 	      struct predicate p;
2761 
2762 	      this_time *= freq;
2763 
2764 	      prob = eliminated_by_inlining_prob (stmt);
2765 	      if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2766 		fprintf (dump_file,
2767 			 "\t\t50%% will be eliminated by inlining\n");
2768 	      if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2769 		fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2770 
2771 	      if (fbi.info)
2772 		p = and_predicates (info->conds, &bb_predicate,
2773 				    &will_be_nonconstant);
2774 	      else
2775 		p = true_predicate ();
2776 
2777 	      if (!false_predicate_p (&p)
2778 		  || (is_gimple_call (stmt)
2779 		      && !false_predicate_p (&bb_predicate)))
2780 		{
2781 		  time += this_time;
2782 		  size += this_size;
2783 		  if (time > MAX_TIME * INLINE_TIME_SCALE)
2784 		    time = MAX_TIME * INLINE_TIME_SCALE;
2785 		}
2786 
2787 	      /* We account everything but the calls.  Calls have their own
2788 	         size/time info attached to cgraph edges.  This is necessary
2789 	         in order to make the cost disappear after inlining.  */
2790 	      if (!is_gimple_call (stmt))
2791 		{
2792 		  if (prob)
2793 		    {
2794 		      struct predicate ip = not_inlined_predicate ();
2795 		      ip = and_predicates (info->conds, &ip, &p);
2796 		      account_size_time (info, this_size * prob,
2797 					 this_time * prob, &ip);
2798 		    }
2799 		  if (prob != 2)
2800 		    account_size_time (info, this_size * (2 - prob),
2801 				       this_time * (2 - prob), &p);
2802 		}
2803 
2804 	      if (!info->fp_expressions && fp_expression_p (stmt))
2805 		{
2806 		  info->fp_expressions = true;
2807 		  if (dump_file)
2808 		    fprintf (dump_file, "   fp_expression set\n");
2809 		}
2810 
2811 	      gcc_assert (time >= 0);
2812 	      gcc_assert (size >= 0);
2813 	    }
2814 	}
2815     }
2816   set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2817   time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2818   if (time > MAX_TIME)
2819     time = MAX_TIME;
2820   free (order);
2821 
2822   if (nonconstant_names.exists () && !early)
2823     {
2824       struct loop *loop;
2825       predicate loop_iterations = true_predicate ();
2826       predicate loop_stride = true_predicate ();
2827 
2828       if (dump_file && (dump_flags & TDF_DETAILS))
2829 	flow_loops_dump (dump_file, NULL, 0);
2830       scev_initialize ();
2831       FOR_EACH_LOOP (loop, 0)
2832 	{
2833 	  vec<edge> exits;
2834 	  edge ex;
2835 	  unsigned int j;
2836 	  struct tree_niter_desc niter_desc;
2837 	  bb_predicate = *(struct predicate *) loop->header->aux;
2838 
2839 	  exits = get_loop_exit_edges (loop);
2840 	  FOR_EACH_VEC_ELT (exits, j, ex)
2841 	    if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2842 		&& !is_gimple_min_invariant (niter_desc.niter))
2843 	    {
2844 	      predicate will_be_nonconstant
2845 		= will_be_nonconstant_expr_predicate (fbi.info, info,
2846 						      niter_desc.niter,
2847 						      nonconstant_names);
2848 	      if (!true_predicate_p (&will_be_nonconstant))
2849 		will_be_nonconstant = and_predicates (info->conds,
2850 						      &bb_predicate,
2851 						      &will_be_nonconstant);
2852 	      if (!true_predicate_p (&will_be_nonconstant)
2853 		  && !false_predicate_p (&will_be_nonconstant))
2854 		/* This is slightly inprecise.  We may want to represent each
2855 		   loop with independent predicate.  */
2856 		loop_iterations =
2857 		  and_predicates (info->conds, &loop_iterations,
2858 				  &will_be_nonconstant);
2859 	    }
2860 	  exits.release ();
2861 	}
2862 
2863       /* To avoid quadratic behavior we analyze stride predicates only
2864          with respect to the containing loop.  Thus we simply iterate
2865 	 over all defs in the outermost loop body.  */
2866       for (loop = loops_for_fn (cfun)->tree_root->inner;
2867 	   loop != NULL; loop = loop->next)
2868 	{
2869 	  basic_block *body = get_loop_body (loop);
2870 	  for (unsigned i = 0; i < loop->num_nodes; i++)
2871 	    {
2872 	      gimple_stmt_iterator gsi;
2873 	      bb_predicate = *(struct predicate *) body[i]->aux;
2874 	      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2875 		   gsi_next (&gsi))
2876 		{
2877 		  gimple *stmt = gsi_stmt (gsi);
2878 
2879 		  if (!is_gimple_assign (stmt))
2880 		    continue;
2881 
2882 		  tree def = gimple_assign_lhs (stmt);
2883 		  if (TREE_CODE (def) != SSA_NAME)
2884 		    continue;
2885 
2886 		  affine_iv iv;
2887 		  if (!simple_iv (loop_containing_stmt (stmt),
2888 				  loop_containing_stmt (stmt),
2889 				  def, &iv, true)
2890 		      || is_gimple_min_invariant (iv.step))
2891 		    continue;
2892 
2893 		  predicate will_be_nonconstant
2894 		    = will_be_nonconstant_expr_predicate (fbi.info, info,
2895 							  iv.step,
2896 							  nonconstant_names);
2897 		  if (!true_predicate_p (&will_be_nonconstant))
2898 		    will_be_nonconstant
2899 		      = and_predicates (info->conds, &bb_predicate,
2900 					&will_be_nonconstant);
2901 		  if (!true_predicate_p (&will_be_nonconstant)
2902 		      && !false_predicate_p (&will_be_nonconstant))
2903 		    /* This is slightly inprecise.  We may want to represent
2904 		       each loop with independent predicate.  */
2905 		    loop_stride = and_predicates (info->conds, &loop_stride,
2906 						  &will_be_nonconstant);
2907 		}
2908 	    }
2909 	  free (body);
2910 	}
2911       set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2912 			  loop_iterations);
2913       set_hint_predicate (&inline_summaries->get (node)->loop_stride,
2914 			  loop_stride);
2915       scev_finalize ();
2916     }
2917   FOR_ALL_BB_FN (bb, my_function)
2918     {
2919       edge e;
2920       edge_iterator ei;
2921 
2922       if (bb->aux)
2923 	edge_predicate_pool.remove ((predicate *)bb->aux);
2924       bb->aux = NULL;
2925       FOR_EACH_EDGE (e, ei, bb->succs)
2926 	{
2927 	  if (e->aux)
2928 	    edge_predicate_pool.remove ((predicate *) e->aux);
2929 	  e->aux = NULL;
2930 	}
2931     }
2932   inline_summaries->get (node)->self_time = time;
2933   inline_summaries->get (node)->self_size = size;
2934   nonconstant_names.release ();
2935   ipa_release_body_info (&fbi);
2936   if (opt_for_fn (node->decl, optimize))
2937     {
2938       if (!early)
2939         loop_optimizer_finalize ();
2940       else if (!ipa_edge_args_vector)
2941 	ipa_free_all_node_params ();
2942       free_dominance_info (CDI_DOMINATORS);
2943     }
2944   if (dump_file)
2945     {
2946       fprintf (dump_file, "\n");
2947       dump_inline_summary (dump_file, node);
2948     }
2949 }
2950 
2951 
2952 /* Compute parameters of functions used by inliner.
2953    EARLY is true when we compute parameters for the early inliner  */
2954 
2955 void
2956 compute_inline_parameters (struct cgraph_node *node, bool early)
2957 {
2958   HOST_WIDE_INT self_stack_size;
2959   struct cgraph_edge *e;
2960   struct inline_summary *info;
2961 
2962   gcc_assert (!node->global.inlined_to);
2963 
2964   inline_summary_alloc ();
2965 
2966   info = inline_summaries->get (node);
2967   reset_inline_summary (node, info);
2968 
2969   /* Estimate the stack size for the function if we're optimizing.  */
2970   self_stack_size = optimize && !node->thunk.thunk_p
2971 		    ? estimated_stack_frame_size (node) : 0;
2972   info->estimated_self_stack_size = self_stack_size;
2973   info->estimated_stack_size = self_stack_size;
2974   info->stack_frame_offset = 0;
2975 
2976   if (node->thunk.thunk_p)
2977     {
2978       struct inline_edge_summary *es = inline_edge_summary (node->callees);
2979       struct predicate t = true_predicate ();
2980 
2981       node->local.can_change_signature = false;
2982       es->call_stmt_size = eni_size_weights.call_cost;
2983       es->call_stmt_time = eni_time_weights.call_cost;
2984       account_size_time (info, INLINE_SIZE_SCALE * 2,
2985 			 INLINE_TIME_SCALE * 2, &t);
2986       t = not_inlined_predicate ();
2987       account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &t);
2988       inline_update_overall_summary (node);
2989       info->self_size = info->size;
2990       info->self_time = info->time;
2991       /* We can not inline instrumentation clones.  */
2992       if (node->thunk.add_pointer_bounds_args)
2993 	{
2994           info->inlinable = false;
2995           node->callees->inline_failed = CIF_CHKP;
2996 	}
2997       else if (stdarg_p (TREE_TYPE (node->decl)))
2998 	{
2999 	  info->inlinable = false;
3000 	  node->callees->inline_failed = CIF_VARIADIC_THUNK;
3001 	}
3002       else
3003         info->inlinable = true;
3004     }
3005   else
3006     {
3007        /* Even is_gimple_min_invariant rely on current_function_decl.  */
3008        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
3009 
3010        /* Can this function be inlined at all?  */
3011        if (!opt_for_fn (node->decl, optimize)
3012 	   && !lookup_attribute ("always_inline",
3013 				 DECL_ATTRIBUTES (node->decl)))
3014 	 info->inlinable = false;
3015        else
3016 	 info->inlinable = tree_inlinable_function_p (node->decl);
3017 
3018        info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
3019 
3020        /* Type attributes can use parameter indices to describe them.  */
3021        if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
3022 	 node->local.can_change_signature = false;
3023        else
3024 	 {
3025 	   /* Otherwise, inlinable functions always can change signature.  */
3026 	   if (info->inlinable)
3027 	     node->local.can_change_signature = true;
3028 	   else
3029 	     {
3030 	       /* Functions calling builtin_apply can not change signature.  */
3031 	       for (e = node->callees; e; e = e->next_callee)
3032 		 {
3033 		   tree cdecl = e->callee->decl;
3034 		   if (DECL_BUILT_IN (cdecl)
3035 		       && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
3036 		       && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
3037 			   || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
3038 		     break;
3039 		 }
3040 	       node->local.can_change_signature = !e;
3041 	     }
3042 	 }
3043        /* Functions called by instrumentation thunk can't change signature
3044 	  because instrumentation thunk modification is not supported.  */
3045        if (node->local.can_change_signature)
3046 	 for (e = node->callers; e; e = e->next_caller)
3047 	   if (e->caller->thunk.thunk_p
3048 	       && e->caller->thunk.add_pointer_bounds_args)
3049 	     {
3050 	       node->local.can_change_signature = false;
3051 	       break;
3052 	     }
3053        estimate_function_body_sizes (node, early);
3054        pop_cfun ();
3055      }
3056   for (e = node->callees; e; e = e->next_callee)
3057     if (e->callee->comdat_local_p ())
3058       break;
3059   node->calls_comdat_local = (e != NULL);
3060 
3061   /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
3062   info->time = info->self_time;
3063   info->size = info->self_size;
3064   info->stack_frame_offset = 0;
3065   info->estimated_stack_size = info->estimated_self_stack_size;
3066   if (flag_checking)
3067     {
3068       inline_update_overall_summary (node);
3069       gcc_assert (info->time == info->self_time
3070 		  && info->size == info->self_size);
3071     }
3072 }
3073 
3074 
3075 /* Compute parameters of functions used by inliner using
3076    current_function_decl.  */
3077 
3078 static unsigned int
3079 compute_inline_parameters_for_current (void)
3080 {
3081   compute_inline_parameters (cgraph_node::get (current_function_decl), true);
3082   return 0;
3083 }
3084 
3085 namespace {
3086 
3087 const pass_data pass_data_inline_parameters =
3088 {
3089   GIMPLE_PASS, /* type */
3090   "inline_param", /* name */
3091   OPTGROUP_INLINE, /* optinfo_flags */
3092   TV_INLINE_PARAMETERS, /* tv_id */
3093   0, /* properties_required */
3094   0, /* properties_provided */
3095   0, /* properties_destroyed */
3096   0, /* todo_flags_start */
3097   0, /* todo_flags_finish */
3098 };
3099 
3100 class pass_inline_parameters : public gimple_opt_pass
3101 {
3102 public:
3103   pass_inline_parameters (gcc::context *ctxt)
3104     : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3105   {}
3106 
3107   /* opt_pass methods: */
3108   opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3109   virtual unsigned int execute (function *)
3110     {
3111       return compute_inline_parameters_for_current ();
3112     }
3113 
3114 }; // class pass_inline_parameters
3115 
3116 } // anon namespace
3117 
3118 gimple_opt_pass *
3119 make_pass_inline_parameters (gcc::context *ctxt)
3120 {
3121   return new pass_inline_parameters (ctxt);
3122 }
3123 
3124 
3125 /* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
3126    KNOWN_CONTEXTS and KNOWN_AGGS.  */
3127 
3128 static bool
3129 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3130 			      int *size, int *time,
3131 			      vec<tree> known_vals,
3132 			      vec<ipa_polymorphic_call_context> known_contexts,
3133 			      vec<ipa_agg_jump_function_p> known_aggs)
3134 {
3135   tree target;
3136   struct cgraph_node *callee;
3137   struct inline_summary *isummary;
3138   enum availability avail;
3139   bool speculative;
3140 
3141   if (!known_vals.exists () && !known_contexts.exists ())
3142     return false;
3143   if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3144     return false;
3145 
3146   target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3147 					 known_aggs, &speculative);
3148   if (!target || speculative)
3149     return false;
3150 
3151   /* Account for difference in cost between indirect and direct calls.  */
3152   *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3153   *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3154   gcc_checking_assert (*time >= 0);
3155   gcc_checking_assert (*size >= 0);
3156 
3157   callee = cgraph_node::get (target);
3158   if (!callee || !callee->definition)
3159     return false;
3160   callee = callee->function_symbol (&avail);
3161   if (avail < AVAIL_AVAILABLE)
3162     return false;
3163   isummary = inline_summaries->get (callee);
3164   return isummary->inlinable;
3165 }
3166 
3167 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3168    handle edge E with probability PROB.
3169    Set HINTS if edge may be devirtualized.
3170    KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3171    site.  */
3172 
3173 static inline void
3174 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3175 			     int *time,
3176 			     int prob,
3177 			     vec<tree> known_vals,
3178 			     vec<ipa_polymorphic_call_context> known_contexts,
3179 			     vec<ipa_agg_jump_function_p> known_aggs,
3180 			     inline_hints *hints)
3181 {
3182   struct inline_edge_summary *es = inline_edge_summary (e);
3183   int call_size = es->call_stmt_size;
3184   int call_time = es->call_stmt_time;
3185   int cur_size;
3186   if (!e->callee
3187       && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3188 				       known_vals, known_contexts, known_aggs)
3189       && hints && e->maybe_hot_p ())
3190     *hints |= INLINE_HINT_indirect_call;
3191   cur_size = call_size * INLINE_SIZE_SCALE;
3192   *size += cur_size;
3193   if (min_size)
3194     *min_size += cur_size;
3195   *time += apply_probability ((gcov_type) call_time, prob)
3196     * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3197   if (*time > MAX_TIME * INLINE_TIME_SCALE)
3198     *time = MAX_TIME * INLINE_TIME_SCALE;
3199 }
3200 
3201 
3202 
3203 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3204    calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3205    describe context of the call site.  */
3206 
3207 static void
3208 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3209 			      int *min_size, int *time,
3210 			      inline_hints *hints,
3211 			      clause_t possible_truths,
3212 			      vec<tree> known_vals,
3213 			      vec<ipa_polymorphic_call_context> known_contexts,
3214 			      vec<ipa_agg_jump_function_p> known_aggs)
3215 {
3216   struct cgraph_edge *e;
3217   for (e = node->callees; e; e = e->next_callee)
3218     {
3219       if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3220 	continue;
3221 
3222       struct inline_edge_summary *es = inline_edge_summary (e);
3223 
3224       /* Do not care about zero sized builtins.  */
3225       if (e->inline_failed && !es->call_stmt_size)
3226 	{
3227 	  gcc_checking_assert (!es->call_stmt_time);
3228 	  continue;
3229 	}
3230       if (!es->predicate
3231 	  || evaluate_predicate (es->predicate, possible_truths))
3232 	{
3233 	  if (e->inline_failed)
3234 	    {
3235 	      /* Predicates of calls shall not use NOT_CHANGED codes,
3236 	         sowe do not need to compute probabilities.  */
3237 	      estimate_edge_size_and_time (e, size,
3238 					   es->predicate ? NULL : min_size,
3239 					   time, REG_BR_PROB_BASE,
3240 					   known_vals, known_contexts,
3241 					   known_aggs, hints);
3242 	    }
3243 	  else
3244 	    estimate_calls_size_and_time (e->callee, size, min_size, time,
3245 					  hints,
3246 					  possible_truths,
3247 					  known_vals, known_contexts,
3248 					  known_aggs);
3249 	}
3250     }
3251   for (e = node->indirect_calls; e; e = e->next_callee)
3252     {
3253       if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3254 	continue;
3255 
3256       struct inline_edge_summary *es = inline_edge_summary (e);
3257       if (!es->predicate
3258 	  || evaluate_predicate (es->predicate, possible_truths))
3259 	estimate_edge_size_and_time (e, size,
3260 				     es->predicate ? NULL : min_size,
3261 				     time, REG_BR_PROB_BASE,
3262 				     known_vals, known_contexts, known_aggs,
3263 				     hints);
3264     }
3265 }
3266 
3267 
3268 /* Estimate size and time needed to execute NODE assuming
3269    POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3270    information about NODE's arguments.  If non-NULL use also probability
3271    information present in INLINE_PARAM_SUMMARY vector.
3272    Additionally detemine hints determined by the context.  Finally compute
3273    minimal size needed for the call that is independent on the call context and
3274    can be used for fast estimates.  Return the values in RET_SIZE,
3275    RET_MIN_SIZE, RET_TIME and RET_HINTS.  */
3276 
3277 static void
3278 estimate_node_size_and_time (struct cgraph_node *node,
3279 			     clause_t possible_truths,
3280 			     vec<tree> known_vals,
3281 			     vec<ipa_polymorphic_call_context> known_contexts,
3282 			     vec<ipa_agg_jump_function_p> known_aggs,
3283 			     int *ret_size, int *ret_min_size, int *ret_time,
3284 			     inline_hints *ret_hints,
3285 			     vec<inline_param_summary>
3286 			     inline_param_summary)
3287 {
3288   struct inline_summary *info = inline_summaries->get (node);
3289   size_time_entry *e;
3290   int size = 0;
3291   int time = 0;
3292   int min_size = 0;
3293   inline_hints hints = 0;
3294   int i;
3295 
3296   if (dump_file && (dump_flags & TDF_DETAILS))
3297     {
3298       bool found = false;
3299       fprintf (dump_file, "   Estimating body: %s/%i\n"
3300 	       "   Known to be false: ", node->name (),
3301 	       node->order);
3302 
3303       for (i = predicate_not_inlined_condition;
3304 	   i < (predicate_first_dynamic_condition
3305 		+ (int) vec_safe_length (info->conds)); i++)
3306 	if (!(possible_truths & (1 << i)))
3307 	  {
3308 	    if (found)
3309 	      fprintf (dump_file, ", ");
3310 	    found = true;
3311 	    dump_condition (dump_file, info->conds, i);
3312 	  }
3313     }
3314 
3315   for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3316     if (evaluate_predicate (&e->predicate, possible_truths))
3317       {
3318 	size += e->size;
3319 	gcc_checking_assert (e->time >= 0);
3320 	gcc_checking_assert (time >= 0);
3321 	if (!inline_param_summary.exists ())
3322 	  time += e->time;
3323 	else
3324 	  {
3325 	    int prob = predicate_probability (info->conds,
3326 					      &e->predicate,
3327 					      possible_truths,
3328 					      inline_param_summary);
3329 	    gcc_checking_assert (prob >= 0);
3330 	    gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3331 	    time += apply_probability ((gcov_type) e->time, prob);
3332 	  }
3333 	if (time > MAX_TIME * INLINE_TIME_SCALE)
3334 	  time = MAX_TIME * INLINE_TIME_SCALE;
3335 	gcc_checking_assert (time >= 0);
3336 
3337       }
3338   gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3339   min_size = (*info->entry)[0].size;
3340   gcc_checking_assert (size >= 0);
3341   gcc_checking_assert (time >= 0);
3342 
3343   if (info->loop_iterations
3344       && !evaluate_predicate (info->loop_iterations, possible_truths))
3345     hints |= INLINE_HINT_loop_iterations;
3346   if (info->loop_stride
3347       && !evaluate_predicate (info->loop_stride, possible_truths))
3348     hints |= INLINE_HINT_loop_stride;
3349   if (info->array_index
3350       && !evaluate_predicate (info->array_index, possible_truths))
3351     hints |= INLINE_HINT_array_index;
3352   if (info->scc_no)
3353     hints |= INLINE_HINT_in_scc;
3354   if (DECL_DECLARED_INLINE_P (node->decl))
3355     hints |= INLINE_HINT_declared_inline;
3356 
3357   estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3358 				known_vals, known_contexts, known_aggs);
3359   gcc_checking_assert (size >= 0);
3360   gcc_checking_assert (time >= 0);
3361   time = RDIV (time, INLINE_TIME_SCALE);
3362   size = RDIV (size, INLINE_SIZE_SCALE);
3363   min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3364 
3365   if (dump_file && (dump_flags & TDF_DETAILS))
3366     fprintf (dump_file, "\n   size:%i time:%i\n", (int) size, (int) time);
3367   if (ret_time)
3368     *ret_time = time;
3369   if (ret_size)
3370     *ret_size = size;
3371   if (ret_min_size)
3372     *ret_min_size = min_size;
3373   if (ret_hints)
3374     *ret_hints = hints;
3375   return;
3376 }
3377 
3378 
3379 /* Estimate size and time needed to execute callee of EDGE assuming that
3380    parameters known to be constant at caller of EDGE are propagated.
3381    KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3382    and types for parameters.  */
3383 
3384 void
3385 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3386 				   vec<tree> known_vals,
3387 				   vec<ipa_polymorphic_call_context>
3388 				   known_contexts,
3389 				   vec<ipa_agg_jump_function_p> known_aggs,
3390 				   int *ret_size, int *ret_time,
3391 				   inline_hints *hints)
3392 {
3393   clause_t clause;
3394 
3395   clause = evaluate_conditions_for_known_args (node, false, known_vals,
3396 					       known_aggs);
3397   estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3398 			       known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3399 }
3400 
3401 /* Translate all conditions from callee representation into caller
3402    representation and symbolically evaluate predicate P into new predicate.
3403 
3404    INFO is inline_summary of function we are adding predicate into, CALLEE_INFO
3405    is summary of function predicate P is from. OPERAND_MAP is array giving
3406    callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clausule of all
3407    callee conditions that may be true in caller context.  TOPLEV_PREDICATE is
3408    predicate under which callee is executed.  OFFSET_MAP is an array of of
3409    offsets that need to be added to conditions, negative offset means that
3410    conditions relying on values passed by reference have to be discarded
3411    because they might not be preserved (and should be considered offset zero
3412    for other purposes).  */
3413 
3414 static struct predicate
3415 remap_predicate (struct inline_summary *info,
3416 		 struct inline_summary *callee_info,
3417 		 struct predicate *p,
3418 		 vec<int> operand_map,
3419 		 vec<int> offset_map,
3420 		 clause_t possible_truths, struct predicate *toplev_predicate)
3421 {
3422   int i;
3423   struct predicate out = true_predicate ();
3424 
3425   /* True predicate is easy.  */
3426   if (true_predicate_p (p))
3427     return *toplev_predicate;
3428   for (i = 0; p->clause[i]; i++)
3429     {
3430       clause_t clause = p->clause[i];
3431       int cond;
3432       struct predicate clause_predicate = false_predicate ();
3433 
3434       gcc_assert (i < MAX_CLAUSES);
3435 
3436       for (cond = 0; cond < NUM_CONDITIONS; cond++)
3437 	/* Do we have condition we can't disprove?   */
3438 	if (clause & possible_truths & (1 << cond))
3439 	  {
3440 	    struct predicate cond_predicate;
3441 	    /* Work out if the condition can translate to predicate in the
3442 	       inlined function.  */
3443 	    if (cond >= predicate_first_dynamic_condition)
3444 	      {
3445 		struct condition *c;
3446 
3447 		c = &(*callee_info->conds)[cond
3448 					   -
3449 					   predicate_first_dynamic_condition];
3450 		/* See if we can remap condition operand to caller's operand.
3451 		   Otherwise give up.  */
3452 		if (!operand_map.exists ()
3453 		    || (int) operand_map.length () <= c->operand_num
3454 		    || operand_map[c->operand_num] == -1
3455 		    /* TODO: For non-aggregate conditions, adding an offset is
3456 		       basically an arithmetic jump function processing which
3457 		       we should support in future.  */
3458 		    || ((!c->agg_contents || !c->by_ref)
3459 			&& offset_map[c->operand_num] > 0)
3460 		    || (c->agg_contents && c->by_ref
3461 			&& offset_map[c->operand_num] < 0))
3462 		  cond_predicate = true_predicate ();
3463 		else
3464 		  {
3465 		    struct agg_position_info ap;
3466 		    HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3467 		    if (offset_delta < 0)
3468 		      {
3469 			gcc_checking_assert (!c->agg_contents || !c->by_ref);
3470 			offset_delta = 0;
3471 		      }
3472 		    gcc_assert (!c->agg_contents
3473 				|| c->by_ref || offset_delta == 0);
3474 		    ap.offset = c->offset + offset_delta;
3475 		    ap.agg_contents = c->agg_contents;
3476 		    ap.by_ref = c->by_ref;
3477 		    cond_predicate = add_condition (info,
3478 						    operand_map[c->operand_num],
3479 						    c->size, &ap, c->code,
3480 						    c->val);
3481 		  }
3482 	      }
3483 	    /* Fixed conditions remains same, construct single
3484 	       condition predicate.  */
3485 	    else
3486 	      {
3487 		cond_predicate.clause[0] = 1 << cond;
3488 		cond_predicate.clause[1] = 0;
3489 	      }
3490 	    clause_predicate = or_predicates (info->conds, &clause_predicate,
3491 					      &cond_predicate);
3492 	  }
3493       out = and_predicates (info->conds, &out, &clause_predicate);
3494     }
3495   return and_predicates (info->conds, &out, toplev_predicate);
3496 }
3497 
3498 
3499 /* Update summary information of inline clones after inlining.
3500    Compute peak stack usage.  */
3501 
3502 static void
3503 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3504 {
3505   struct cgraph_edge *e;
3506   struct inline_summary *callee_info = inline_summaries->get (node);
3507   struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3508   HOST_WIDE_INT peak;
3509 
3510   callee_info->stack_frame_offset
3511     = caller_info->stack_frame_offset
3512     + caller_info->estimated_self_stack_size;
3513   peak = callee_info->stack_frame_offset
3514     + callee_info->estimated_self_stack_size;
3515   if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3516       inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3517   ipa_propagate_frequency (node);
3518   for (e = node->callees; e; e = e->next_callee)
3519     {
3520       if (!e->inline_failed)
3521 	inline_update_callee_summaries (e->callee, depth);
3522       inline_edge_summary (e)->loop_depth += depth;
3523     }
3524   for (e = node->indirect_calls; e; e = e->next_callee)
3525     inline_edge_summary (e)->loop_depth += depth;
3526 }
3527 
3528 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3529    When functoin A is inlined in B and A calls C with parameter that
3530    changes with probability PROB1 and C is known to be passthroug
3531    of argument if B that change with probability PROB2, the probability
3532    of change is now PROB1*PROB2.  */
3533 
3534 static void
3535 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3536 			struct cgraph_edge *edge)
3537 {
3538   if (ipa_node_params_sum)
3539     {
3540       int i;
3541       struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3542       struct inline_edge_summary *es = inline_edge_summary (edge);
3543       struct inline_edge_summary *inlined_es
3544 	= inline_edge_summary (inlined_edge);
3545 
3546       for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3547 	{
3548 	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3549 	  if (jfunc->type == IPA_JF_PASS_THROUGH
3550 	      || jfunc->type == IPA_JF_ANCESTOR)
3551 	    {
3552 	      int id = jfunc->type == IPA_JF_PASS_THROUGH
3553 		       ? ipa_get_jf_pass_through_formal_id (jfunc)
3554 		       : ipa_get_jf_ancestor_formal_id (jfunc);
3555 	      if (id < (int) inlined_es->param.length ())
3556 		{
3557 		  int prob1 = es->param[i].change_prob;
3558 		  int prob2 = inlined_es->param[id].change_prob;
3559 		  int prob = combine_probabilities (prob1, prob2);
3560 
3561 		  if (prob1 && prob2 && !prob)
3562 		    prob = 1;
3563 
3564 		  es->param[i].change_prob = prob;
3565 		}
3566 	    }
3567 	}
3568     }
3569 }
3570 
3571 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3572 
3573    Remap predicates of callees of NODE.  Rest of arguments match
3574    remap_predicate.
3575 
3576    Also update change probabilities.  */
3577 
3578 static void
3579 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3580 		      struct cgraph_node *node,
3581 		      struct inline_summary *info,
3582 		      struct inline_summary *callee_info,
3583 		      vec<int> operand_map,
3584 		      vec<int> offset_map,
3585 		      clause_t possible_truths,
3586 		      struct predicate *toplev_predicate)
3587 {
3588   struct cgraph_edge *e, *next;
3589   for (e = node->callees; e; e = next)
3590     {
3591       struct inline_edge_summary *es = inline_edge_summary (e);
3592       struct predicate p;
3593       next = e->next_callee;
3594 
3595       if (e->inline_failed)
3596 	{
3597 	  remap_edge_change_prob (inlined_edge, e);
3598 
3599 	  if (es->predicate)
3600 	    {
3601 	      p = remap_predicate (info, callee_info,
3602 				   es->predicate, operand_map, offset_map,
3603 				   possible_truths, toplev_predicate);
3604 	      edge_set_predicate (e, &p);
3605 	    }
3606 	  else
3607 	    edge_set_predicate (e, toplev_predicate);
3608 	}
3609       else
3610 	remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3611 			      operand_map, offset_map, possible_truths,
3612 			      toplev_predicate);
3613     }
3614   for (e = node->indirect_calls; e; e = next)
3615     {
3616       struct inline_edge_summary *es = inline_edge_summary (e);
3617       struct predicate p;
3618       next = e->next_callee;
3619 
3620       remap_edge_change_prob (inlined_edge, e);
3621       if (es->predicate)
3622 	{
3623 	  p = remap_predicate (info, callee_info,
3624 			       es->predicate, operand_map, offset_map,
3625 			       possible_truths, toplev_predicate);
3626 	  edge_set_predicate (e, &p);
3627 	}
3628       else
3629 	edge_set_predicate (e, toplev_predicate);
3630     }
3631 }
3632 
3633 /* Same as remap_predicate, but set result into hint *HINT.  */
3634 
3635 static void
3636 remap_hint_predicate (struct inline_summary *info,
3637 		      struct inline_summary *callee_info,
3638 		      struct predicate **hint,
3639 		      vec<int> operand_map,
3640 		      vec<int> offset_map,
3641 		      clause_t possible_truths,
3642 		      struct predicate *toplev_predicate)
3643 {
3644   predicate p;
3645 
3646   if (!*hint)
3647     return;
3648   p = remap_predicate (info, callee_info,
3649 		       *hint,
3650 		       operand_map, offset_map,
3651 		       possible_truths, toplev_predicate);
3652   if (!false_predicate_p (&p) && !true_predicate_p (&p))
3653     {
3654       if (!*hint)
3655 	set_hint_predicate (hint, p);
3656       else
3657 	**hint = and_predicates (info->conds, *hint, &p);
3658     }
3659 }
3660 
3661 /* We inlined EDGE.  Update summary of the function we inlined into.  */
3662 
3663 void
3664 inline_merge_summary (struct cgraph_edge *edge)
3665 {
3666   struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3667   struct cgraph_node *to = (edge->caller->global.inlined_to
3668 			    ? edge->caller->global.inlined_to : edge->caller);
3669   struct inline_summary *info = inline_summaries->get (to);
3670   clause_t clause = 0;		/* not_inline is known to be false.  */
3671   size_time_entry *e;
3672   vec<int> operand_map = vNULL;
3673   vec<int> offset_map = vNULL;
3674   int i;
3675   struct predicate toplev_predicate;
3676   struct predicate true_p = true_predicate ();
3677   struct inline_edge_summary *es = inline_edge_summary (edge);
3678 
3679   if (es->predicate)
3680     toplev_predicate = *es->predicate;
3681   else
3682     toplev_predicate = true_predicate ();
3683 
3684   info->fp_expressions |= callee_info->fp_expressions;
3685 
3686   if (callee_info->conds)
3687     evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3688   if (ipa_node_params_sum && callee_info->conds)
3689     {
3690       struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3691       int count = ipa_get_cs_argument_count (args);
3692       int i;
3693 
3694       if (count)
3695 	{
3696 	  operand_map.safe_grow_cleared (count);
3697 	  offset_map.safe_grow_cleared (count);
3698 	}
3699       for (i = 0; i < count; i++)
3700 	{
3701 	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3702 	  int map = -1;
3703 
3704 	  /* TODO: handle non-NOPs when merging.  */
3705 	  if (jfunc->type == IPA_JF_PASS_THROUGH)
3706 	    {
3707 	      if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3708 		map = ipa_get_jf_pass_through_formal_id (jfunc);
3709 	      if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3710 		offset_map[i] = -1;
3711 	    }
3712 	  else if (jfunc->type == IPA_JF_ANCESTOR)
3713 	    {
3714 	      HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3715 	      if (offset >= 0 && offset < INT_MAX)
3716 		{
3717 		  map = ipa_get_jf_ancestor_formal_id (jfunc);
3718 		  if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3719 		    offset = -1;
3720 		  offset_map[i] = offset;
3721 		}
3722 	    }
3723 	  operand_map[i] = map;
3724 	  gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3725 	}
3726     }
3727   for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3728     {
3729       struct predicate p = remap_predicate (info, callee_info,
3730 					    &e->predicate, operand_map,
3731 					    offset_map, clause,
3732 					    &toplev_predicate);
3733       if (!false_predicate_p (&p))
3734 	{
3735 	  gcov_type add_time = ((gcov_type) e->time * edge->frequency
3736 				+ CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3737 	  int prob = predicate_probability (callee_info->conds,
3738 					    &e->predicate,
3739 					    clause, es->param);
3740 	  add_time = apply_probability ((gcov_type) add_time, prob);
3741 	  if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3742 	    add_time = MAX_TIME * INLINE_TIME_SCALE;
3743 	  if (prob != REG_BR_PROB_BASE
3744 	      && dump_file && (dump_flags & TDF_DETAILS))
3745 	    {
3746 	      fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3747 		       (double) prob / REG_BR_PROB_BASE);
3748 	    }
3749 	  account_size_time (info, e->size, add_time, &p);
3750 	}
3751     }
3752   remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3753 			offset_map, clause, &toplev_predicate);
3754   remap_hint_predicate (info, callee_info,
3755 			&callee_info->loop_iterations,
3756 			operand_map, offset_map, clause, &toplev_predicate);
3757   remap_hint_predicate (info, callee_info,
3758 			&callee_info->loop_stride,
3759 			operand_map, offset_map, clause, &toplev_predicate);
3760   remap_hint_predicate (info, callee_info,
3761 			&callee_info->array_index,
3762 			operand_map, offset_map, clause, &toplev_predicate);
3763 
3764   inline_update_callee_summaries (edge->callee,
3765 				  inline_edge_summary (edge)->loop_depth);
3766 
3767   /* We do not maintain predicates of inlined edges, free it.  */
3768   edge_set_predicate (edge, &true_p);
3769   /* Similarly remove param summaries.  */
3770   es->param.release ();
3771   operand_map.release ();
3772   offset_map.release ();
3773 }
3774 
3775 /* For performance reasons inline_merge_summary is not updating overall size
3776    and time.  Recompute it.  */
3777 
3778 void
3779 inline_update_overall_summary (struct cgraph_node *node)
3780 {
3781   struct inline_summary *info = inline_summaries->get (node);
3782   size_time_entry *e;
3783   int i;
3784 
3785   info->size = 0;
3786   info->time = 0;
3787   for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3788     {
3789       info->size += e->size, info->time += e->time;
3790       if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3791 	info->time = MAX_TIME * INLINE_TIME_SCALE;
3792     }
3793   estimate_calls_size_and_time (node, &info->size, &info->min_size,
3794 				&info->time, NULL,
3795 				~(clause_t) (1 << predicate_false_condition),
3796 				vNULL, vNULL, vNULL);
3797   info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3798   info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3799 }
3800 
3801 /* Return hints derrived from EDGE.   */
3802 int
3803 simple_edge_hints (struct cgraph_edge *edge)
3804 {
3805   int hints = 0;
3806   struct cgraph_node *to = (edge->caller->global.inlined_to
3807 			    ? edge->caller->global.inlined_to : edge->caller);
3808   struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3809   if (inline_summaries->get (to)->scc_no
3810       && inline_summaries->get (to)->scc_no
3811 	 == inline_summaries->get (callee)->scc_no
3812       && !edge->recursive_p ())
3813     hints |= INLINE_HINT_same_scc;
3814 
3815   if (callee->lto_file_data && edge->caller->lto_file_data
3816       && edge->caller->lto_file_data != callee->lto_file_data
3817       && !callee->merged_comdat && !callee->icf_merged)
3818     hints |= INLINE_HINT_cross_module;
3819 
3820   return hints;
3821 }
3822 
3823 /* Estimate the time cost for the caller when inlining EDGE.
3824    Only to be called via estimate_edge_time, that handles the
3825    caching mechanism.
3826 
3827    When caching, also update the cache entry.  Compute both time and
3828    size, since we always need both metrics eventually.  */
3829 
3830 int
3831 do_estimate_edge_time (struct cgraph_edge *edge)
3832 {
3833   int time;
3834   int size;
3835   inline_hints hints;
3836   struct cgraph_node *callee;
3837   clause_t clause;
3838   vec<tree> known_vals;
3839   vec<ipa_polymorphic_call_context> known_contexts;
3840   vec<ipa_agg_jump_function_p> known_aggs;
3841   struct inline_edge_summary *es = inline_edge_summary (edge);
3842   int min_size;
3843 
3844   callee = edge->callee->ultimate_alias_target ();
3845 
3846   gcc_checking_assert (edge->inline_failed);
3847   evaluate_properties_for_edge (edge, true,
3848 				&clause, &known_vals, &known_contexts,
3849 				&known_aggs);
3850   estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3851 			       known_aggs, &size, &min_size, &time, &hints, es->param);
3852 
3853   /* When we have profile feedback, we can quite safely identify hot
3854      edges and for those we disable size limits.  Don't do that when
3855      probability that caller will call the callee is low however, since it
3856      may hurt optimization of the caller's hot path.  */
3857   if (edge->count && edge->maybe_hot_p ()
3858       && (edge->count * 2
3859           > (edge->caller->global.inlined_to
3860 	     ? edge->caller->global.inlined_to->count : edge->caller->count)))
3861     hints |= INLINE_HINT_known_hot;
3862 
3863   known_vals.release ();
3864   known_contexts.release ();
3865   known_aggs.release ();
3866   gcc_checking_assert (size >= 0);
3867   gcc_checking_assert (time >= 0);
3868 
3869   /* When caching, update the cache entry.  */
3870   if (edge_growth_cache.exists ())
3871     {
3872       inline_summaries->get (edge->callee)->min_size = min_size;
3873       if ((int) edge_growth_cache.length () <= edge->uid)
3874 	edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3875       edge_growth_cache[edge->uid].time = time + (time >= 0);
3876 
3877       edge_growth_cache[edge->uid].size = size + (size >= 0);
3878       hints |= simple_edge_hints (edge);
3879       edge_growth_cache[edge->uid].hints = hints + 1;
3880     }
3881   return time;
3882 }
3883 
3884 
3885 /* Return estimated callee growth after inlining EDGE.
3886    Only to be called via estimate_edge_size.  */
3887 
3888 int
3889 do_estimate_edge_size (struct cgraph_edge *edge)
3890 {
3891   int size;
3892   struct cgraph_node *callee;
3893   clause_t clause;
3894   vec<tree> known_vals;
3895   vec<ipa_polymorphic_call_context> known_contexts;
3896   vec<ipa_agg_jump_function_p> known_aggs;
3897 
3898   /* When we do caching, use do_estimate_edge_time to populate the entry.  */
3899 
3900   if (edge_growth_cache.exists ())
3901     {
3902       do_estimate_edge_time (edge);
3903       size = edge_growth_cache[edge->uid].size;
3904       gcc_checking_assert (size);
3905       return size - (size > 0);
3906     }
3907 
3908   callee = edge->callee->ultimate_alias_target ();
3909 
3910   /* Early inliner runs without caching, go ahead and do the dirty work.  */
3911   gcc_checking_assert (edge->inline_failed);
3912   evaluate_properties_for_edge (edge, true,
3913 				&clause, &known_vals, &known_contexts,
3914 				&known_aggs);
3915   estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3916 			       known_aggs, &size, NULL, NULL, NULL, vNULL);
3917   known_vals.release ();
3918   known_contexts.release ();
3919   known_aggs.release ();
3920   return size;
3921 }
3922 
3923 
3924 /* Estimate the growth of the caller when inlining EDGE.
3925    Only to be called via estimate_edge_size.  */
3926 
3927 inline_hints
3928 do_estimate_edge_hints (struct cgraph_edge *edge)
3929 {
3930   inline_hints hints;
3931   struct cgraph_node *callee;
3932   clause_t clause;
3933   vec<tree> known_vals;
3934   vec<ipa_polymorphic_call_context> known_contexts;
3935   vec<ipa_agg_jump_function_p> known_aggs;
3936 
3937   /* When we do caching, use do_estimate_edge_time to populate the entry.  */
3938 
3939   if (edge_growth_cache.exists ())
3940     {
3941       do_estimate_edge_time (edge);
3942       hints = edge_growth_cache[edge->uid].hints;
3943       gcc_checking_assert (hints);
3944       return hints - 1;
3945     }
3946 
3947   callee = edge->callee->ultimate_alias_target ();
3948 
3949   /* Early inliner runs without caching, go ahead and do the dirty work.  */
3950   gcc_checking_assert (edge->inline_failed);
3951   evaluate_properties_for_edge (edge, true,
3952 				&clause, &known_vals, &known_contexts,
3953 				&known_aggs);
3954   estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3955 			       known_aggs, NULL, NULL, NULL, &hints, vNULL);
3956   known_vals.release ();
3957   known_contexts.release ();
3958   known_aggs.release ();
3959   hints |= simple_edge_hints (edge);
3960   return hints;
3961 }
3962 
3963 
3964 /* Estimate self time of the function NODE after inlining EDGE.  */
3965 
3966 int
3967 estimate_time_after_inlining (struct cgraph_node *node,
3968 			      struct cgraph_edge *edge)
3969 {
3970   struct inline_edge_summary *es = inline_edge_summary (edge);
3971   if (!es->predicate || !false_predicate_p (es->predicate))
3972     {
3973       gcov_type time =
3974 	inline_summaries->get (node)->time + estimate_edge_time (edge);
3975       if (time < 0)
3976 	time = 0;
3977       if (time > MAX_TIME)
3978 	time = MAX_TIME;
3979       return time;
3980     }
3981   return inline_summaries->get (node)->time;
3982 }
3983 
3984 
3985 /* Estimate the size of NODE after inlining EDGE which should be an
3986    edge to either NODE or a call inlined into NODE.  */
3987 
3988 int
3989 estimate_size_after_inlining (struct cgraph_node *node,
3990 			      struct cgraph_edge *edge)
3991 {
3992   struct inline_edge_summary *es = inline_edge_summary (edge);
3993   if (!es->predicate || !false_predicate_p (es->predicate))
3994     {
3995       int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3996       gcc_assert (size >= 0);
3997       return size;
3998     }
3999   return inline_summaries->get (node)->size;
4000 }
4001 
4002 
4003 struct growth_data
4004 {
4005   struct cgraph_node *node;
4006   bool self_recursive;
4007   bool uninlinable;
4008   int growth;
4009 };
4010 
4011 
4012 /* Worker for do_estimate_growth.  Collect growth for all callers.  */
4013 
4014 static bool
4015 do_estimate_growth_1 (struct cgraph_node *node, void *data)
4016 {
4017   struct cgraph_edge *e;
4018   struct growth_data *d = (struct growth_data *) data;
4019 
4020   for (e = node->callers; e; e = e->next_caller)
4021     {
4022       gcc_checking_assert (e->inline_failed);
4023 
4024       if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4025 	{
4026 	  d->uninlinable = true;
4027           continue;
4028 	}
4029 
4030       if (e->recursive_p ())
4031 	{
4032 	  d->self_recursive = true;
4033 	  continue;
4034 	}
4035       d->growth += estimate_edge_growth (e);
4036     }
4037   return false;
4038 }
4039 
4040 
4041 /* Estimate the growth caused by inlining NODE into all callees.  */
4042 
4043 int
4044 estimate_growth (struct cgraph_node *node)
4045 {
4046   struct growth_data d = { node, false, false, 0 };
4047   struct inline_summary *info = inline_summaries->get (node);
4048 
4049   node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
4050 
4051   /* For self recursive functions the growth estimation really should be
4052      infinity.  We don't want to return very large values because the growth
4053      plays various roles in badness computation fractions.  Be sure to not
4054      return zero or negative growths. */
4055   if (d.self_recursive)
4056     d.growth = d.growth < info->size ? info->size : d.growth;
4057   else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
4058     ;
4059   else
4060     {
4061       if (node->will_be_removed_from_program_if_no_direct_calls_p ())
4062 	d.growth -= info->size;
4063       /* COMDAT functions are very often not shared across multiple units
4064          since they come from various template instantiations.
4065          Take this into account.  */
4066       else if (DECL_COMDAT (node->decl)
4067 	       && node->can_remove_if_no_direct_calls_p ())
4068 	d.growth -= (info->size
4069 		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
4070 		     + 50) / 100;
4071     }
4072 
4073   return d.growth;
4074 }
4075 
4076 /* Verify if there are fewer than MAX_CALLERS.  */
4077 
4078 static bool
4079 check_callers (cgraph_node *node, int *max_callers)
4080 {
4081   ipa_ref *ref;
4082 
4083   if (!node->can_remove_if_no_direct_calls_and_refs_p ())
4084     return true;
4085 
4086   for (cgraph_edge *e = node->callers; e; e = e->next_caller)
4087     {
4088       (*max_callers)--;
4089       if (!*max_callers
4090 	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4091 	return true;
4092     }
4093 
4094   FOR_EACH_ALIAS (node, ref)
4095     if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
4096       return true;
4097 
4098   return false;
4099 }
4100 
4101 
4102 /* Make cheap estimation if growth of NODE is likely positive knowing
4103    EDGE_GROWTH of one particular edge.
4104    We assume that most of other edges will have similar growth
4105    and skip computation if there are too many callers.  */
4106 
4107 bool
4108 growth_likely_positive (struct cgraph_node *node,
4109 		        int edge_growth)
4110 {
4111   int max_callers;
4112   struct cgraph_edge *e;
4113   gcc_checking_assert (edge_growth > 0);
4114 
4115   /* First quickly check if NODE is removable at all.  */
4116   if (DECL_EXTERNAL (node->decl))
4117     return true;
4118   if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4119       || node->address_taken)
4120     return true;
4121 
4122   max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
4123 
4124   for (e = node->callers; e; e = e->next_caller)
4125     {
4126       max_callers--;
4127       if (!max_callers
4128 	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4129 	return true;
4130     }
4131 
4132   ipa_ref *ref;
4133   FOR_EACH_ALIAS (node, ref)
4134     if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4135       return true;
4136 
4137   /* Unlike for functions called once, we play unsafe with
4138      COMDATs.  We can allow that since we know functions
4139      in consideration are small (and thus risk is small) and
4140      moreover grow estimates already accounts that COMDAT
4141      functions may or may not disappear when eliminated from
4142      current unit. With good probability making aggressive
4143      choice in all units is going to make overall program
4144      smaller.  */
4145   if (DECL_COMDAT (node->decl))
4146     {
4147       if (!node->can_remove_if_no_direct_calls_p ())
4148 	return true;
4149     }
4150   else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4151     return true;
4152 
4153   return estimate_growth (node) > 0;
4154 }
4155 
4156 
4157 /* This function performs intraprocedural analysis in NODE that is required to
4158    inline indirect calls.  */
4159 
4160 static void
4161 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4162 {
4163   ipa_analyze_node (node);
4164   if (dump_file && (dump_flags & TDF_DETAILS))
4165     {
4166       ipa_print_node_params (dump_file, node);
4167       ipa_print_node_jump_functions (dump_file, node);
4168     }
4169 }
4170 
4171 
4172 /* Note function body size.  */
4173 
4174 void
4175 inline_analyze_function (struct cgraph_node *node)
4176 {
4177   push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4178 
4179   if (dump_file)
4180     fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4181 	     node->name (), node->order);
4182   if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4183     inline_indirect_intraprocedural_analysis (node);
4184   compute_inline_parameters (node, false);
4185   if (!optimize)
4186     {
4187       struct cgraph_edge *e;
4188       for (e = node->callees; e; e = e->next_callee)
4189 	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4190       for (e = node->indirect_calls; e; e = e->next_callee)
4191 	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4192     }
4193 
4194   pop_cfun ();
4195 }
4196 
4197 
4198 /* Called when new function is inserted to callgraph late.  */
4199 
4200 void
4201 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4202 {
4203   inline_analyze_function (node);
4204 }
4205 
4206 /* Note function body size.  */
4207 
4208 void
4209 inline_generate_summary (void)
4210 {
4211   struct cgraph_node *node;
4212 
4213   FOR_EACH_DEFINED_FUNCTION (node)
4214     if (DECL_STRUCT_FUNCTION (node->decl))
4215       node->local.versionable = tree_versionable_function_p (node->decl);
4216 
4217   /* When not optimizing, do not bother to analyze.  Inlining is still done
4218      because edge redirection needs to happen there.  */
4219   if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4220     return;
4221 
4222   if (!inline_summaries)
4223     inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4224 
4225   inline_summaries->enable_insertion_hook ();
4226 
4227   ipa_register_cgraph_hooks ();
4228   inline_free_summary ();
4229 
4230   FOR_EACH_DEFINED_FUNCTION (node)
4231     if (!node->alias)
4232       inline_analyze_function (node);
4233 }
4234 
4235 
4236 /* Read predicate from IB.  */
4237 
4238 static struct predicate
4239 read_predicate (struct lto_input_block *ib)
4240 {
4241   struct predicate out;
4242   clause_t clause;
4243   int k = 0;
4244 
4245   do
4246     {
4247       gcc_assert (k <= MAX_CLAUSES);
4248       clause = out.clause[k++] = streamer_read_uhwi (ib);
4249     }
4250   while (clause);
4251 
4252   /* Zero-initialize the remaining clauses in OUT.  */
4253   while (k <= MAX_CLAUSES)
4254     out.clause[k++] = 0;
4255 
4256   return out;
4257 }
4258 
4259 
4260 /* Write inline summary for edge E to OB.  */
4261 
4262 static void
4263 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4264 {
4265   struct inline_edge_summary *es = inline_edge_summary (e);
4266   struct predicate p;
4267   int length, i;
4268 
4269   es->call_stmt_size = streamer_read_uhwi (ib);
4270   es->call_stmt_time = streamer_read_uhwi (ib);
4271   es->loop_depth = streamer_read_uhwi (ib);
4272   p = read_predicate (ib);
4273   edge_set_predicate (e, &p);
4274   length = streamer_read_uhwi (ib);
4275   if (length)
4276     {
4277       es->param.safe_grow_cleared (length);
4278       for (i = 0; i < length; i++)
4279 	es->param[i].change_prob = streamer_read_uhwi (ib);
4280     }
4281 }
4282 
4283 
4284 /* Stream in inline summaries from the section.  */
4285 
4286 static void
4287 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4288 		     size_t len)
4289 {
4290   const struct lto_function_header *header =
4291     (const struct lto_function_header *) data;
4292   const int cfg_offset = sizeof (struct lto_function_header);
4293   const int main_offset = cfg_offset + header->cfg_size;
4294   const int string_offset = main_offset + header->main_size;
4295   struct data_in *data_in;
4296   unsigned int i, count2, j;
4297   unsigned int f_count;
4298 
4299   lto_input_block ib ((const char *) data + main_offset, header->main_size,
4300 		      file_data->mode_table);
4301 
4302   data_in =
4303     lto_data_in_create (file_data, (const char *) data + string_offset,
4304 			header->string_size, vNULL);
4305   f_count = streamer_read_uhwi (&ib);
4306   for (i = 0; i < f_count; i++)
4307     {
4308       unsigned int index;
4309       struct cgraph_node *node;
4310       struct inline_summary *info;
4311       lto_symtab_encoder_t encoder;
4312       struct bitpack_d bp;
4313       struct cgraph_edge *e;
4314       predicate p;
4315 
4316       index = streamer_read_uhwi (&ib);
4317       encoder = file_data->symtab_node_encoder;
4318       node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4319 								index));
4320       info = inline_summaries->get (node);
4321 
4322       info->estimated_stack_size
4323 	= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4324       info->size = info->self_size = streamer_read_uhwi (&ib);
4325       info->time = info->self_time = streamer_read_uhwi (&ib);
4326 
4327       bp = streamer_read_bitpack (&ib);
4328       info->inlinable = bp_unpack_value (&bp, 1);
4329       info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
4330       info->fp_expressions = bp_unpack_value (&bp, 1);
4331 
4332       count2 = streamer_read_uhwi (&ib);
4333       gcc_assert (!info->conds);
4334       for (j = 0; j < count2; j++)
4335 	{
4336 	  struct condition c;
4337 	  c.operand_num = streamer_read_uhwi (&ib);
4338 	  c.size = streamer_read_uhwi (&ib);
4339 	  c.code = (enum tree_code) streamer_read_uhwi (&ib);
4340 	  c.val = stream_read_tree (&ib, data_in);
4341 	  bp = streamer_read_bitpack (&ib);
4342 	  c.agg_contents = bp_unpack_value (&bp, 1);
4343 	  c.by_ref = bp_unpack_value (&bp, 1);
4344 	  if (c.agg_contents)
4345 	    c.offset = streamer_read_uhwi (&ib);
4346 	  vec_safe_push (info->conds, c);
4347 	}
4348       count2 = streamer_read_uhwi (&ib);
4349       gcc_assert (!info->entry);
4350       for (j = 0; j < count2; j++)
4351 	{
4352 	  struct size_time_entry e;
4353 
4354 	  e.size = streamer_read_uhwi (&ib);
4355 	  e.time = streamer_read_uhwi (&ib);
4356 	  e.predicate = read_predicate (&ib);
4357 
4358 	  vec_safe_push (info->entry, e);
4359 	}
4360 
4361       p = read_predicate (&ib);
4362       set_hint_predicate (&info->loop_iterations, p);
4363       p = read_predicate (&ib);
4364       set_hint_predicate (&info->loop_stride, p);
4365       p = read_predicate (&ib);
4366       set_hint_predicate (&info->array_index, p);
4367       for (e = node->callees; e; e = e->next_callee)
4368 	read_inline_edge_summary (&ib, e);
4369       for (e = node->indirect_calls; e; e = e->next_callee)
4370 	read_inline_edge_summary (&ib, e);
4371     }
4372 
4373   lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4374 			 len);
4375   lto_data_in_delete (data_in);
4376 }
4377 
4378 
4379 /* Read inline summary.  Jump functions are shared among ipa-cp
4380    and inliner, so when ipa-cp is active, we don't need to write them
4381    twice.  */
4382 
4383 void
4384 inline_read_summary (void)
4385 {
4386   struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4387   struct lto_file_decl_data *file_data;
4388   unsigned int j = 0;
4389 
4390   inline_summary_alloc ();
4391 
4392   while ((file_data = file_data_vec[j++]))
4393     {
4394       size_t len;
4395       const char *data = lto_get_section_data (file_data,
4396 					       LTO_section_inline_summary,
4397 					       NULL, &len);
4398       if (data)
4399 	inline_read_section (file_data, data, len);
4400       else
4401 	/* Fatal error here.  We do not want to support compiling ltrans units
4402 	   with different version of compiler or different flags than the WPA
4403 	   unit, so this should never happen.  */
4404 	fatal_error (input_location,
4405 		     "ipa inline summary is missing in input file");
4406     }
4407   if (optimize)
4408     {
4409       ipa_register_cgraph_hooks ();
4410       if (!flag_ipa_cp)
4411 	ipa_prop_read_jump_functions ();
4412     }
4413 
4414   gcc_assert (inline_summaries);
4415   inline_summaries->enable_insertion_hook ();
4416 }
4417 
4418 
4419 /* Write predicate P to OB.  */
4420 
4421 static void
4422 write_predicate (struct output_block *ob, struct predicate *p)
4423 {
4424   int j;
4425   if (p)
4426     for (j = 0; p->clause[j]; j++)
4427       {
4428 	gcc_assert (j < MAX_CLAUSES);
4429 	streamer_write_uhwi (ob, p->clause[j]);
4430       }
4431   streamer_write_uhwi (ob, 0);
4432 }
4433 
4434 
4435 /* Write inline summary for edge E to OB.  */
4436 
4437 static void
4438 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4439 {
4440   struct inline_edge_summary *es = inline_edge_summary (e);
4441   int i;
4442 
4443   streamer_write_uhwi (ob, es->call_stmt_size);
4444   streamer_write_uhwi (ob, es->call_stmt_time);
4445   streamer_write_uhwi (ob, es->loop_depth);
4446   write_predicate (ob, es->predicate);
4447   streamer_write_uhwi (ob, es->param.length ());
4448   for (i = 0; i < (int) es->param.length (); i++)
4449     streamer_write_uhwi (ob, es->param[i].change_prob);
4450 }
4451 
4452 
4453 /* Write inline summary for node in SET.
4454    Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
4455    active, we don't need to write them twice.  */
4456 
4457 void
4458 inline_write_summary (void)
4459 {
4460   struct output_block *ob = create_output_block (LTO_section_inline_summary);
4461   lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4462   unsigned int count = 0;
4463   int i;
4464 
4465   for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4466     {
4467       symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4468       cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4469       if (cnode && cnode->definition && !cnode->alias)
4470 	count++;
4471     }
4472   streamer_write_uhwi (ob, count);
4473 
4474   for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4475     {
4476       symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4477       cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4478       if (cnode && cnode->definition && !cnode->alias)
4479 	{
4480 	  struct inline_summary *info = inline_summaries->get (cnode);
4481 	  struct bitpack_d bp;
4482 	  struct cgraph_edge *edge;
4483 	  int i;
4484 	  size_time_entry *e;
4485 	  struct condition *c;
4486 
4487 	  streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
4488 	  streamer_write_hwi (ob, info->estimated_self_stack_size);
4489 	  streamer_write_hwi (ob, info->self_size);
4490 	  streamer_write_hwi (ob, info->self_time);
4491 	  bp = bitpack_create (ob->main_stream);
4492 	  bp_pack_value (&bp, info->inlinable, 1);
4493 	  bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4494 	  bp_pack_value (&bp, info->fp_expressions, 1);
4495 	  streamer_write_bitpack (&bp);
4496 	  streamer_write_uhwi (ob, vec_safe_length (info->conds));
4497 	  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4498 	    {
4499 	      streamer_write_uhwi (ob, c->operand_num);
4500 	      streamer_write_uhwi (ob, c->size);
4501 	      streamer_write_uhwi (ob, c->code);
4502 	      stream_write_tree (ob, c->val, true);
4503 	      bp = bitpack_create (ob->main_stream);
4504 	      bp_pack_value (&bp, c->agg_contents, 1);
4505 	      bp_pack_value (&bp, c->by_ref, 1);
4506 	      streamer_write_bitpack (&bp);
4507 	      if (c->agg_contents)
4508 		streamer_write_uhwi (ob, c->offset);
4509 	    }
4510 	  streamer_write_uhwi (ob, vec_safe_length (info->entry));
4511 	  for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4512 	    {
4513 	      streamer_write_uhwi (ob, e->size);
4514 	      streamer_write_uhwi (ob, e->time);
4515 	      write_predicate (ob, &e->predicate);
4516 	    }
4517 	  write_predicate (ob, info->loop_iterations);
4518 	  write_predicate (ob, info->loop_stride);
4519 	  write_predicate (ob, info->array_index);
4520 	  for (edge = cnode->callees; edge; edge = edge->next_callee)
4521 	    write_inline_edge_summary (ob, edge);
4522 	  for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
4523 	    write_inline_edge_summary (ob, edge);
4524 	}
4525     }
4526   streamer_write_char_stream (ob->main_stream, 0);
4527   produce_asm (ob, NULL);
4528   destroy_output_block (ob);
4529 
4530   if (optimize && !flag_ipa_cp)
4531     ipa_prop_write_jump_functions ();
4532 }
4533 
4534 
4535 /* Release inline summary.  */
4536 
4537 void
4538 inline_free_summary (void)
4539 {
4540   struct cgraph_node *node;
4541   if (edge_removal_hook_holder)
4542     symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4543   edge_removal_hook_holder = NULL;
4544   if (edge_duplication_hook_holder)
4545     symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4546   edge_duplication_hook_holder = NULL;
4547   if (!inline_edge_summary_vec.exists ())
4548     return;
4549   FOR_EACH_DEFINED_FUNCTION (node)
4550     if (!node->alias)
4551       reset_inline_summary (node, inline_summaries->get (node));
4552   inline_summaries->release ();
4553   inline_summaries = NULL;
4554   inline_edge_summary_vec.release ();
4555   edge_predicate_pool.release ();
4556 }
4557