1*38fd1498Szrj /* Analysis used by inlining decision heuristics.
2*38fd1498Szrj Copyright (C) 2003-2018 Free Software Foundation, Inc.
3*38fd1498Szrj Contributed by Jan Hubicka
4*38fd1498Szrj
5*38fd1498Szrj This file is part of GCC.
6*38fd1498Szrj
7*38fd1498Szrj GCC is free software; you can redistribute it and/or modify it under
8*38fd1498Szrj the terms of the GNU General Public License as published by the Free
9*38fd1498Szrj Software Foundation; either version 3, or (at your option) any later
10*38fd1498Szrj version.
11*38fd1498Szrj
12*38fd1498Szrj GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13*38fd1498Szrj WARRANTY; without even the implied warranty of MERCHANTABILITY or
14*38fd1498Szrj FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15*38fd1498Szrj for more details.
16*38fd1498Szrj
17*38fd1498Szrj You should have received a copy of the GNU General Public License
18*38fd1498Szrj along with GCC; see the file COPYING3. If not see
19*38fd1498Szrj <http://www.gnu.org/licenses/>. */
20*38fd1498Szrj
21*38fd1498Szrj #include "config.h"
22*38fd1498Szrj #include "system.h"
23*38fd1498Szrj #include "coretypes.h"
24*38fd1498Szrj #include "backend.h"
25*38fd1498Szrj #include "tree.h"
26*38fd1498Szrj #include "gimple.h"
27*38fd1498Szrj #include "alloc-pool.h"
28*38fd1498Szrj #include "tree-pass.h"
29*38fd1498Szrj #include "ssa.h"
30*38fd1498Szrj #include "tree-streamer.h"
31*38fd1498Szrj #include "cgraph.h"
32*38fd1498Szrj #include "diagnostic.h"
33*38fd1498Szrj #include "fold-const.h"
34*38fd1498Szrj #include "print-tree.h"
35*38fd1498Szrj #include "tree-inline.h"
36*38fd1498Szrj #include "gimple-pretty-print.h"
37*38fd1498Szrj #include "params.h"
38*38fd1498Szrj #include "cfganal.h"
39*38fd1498Szrj #include "gimple-iterator.h"
40*38fd1498Szrj #include "tree-cfg.h"
41*38fd1498Szrj #include "tree-ssa-loop-niter.h"
42*38fd1498Szrj #include "tree-ssa-loop.h"
43*38fd1498Szrj #include "symbol-summary.h"
44*38fd1498Szrj #include "ipa-prop.h"
45*38fd1498Szrj #include "ipa-fnsummary.h"
46*38fd1498Szrj #include "ipa-inline.h"
47*38fd1498Szrj #include "cfgloop.h"
48*38fd1498Szrj #include "tree-scalar-evolution.h"
49*38fd1498Szrj #include "ipa-utils.h"
50*38fd1498Szrj #include "cfgexpand.h"
51*38fd1498Szrj #include "gimplify.h"
52*38fd1498Szrj
/* Cached node/edge growths.  */

/* Per-edge cache of size/time/hints estimates, indexed by cgraph edge uid.
   Entries are populated lazily by do_estimate_edge_time; a zero size/hints
   field means "not yet cached" (values are stored with a +1 bias).  */
vec<edge_growth_cache_entry> edge_growth_cache;
/* Holder for the edge-removal hook registered by initialize_growth_caches
   so stale cache entries are flushed when an edge is removed.  */
static struct cgraph_edge_hook_list *edge_removal_hook_holder;
56*38fd1498Szrj
57*38fd1498Szrj
58*38fd1498Szrj /* Give initial reasons why inlining would fail on EDGE. This gets either
59*38fd1498Szrj nullified or usually overwritten by more precise reasons later. */
60*38fd1498Szrj
61*38fd1498Szrj void
initialize_inline_failed(struct cgraph_edge * e)62*38fd1498Szrj initialize_inline_failed (struct cgraph_edge *e)
63*38fd1498Szrj {
64*38fd1498Szrj struct cgraph_node *callee = e->callee;
65*38fd1498Szrj
66*38fd1498Szrj if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
67*38fd1498Szrj && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
68*38fd1498Szrj ;
69*38fd1498Szrj else if (e->indirect_unknown_callee)
70*38fd1498Szrj e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
71*38fd1498Szrj else if (!callee->definition)
72*38fd1498Szrj e->inline_failed = CIF_BODY_NOT_AVAILABLE;
73*38fd1498Szrj else if (callee->local.redefined_extern_inline)
74*38fd1498Szrj e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
75*38fd1498Szrj else
76*38fd1498Szrj e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
77*38fd1498Szrj gcc_checking_assert (!e->call_stmt_cannot_inline_p
78*38fd1498Szrj || cgraph_inline_failed_type (e->inline_failed)
79*38fd1498Szrj == CIF_FINAL_ERROR);
80*38fd1498Szrj }
81*38fd1498Szrj
82*38fd1498Szrj
83*38fd1498Szrj /* Keep edge cache consistent across edge removal. */
84*38fd1498Szrj
85*38fd1498Szrj static void
inline_edge_removal_hook(struct cgraph_edge * edge,void * data ATTRIBUTE_UNUSED)86*38fd1498Szrj inline_edge_removal_hook (struct cgraph_edge *edge,
87*38fd1498Szrj void *data ATTRIBUTE_UNUSED)
88*38fd1498Szrj {
89*38fd1498Szrj reset_edge_growth_cache (edge);
90*38fd1498Szrj }
91*38fd1498Szrj
92*38fd1498Szrj
93*38fd1498Szrj /* Initialize growth caches. */
94*38fd1498Szrj
95*38fd1498Szrj void
initialize_growth_caches(void)96*38fd1498Szrj initialize_growth_caches (void)
97*38fd1498Szrj {
98*38fd1498Szrj if (!edge_removal_hook_holder)
99*38fd1498Szrj edge_removal_hook_holder =
100*38fd1498Szrj symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
101*38fd1498Szrj if (symtab->edges_max_uid)
102*38fd1498Szrj edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
103*38fd1498Szrj }
104*38fd1498Szrj
105*38fd1498Szrj
106*38fd1498Szrj /* Free growth caches. */
107*38fd1498Szrj
108*38fd1498Szrj void
free_growth_caches(void)109*38fd1498Szrj free_growth_caches (void)
110*38fd1498Szrj {
111*38fd1498Szrj if (edge_removal_hook_holder)
112*38fd1498Szrj {
113*38fd1498Szrj symtab->remove_edge_removal_hook (edge_removal_hook_holder);
114*38fd1498Szrj edge_removal_hook_holder = NULL;
115*38fd1498Szrj }
116*38fd1498Szrj edge_growth_cache.release ();
117*38fd1498Szrj }
118*38fd1498Szrj
119*38fd1498Szrj /* Return hints derrived from EDGE. */
120*38fd1498Szrj
121*38fd1498Szrj int
simple_edge_hints(struct cgraph_edge * edge)122*38fd1498Szrj simple_edge_hints (struct cgraph_edge *edge)
123*38fd1498Szrj {
124*38fd1498Szrj int hints = 0;
125*38fd1498Szrj struct cgraph_node *to = (edge->caller->global.inlined_to
126*38fd1498Szrj ? edge->caller->global.inlined_to : edge->caller);
127*38fd1498Szrj struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
128*38fd1498Szrj if (ipa_fn_summaries->get (to)->scc_no
129*38fd1498Szrj && ipa_fn_summaries->get (to)->scc_no
130*38fd1498Szrj == ipa_fn_summaries->get (callee)->scc_no
131*38fd1498Szrj && !edge->recursive_p ())
132*38fd1498Szrj hints |= INLINE_HINT_same_scc;
133*38fd1498Szrj
134*38fd1498Szrj if (callee->lto_file_data && edge->caller->lto_file_data
135*38fd1498Szrj && edge->caller->lto_file_data != callee->lto_file_data
136*38fd1498Szrj && !callee->merged_comdat && !callee->icf_merged)
137*38fd1498Szrj hints |= INLINE_HINT_cross_module;
138*38fd1498Szrj
139*38fd1498Szrj return hints;
140*38fd1498Szrj }
141*38fd1498Szrj
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Determine which predicate clauses and which argument values, contexts
     and aggregate contents are known on this call site, then specialize
     the callee's size/time estimates with them.  */
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, &min_size,
			       &time, &nonspec_time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->global.inlined_to
	     ? edge->caller->global.inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache.exists ())
    {
      ipa_fn_summaries->get (edge->callee)->min_size = min_size;
      /* The cache may be smaller than the current edge uid when edges
	 were created after initialize_growth_caches; grow it on demand.  */
      if ((int) edge_growth_cache.length () <= edge->uid)
	edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
      edge_growth_cache[edge->uid].time = time;
      edge_growth_cache[edge->uid].nonspec_time = nonspec_time;

      /* Size is stored with a +1 bias so that 0 means "not cached";
	 do_estimate_edge_size undoes the bias on read.  */
      edge_growth_cache[edge->uid].size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      /* Likewise hints are biased by +1; do_estimate_edge_hints
	 subtracts it back.  */
      edge_growth_cache[edge->uid].hints = hints + 1;
    }
  return time;
}
205*38fd1498Szrj
206*38fd1498Szrj
/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache[edge->uid].size;
      /* Zero means "not cached"; do_estimate_edge_time stores the size
	 with a +1 bias, which is undone below.  */
      gcc_checking_assert (size);
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  /* Evaluate what is known on this call site and estimate the specialized
     callee size; time/hints outputs are not needed here.  */
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, NULL, NULL,
			       NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}
246*38fd1498Szrj
247*38fd1498Szrj
/* Return inline hints for EDGE.
   Only to be called via estimate_edge_hints, that handles the
   caching mechanism.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache[edge->uid].hints;
      /* Zero means "not cached"; hints are stored with a +1 bias by
	 do_estimate_edge_time, undone below.  */
      gcc_checking_assert (hints);
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  /* Evaluate call-site knowledge and collect only the hints output.  */
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, NULL, NULL,
			       NULL, NULL, &hints, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  /* Unlike the cached path, edge-derived hints must be added here.  */
  hints |= simple_edge_hints (edge);
  return hints;
}
288*38fd1498Szrj
289*38fd1498Szrj /* Estimate the size of NODE after inlining EDGE which should be an
290*38fd1498Szrj edge to either NODE or a call inlined into NODE. */
291*38fd1498Szrj
292*38fd1498Szrj int
estimate_size_after_inlining(struct cgraph_node * node,struct cgraph_edge * edge)293*38fd1498Szrj estimate_size_after_inlining (struct cgraph_node *node,
294*38fd1498Szrj struct cgraph_edge *edge)
295*38fd1498Szrj {
296*38fd1498Szrj struct ipa_call_summary *es = ipa_call_summaries->get (edge);
297*38fd1498Szrj if (!es->predicate || *es->predicate != false)
298*38fd1498Szrj {
299*38fd1498Szrj int size = ipa_fn_summaries->get (node)->size + estimate_edge_growth (edge);
300*38fd1498Szrj gcc_assert (size >= 0);
301*38fd1498Szrj return size;
302*38fd1498Szrj }
303*38fd1498Szrj return ipa_fn_summaries->get (node)->size;
304*38fd1498Szrj }
305*38fd1498Szrj
306*38fd1498Szrj
/* Data accumulated by do_estimate_growth_1 while walking all callers
   of a node (and of its aliases).  */

struct growth_data
{
  /* The node whose overall inlining growth is being estimated.  */
  struct cgraph_node *node;
  /* Set when a recursive caller edge was encountered.  */
  bool self_recursive;
  /* Set when some caller edge can never be inlined (final error or
     caller compiled without optimization).  */
  bool uninlinable;
  /* Accumulated estimated growth over the remaining caller edges.  */
  int growth;
};
314*38fd1498Szrj
315*38fd1498Szrj
316*38fd1498Szrj /* Worker for do_estimate_growth. Collect growth for all callers. */
317*38fd1498Szrj
318*38fd1498Szrj static bool
do_estimate_growth_1(struct cgraph_node * node,void * data)319*38fd1498Szrj do_estimate_growth_1 (struct cgraph_node *node, void *data)
320*38fd1498Szrj {
321*38fd1498Szrj struct cgraph_edge *e;
322*38fd1498Szrj struct growth_data *d = (struct growth_data *) data;
323*38fd1498Szrj
324*38fd1498Szrj for (e = node->callers; e; e = e->next_caller)
325*38fd1498Szrj {
326*38fd1498Szrj gcc_checking_assert (e->inline_failed);
327*38fd1498Szrj
328*38fd1498Szrj if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
329*38fd1498Szrj || !opt_for_fn (e->caller->decl, optimize))
330*38fd1498Szrj {
331*38fd1498Szrj d->uninlinable = true;
332*38fd1498Szrj continue;
333*38fd1498Szrj }
334*38fd1498Szrj
335*38fd1498Szrj if (e->recursive_p ())
336*38fd1498Szrj {
337*38fd1498Szrj d->self_recursive = true;
338*38fd1498Szrj continue;
339*38fd1498Szrj }
340*38fd1498Szrj d->growth += estimate_edge_growth (e);
341*38fd1498Szrj }
342*38fd1498Szrj return false;
343*38fd1498Szrj }
344*38fd1498Szrj
345*38fd1498Szrj
/* Estimate the overall unit growth caused by inlining NODE into all of
   its callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);

  /* Sum per-edge growths over all callers of NODE and of its aliases.  */
  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      /* If inlining everywhere lets the offline copy disappear, its
	 whole size is saved.  */
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	/* Subtract the expected fraction of the size, rounded to
	   nearest percent.  */
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}
380*38fd1498Szrj
381*38fd1498Szrj /* Verify if there are fewer than MAX_CALLERS. */
382*38fd1498Szrj
383*38fd1498Szrj static bool
check_callers(cgraph_node * node,int * max_callers)384*38fd1498Szrj check_callers (cgraph_node *node, int *max_callers)
385*38fd1498Szrj {
386*38fd1498Szrj ipa_ref *ref;
387*38fd1498Szrj
388*38fd1498Szrj if (!node->can_remove_if_no_direct_calls_and_refs_p ())
389*38fd1498Szrj return true;
390*38fd1498Szrj
391*38fd1498Szrj for (cgraph_edge *e = node->callers; e; e = e->next_caller)
392*38fd1498Szrj {
393*38fd1498Szrj (*max_callers)--;
394*38fd1498Szrj if (!*max_callers
395*38fd1498Szrj || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
396*38fd1498Szrj return true;
397*38fd1498Szrj }
398*38fd1498Szrj
399*38fd1498Szrj FOR_EACH_ALIAS (node, ref)
400*38fd1498Szrj if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
401*38fd1498Szrj return true;
402*38fd1498Szrj
403*38fd1498Szrj return false;
404*38fd1498Szrj }
405*38fd1498Szrj
406*38fd1498Szrj
/* Make cheap estimation if growth of NODE is likely positive knowing
   EDGE_GROWTH of one particular edge.
   We assume that most of other edges will have similar growth
   and skip computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  If its offline
     copy must stay, inlining anywhere adds code, so growth is positive.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  /* Budget of caller edges we are willing to inspect, proportional to
     size / edge_growth; beyond it assume growth is positive rather than
     computing the exact estimate.  */
  max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      /* Too many callers, or a caller that can never inline NODE
	 (which keeps the offline copy alive), means likely positive.  */
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  /* Callers reaching NODE through aliases share the same budget.  */
  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play unsafe with
     COMDATs.  We can allow that since we know functions
     in consideration are small (and thus risk is small) and
     moreover grow estimates already accounts that COMDAT
     functions may or may not disappear when eliminated from
     current unit.  With good probability making aggressive
     choice in all units is going to make overall program
     smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  /* Fall back to the full (more expensive) growth computation.  */
  return estimate_growth (node) > 0;
}
460