// Implementation of basic-block-related functions for RTL SSA     -*- C++ -*-
// Copyright (C) 2020-2022 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 3, or (at your option) any later
// version.
//
// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with GCC; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

#define INCLUDE_ALGORITHM
#define INCLUDE_FUNCTIONAL
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "df.h"
#include "rtl-ssa.h"
#include "rtl-ssa/internals.h"
#include "rtl-ssa/internals.inl"
#include "cfganal.h"
#include "cfgrtl.h"
#include "predict.h"
#include "domwalk.h"

using namespace rtl_ssa;

// Prepare to build information for a function in which all register numbers
// are less than NUM_REGS and all basic block indices are less than
// NUM_BB_INDICES.
function_info::build_info::build_info (unsigned int num_regs,
                                       unsigned int num_bb_indices)
  : current_bb (nullptr),
    current_ebb (nullptr),
    last_access (num_regs + 1),
    ebb_live_in_for_debug (nullptr),
    potential_phi_regs (num_regs),
    bb_phis (num_bb_indices),
    bb_mem_live_out (num_bb_indices),
    bb_to_rpo (num_bb_indices)
{
  last_access.safe_grow_cleared (num_regs + 1);

  bitmap_clear (potential_phi_regs);

  // These arrays shouldn't need to be initialized, since we'll always
  // write to an entry before reading from it.  But poison the contents
  // when checking, just to make sure we don't accidentally use an
  // uninitialized value.
  bb_phis.quick_grow (num_bb_indices);
  bb_mem_live_out.quick_grow (num_bb_indices);
  bb_to_rpo.quick_grow (num_bb_indices);
  if (flag_checking)
    {
      // Can't do this for bb_phis because it has a constructor.
      memset (bb_mem_live_out.address (), 0xaf,
              num_bb_indices * sizeof (bb_mem_live_out[0]));
      memset (bb_to_rpo.address (), 0xaf,
              num_bb_indices * sizeof (bb_to_rpo[0]));
    }

  // Start off with an empty set of phi nodes for each block.
  for (bb_phi_info &info : bb_phis)
    bitmap_initialize (&info.regs, &bitmap_default_obstack);
}

function_info::build_info::~build_info ()
{
  for (bb_phi_info &info : bb_phis)
    bitmap_release (&info.regs);
}

// A dom_walker for populating the basic blocks.
class function_info::bb_walker : public dom_walker
{
public:
  bb_walker (function_info *, build_info &);
  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  // Information about the function we're building.
  function_info *m_function;
  build_info &m_bi;

  // We should treat the exit block as being the last child of this one.
  // See the comment in the constructor for more information.
  basic_block m_exit_block_dominator;
};

// Prepare to walk the blocks in FUNCTION using BI.
function_info::bb_walker::bb_walker (function_info *function, build_info &bi)
  : dom_walker (CDI_DOMINATORS, ALL_BLOCKS, bi.bb_to_rpo.address ()),
    m_function (function),
    m_bi (bi),
    m_exit_block_dominator (nullptr)
{
  // ??? There is no dominance information associated with the exit block,
  // so work out its immediate dominator using predecessor blocks.  We then
  // walk the exit block just before popping its immediate dominator.
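  //
  // For example, if the exit block has predecessors B1 and B2, its
  // effective immediate dominator is nearest_common_dominator (B1, B2);
  // after_dom_children then processes the exit block when that dominator
  // is popped, so the walk still visits every block exactly once.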
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (m_function->m_fn)->preds)
    if (m_exit_block_dominator)
      m_exit_block_dominator
        = nearest_common_dominator (CDI_DOMINATORS,
                                    m_exit_block_dominator, e->src);
    else
      m_exit_block_dominator = e->src;

  // If the exit block is unreachable, process it last.
  if (!m_exit_block_dominator)
    m_exit_block_dominator = ENTRY_BLOCK_PTR_FOR_FN (m_function->m_fn);
}

edge
function_info::bb_walker::before_dom_children (basic_block bb)
{
  m_function->start_block (m_bi, m_function->bb (bb));
  return nullptr;
}

void
function_info::bb_walker::after_dom_children (basic_block bb)
{
  // See the comment in the constructor for details.
  if (bb == m_exit_block_dominator)
    {
      before_dom_children (EXIT_BLOCK_PTR_FOR_FN (m_function->m_fn));
      after_dom_children (EXIT_BLOCK_PTR_FOR_FN (m_function->m_fn));
    }
  m_function->end_block (m_bi, m_function->bb (bb));
}

// See the comment above the declaration.
void
bb_info::print_identifier (pretty_printer *pp) const
{
  char tmp[3 * sizeof (index ()) + 3];
  snprintf (tmp, sizeof (tmp), "bb%d", index ());
  pp_string (pp, tmp);
  if (ebb_info *ebb = this->ebb ())
    {
      pp_space (pp);
      pp_left_bracket (pp);
      ebb->print_identifier (pp);
      pp_right_bracket (pp);
    }
}

// See the comment above the declaration.
void
bb_info::print_full (pretty_printer *pp) const
{
  pp_string (pp, "basic block ");
  print_identifier (pp);
  pp_colon (pp);

  auto print_insn = [pp](const char *header, const insn_info *insn)
    {
      pp_newline_and_indent (pp, 2);
      pp_string (pp, header);
      pp_newline_and_indent (pp, 2);
      if (insn)
        pp_insn (pp, insn);
      else
        pp_string (pp, "<uninitialized>");
      pp_indentation (pp) -= 4;
    };

  print_insn ("head:", head_insn ());

  pp_newline (pp);
  pp_newline_and_indent (pp, 2);
  pp_string (pp, "contents:");
  if (!head_insn ())
    {
      pp_newline_and_indent (pp, 2);
      pp_string (pp, "<uninitialized>");
      pp_indentation (pp) -= 2;
    }
  else if (auto insns = real_insns ())
    {
      bool is_first = true;
      for (const insn_info *insn : insns)
        {
          if (is_first)
            is_first = false;
          else
            pp_newline (pp);
          pp_newline_and_indent (pp, 2);
          pp_insn (pp, insn);
          pp_indentation (pp) -= 2;
        }
    }
  else
    {
      pp_newline_and_indent (pp, 2);
      pp_string (pp, "none");
      pp_indentation (pp) -= 2;
    }
  pp_indentation (pp) -= 2;

  pp_newline (pp);
  print_insn ("end:", end_insn ());
}

// See the comment above the declaration.
void
ebb_call_clobbers_info::print_summary (pretty_printer *pp) const
{
  pp_string (pp, "call clobbers for ABI ");
  if (m_abi)
    pp_decimal_int (pp, m_abi->id ());
  else
    pp_string (pp, "<null>");
}

// See the comment above the declaration.
void
ebb_call_clobbers_info::print_full (pretty_printer *pp) const
{
  print_summary (pp);
  pp_colon (pp);
  pp_newline_and_indent (pp, 2);
  auto print_node = [](pretty_printer *pp,
                       const insn_call_clobbers_note *note)
    {
      if (insn_info *insn = note->insn ())
        insn->print_identifier_and_location (pp);
      else
        pp_string (pp, "<null>");
    };
  print (pp, root (), print_node);
  pp_indentation (pp) -= 2;
}

// See the comment above the declaration.
void
ebb_info::print_identifier (pretty_printer *pp) const
{
  // first_bb is populated by the constructor and so should always
  // be nonnull.
  auto index = first_bb ()->index ();
  char tmp[3 * sizeof (index) + 4];
  snprintf (tmp, sizeof (tmp), "ebb%d", index);
  pp_string (pp, tmp);
}

// See the comment above the declaration.
void
ebb_info::print_full (pretty_printer *pp) const
{
  pp_string (pp, "extended basic block ");
  print_identifier (pp);
  pp_colon (pp);

  pp_newline_and_indent (pp, 2);
  if (insn_info *phi_insn = this->phi_insn ())
    {
      phi_insn->print_identifier_and_location (pp);
      pp_colon (pp);
      if (auto phis = this->phis ())
        {
          bool is_first = true;
          for (const phi_info *phi : phis)
            {
              if (is_first)
                is_first = false;
              else
                pp_newline (pp);
              pp_newline_and_indent (pp, 2);
              pp_access (pp, phi, PP_ACCESS_SETTER);
              pp_indentation (pp) -= 2;
            }
        }
      else
        {
          pp_newline_and_indent (pp, 2);
          pp_string (pp, "no phi nodes");
          pp_indentation (pp) -= 2;
        }
    }
  else
    pp_string (pp, "no phi insn");
  pp_indentation (pp) -= 2;

  for (const bb_info *bb : bbs ())
    {
      pp_newline (pp);
      pp_newline_and_indent (pp, 2);
      pp_bb (pp, bb);
      pp_indentation (pp) -= 2;
    }

  for (ebb_call_clobbers_info *ecc : call_clobbers ())
    {
      pp_newline (pp);
      pp_newline_and_indent (pp, 2);
      pp_ebb_call_clobbers (pp, ecc);
      pp_indentation (pp) -= 2;
    }
}

// Add a dummy use to mark that DEF is live out of BB's EBB at the end of BB.
void
function_info::add_live_out_use (bb_info *bb, set_info *def)
{
  // There is nothing to do if DEF is an artificial definition at the end
  // of BB.  In that case the definition is rooted at the end of the block
  // and we wouldn't gain anything by inserting a use immediately after it.
  // If we did want to insert a use, we'd need to associate it with a new
  // instruction that comes after bb->end_insn ().
  if (def->insn () == bb->end_insn ())
    return;

  // If the end of the block already has an artificial use, that use
  // acts to make DEF live at the appropriate point.
  use_info *use = def->last_nondebug_insn_use ();
  if (use && use->insn () == bb->end_insn ())
    return;

  // Currently there is no need to maintain a backward link from the end
  // instruction to the list of live-out uses.  Such a list would be
  // expensive to update if it was represented using the usual insn_info
  // access arrays.
  use = allocate<use_info> (bb->end_insn (), def->resource (), def);
  use->set_is_live_out_use (true);
  add_use (use);
}

// Return true if all nondebug uses of DEF are live-out uses.
static bool
all_uses_are_live_out_uses (set_info *def)
{
  for (use_info *use : def->all_uses ())
    if (!use->is_in_debug_insn () && !use->is_live_out_use ())
      return false;
  return true;
}

// SET, if nonnull, is a definition of something that is live out from BB.
// Return the live-out value itself.
set_info *
function_info::live_out_value (bb_info *bb, set_info *set)
{
  // Degenerate phis only exist to provide a definition for uses in the
  // same EBB.  The live-out value is the same as the live-in value.
  if (auto *phi = safe_dyn_cast<phi_info *> (set))
    if (phi->is_degenerate ())
      {
        set = phi->input_value (0);

        // Remove the phi if it turned out to be useless.  This is
        // mainly useful for memory, because we don't know ahead of time
        // whether a block will use memory or not.
        if (bb == bb->ebb ()->last_bb () && all_uses_are_live_out_uses (phi))
          replace_phi (phi, set);
      }

  return set;
}

// Add PHI to EBB and enter it into the function's hash table.
void
function_info::append_phi (ebb_info *ebb, phi_info *phi)
{
  phi_info *first_phi = ebb->first_phi ();
  if (first_phi)
    first_phi->set_prev_phi (phi);
  phi->set_next_phi (first_phi);
  ebb->set_first_phi (phi);
  add_def (phi);
}

// Remove PHI from its current position in the SSA graph.
void
function_info::remove_phi (phi_info *phi)
{
  phi_info *next = phi->next_phi ();
  phi_info *prev = phi->prev_phi ();

  if (next)
    next->set_prev_phi (prev);

  if (prev)
    prev->set_next_phi (next);
  else
    phi->ebb ()->set_first_phi (next);

  remove_def (phi);
  phi->clear_phi_links ();
}

// Remove PHI from the SSA graph and free its memory.
void
function_info::delete_phi (phi_info *phi)
{
  gcc_assert (!phi->has_any_uses ());

  // Remove the inputs to the phi.
  for (use_info *input : phi->inputs ())
    remove_use (input);

  remove_phi (phi);

  phi->set_next_phi (m_free_phis);
  m_free_phis = phi;
}

// If possible, remove PHI and replace all uses with NEW_VALUE.
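//
// If NEW_VALUE is nonnull and PHI still has nondebug instruction uses
// other than live-out markers, PHI must be kept for those local uses:
// it is turned into a degenerate phi whose single input is NEW_VALUE and
// any phi uses of PHI are redirected to NEW_VALUE directly.  Otherwise,
// all remaining uses are redirected to NEW_VALUE (live-out-only uses are
// simply removed) and PHI itself is deleted.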
void
function_info::replace_phi (phi_info *phi, set_info *new_value)
{
  auto update_use = [&](use_info *use)
    {
      remove_use (use);
      use->set_def (new_value);
      add_use (use);
    };

  if (new_value)
    for (use_info *use : phi->nondebug_insn_uses ())
      if (!use->is_live_out_use ())
        {
          // We need to keep the phi around for its local uses.
          // Turn it into a degenerate phi, if it isn't already.
          use_info *use = phi->input_use (0);
          if (use->def () != new_value)
            update_use (use);

          if (phi->is_degenerate ())
            return;

          phi->make_degenerate (use);

          // Redirect all phi users to NEW_VALUE.
          while (use_info *phi_use = phi->last_phi_use ())
            update_use (phi_use);

          return;
        }

  // Replace the uses.  We can discard uses that only existed for the
  // sake of marking live-out values, since the resource is now transparent
  // in the phi's EBB.
  while (use_info *use = phi->last_use ())
    if (use->is_live_out_use ())
      remove_use (use);
    else
      update_use (use);

  delete_phi (phi);
}

// Create and return a phi node for EBB.  RESOURCE is the resource that
// the phi node sets (and thus that all the inputs set too).  NUM_INPUTS
// is the number of inputs, which is 1 for a degenerate phi.  INPUTS[I]
// is a set_info that gives the value of input I, or null if the value
// is either unknown or uninitialized.  If NUM_INPUTS > 1, this array
// is allocated on the main obstack and can be reused for the use array.
//
// Add the created phi node to its basic block and enter it into the
// function's hash table.
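//
// A typical caller therefore looks something like this (sketch):
//
//   access_info *input[] = { some_def };
//   phi_info *phi = create_phi (ebb, some_def->resource (), input, 1);
//
// which is how create_degenerate_phi below builds a single-input phi.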
phi_info *
function_info::create_phi (ebb_info *ebb, resource_info resource,
                           access_info **inputs, unsigned int num_inputs)
{
  phi_info *phi = m_free_phis;
  if (phi)
    {
      m_free_phis = phi->next_phi ();
      *phi = phi_info (ebb->phi_insn (), resource, phi->uid ());
    }
  else
    {
      phi = allocate<phi_info> (ebb->phi_insn (), resource, m_next_phi_uid);
      m_next_phi_uid += 1;
    }

  // Convert the array of set_infos into an array of use_infos.  Also work
  // out what mode the phi should have.
  machine_mode new_mode = resource.mode;
  for (unsigned int i = 0; i < num_inputs; ++i)
    {
      auto *input = safe_as_a<set_info *> (inputs[i]);
      auto *use = allocate<use_info> (phi, resource, input);
      add_use (use);
      inputs[i] = use;
      if (input)
        new_mode = combine_modes (new_mode, input->mode ());
    }

  phi->set_inputs (use_array (inputs, num_inputs));
  phi->set_mode (new_mode);

  append_phi (ebb, phi);

  return phi;
}

// Create and return a degenerate phi for EBB whose input comes from DEF.
// This is used in cases where DEF is known to be available on entry to
// EBB but was not previously used within it.  If DEF is for a register,
// there are two cases:
//
// (1) DEF was already live on entry to EBB but was previously transparent
//     within it.
//
// (2) DEF was not previously live on entry to EBB and is being made live
//     by this update.
//
// At the moment, this function only handles the case in which EBB has a
// single predecessor block and DEF is defined in that block's EBB.
phi_info *
function_info::create_degenerate_phi (ebb_info *ebb, set_info *def)
{
  access_info *input = def;
  phi_info *phi = create_phi (ebb, def->resource (), &input, 1);
  if (def->is_reg ())
    {
      unsigned int regno = def->regno ();

      // Find the single predecessor mentioned above.
      basic_block pred_cfg_bb = single_pred (ebb->first_bb ()->cfg_bb ());
      bb_info *pred_bb = this->bb (pred_cfg_bb);

      if (bitmap_set_bit (DF_LR_IN (ebb->first_bb ()->cfg_bb ()), regno))
        {
          // The register was not previously live on entry to EBB and
          // might not have been live on exit from PRED_BB either.
          if (bitmap_set_bit (DF_LR_OUT (pred_cfg_bb), regno))
            add_live_out_use (pred_bb, def);
        }
      else
        {
          // The register was previously live on entry to EBB.  Add
          // live-out uses at the appropriate points.
          insn_info *next_insn = nullptr;
          if (def_info *next_def = phi->next_def ())
            next_insn = next_def->insn ();
          for (bb_info *bb : ebb->bbs ())
            {
              if ((next_insn && *next_insn <= *bb->end_insn ())
                  || !bitmap_bit_p (DF_LR_OUT (bb->cfg_bb ()), regno))
                break;
              add_live_out_use (bb, def);
            }
        }
    }
  return phi;
}

// Create a bb_info for CFG_BB, given that no such structure currently exists.
bb_info *
function_info::create_bb_info (basic_block cfg_bb)
{
  bb_info *bb = allocate<bb_info> (cfg_bb);
  gcc_checking_assert (!m_bbs[cfg_bb->index]);
  m_bbs[cfg_bb->index] = bb;
  return bb;
}

// Add BB to the end of the list of blocks.
void
function_info::append_bb (bb_info *bb)
{
  if (m_last_bb)
    m_last_bb->set_next_bb (bb);
  else
    m_first_bb = bb;
  bb->set_prev_bb (m_last_bb);
  m_last_bb = bb;
}

// Calculate BI.potential_phi_regs and BI.potential_phi_regs_for_debug.
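//
// For example, a pseudo register that is set exactly once, in a block
// that dominates all of its uses, can never need a phi node, whereas a
// pseudo that is set on both arms of a diamond needs one at the join
// point.  Upwards-exposed registers are also included, because the entry
// block provides an implicit definition for them.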
void
function_info::calculate_potential_phi_regs (build_info &bi)
{
  auto *lr_info = DF_LR_BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (m_fn));
  bool is_debug = MAY_HAVE_DEBUG_INSNS;
  for (unsigned int regno = 0; regno < m_num_regs; ++regno)
    if (regno >= DF_REG_SIZE (DF)
        // Exclude registers that have a single definition that dominates
        // all uses.  If the definition does not dominate all uses,
        // the register will be exposed upwards to the entry block but
        // will not be defined by the entry block.
        || DF_REG_DEF_COUNT (regno) > 1
        || (!bitmap_bit_p (&lr_info->def, regno)
            && bitmap_bit_p (&lr_info->out, regno)))
      {
        bitmap_set_bit (bi.potential_phi_regs, regno);
        if (is_debug)
          bitmap_set_bit (bi.potential_phi_regs_for_debug, regno);
      }
}

// Called while building SSA form using BI.  Decide where phi nodes
// should be placed for each register and initialize BI.bb_phis accordingly.
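//
// This is the usual dominance-frontier placement, restricted to registers
// that are live on entry to the block in question.  As a small example,
// in a diamond:
//
//        B1
//       /  \
//      B2  B3
//       \  /
//        B4
//
// B4 is in the dominance frontier of both B2 and B3, so a register set in
// B2 or B3 and live into B4 gets a phi node in B4.  The phi itself counts
// as a definition, which is why the worklist below iterates until no new
// blocks are added.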
void
function_info::place_phis (build_info &bi)
{
  unsigned int num_bb_indices = last_basic_block_for_fn (m_fn);

  // Calculate dominance frontiers.
  auto_vec<bitmap_head> frontiers;
  frontiers.safe_grow (num_bb_indices);
  for (unsigned int i = 0; i < num_bb_indices; ++i)
    bitmap_initialize (&frontiers[i], &bitmap_default_obstack);
  compute_dominance_frontiers (frontiers.address ());

  // In extreme cases, the number of live-in registers can be much
  // greater than the number of phi nodes needed in a block (see PR98863).
  // Try to reduce the number of operations involving live-in sets by using
  // the UNFILTERED sets as a staging area: registers in UNFILTERED[B] need
  // phi nodes if they are live on entry to block B, but do not need
  // phi nodes otherwise.
  auto_vec<bitmap_head> unfiltered;
  unfiltered.safe_grow (num_bb_indices);
  for (unsigned int i = 0; i < num_bb_indices; ++i)
    bitmap_initialize (&unfiltered[i], &bitmap_default_obstack);

  // If block B1 defines R and if B2 is in the dominance frontier of B1,
  // queue a possible phi node for R in B2.
  auto_bitmap worklist;
  for (unsigned int b1 = 0; b1 < num_bb_indices; ++b1)
    {
      // Only access DF information for blocks that are known to exist.
      if (bitmap_empty_p (&frontiers[b1]))
        continue;

      // Defs in B1 that are possibly in LR_IN in the dominance frontier
      // blocks.
      auto_bitmap b1_def;
      bitmap_and (b1_def, &DF_LR_BB_INFO (BASIC_BLOCK_FOR_FN (m_fn, b1))->def,
                  DF_LR_OUT (BASIC_BLOCK_FOR_FN (m_fn, b1)));

      bitmap_iterator bmi;
      unsigned int b2;
      EXECUTE_IF_SET_IN_BITMAP (&frontiers[b1], 0, b2, bmi)
        if (bitmap_ior_into (&unfiltered[b2], b1_def)
            && !bitmap_empty_p (&frontiers[b2]))
          // Propagate the (potential) new phi node definitions in B2.
          bitmap_set_bit (worklist, b2);
    }

  while (!bitmap_empty_p (worklist))
    {
      unsigned int b1 = bitmap_first_set_bit (worklist);
      bitmap_clear_bit (worklist, b1);

      // Restrict the phi nodes to registers that are live on entry to
      // the block.
      bitmap b1_in = DF_LR_IN (BASIC_BLOCK_FOR_FN (m_fn, b1));
      bitmap b1_phis = &bi.bb_phis[b1].regs;
      if (!bitmap_ior_and_into (b1_phis, &unfiltered[b1], b1_in))
        continue;

      // If block B1 has a phi node for R and if B2 is in the dominance
      // frontier of B1, queue a possible phi node for R in B2.
      bitmap_iterator bmi;
      unsigned int b2;
      EXECUTE_IF_SET_IN_BITMAP (&frontiers[b1], 0, b2, bmi)
        if (bitmap_ior_into (&unfiltered[b2], b1_phis)
            && !bitmap_empty_p (&frontiers[b2]))
          bitmap_set_bit (worklist, b2);
    }

  basic_block cfg_bb;
  FOR_ALL_BB_FN (cfg_bb, m_fn)
    {
      // Calculate the set of phi nodes for blocks that don't have any
      // dominance frontiers.  We only need to do this once per block.
      unsigned int i = cfg_bb->index;
      bb_phi_info &phis = bi.bb_phis[i];
      if (bitmap_empty_p (&frontiers[i]))
        bitmap_and (&phis.regs, &unfiltered[i], DF_LR_IN (cfg_bb));

      // Create an array that contains all phi inputs for this block.
      // See the comment above the member variables for more information.
      phis.num_phis = bitmap_count_bits (&phis.regs);
      phis.num_preds = EDGE_COUNT (cfg_bb->preds);
      unsigned int num_inputs = phis.num_phis * phis.num_preds;
      if (num_inputs != 0)
        {
          phis.inputs = XOBNEWVEC (&m_temp_obstack, set_info *, num_inputs);
          memset (phis.inputs, 0, num_inputs * sizeof (phis.inputs[0]));
        }
    }

  // Free the temporary bitmaps.
  for (unsigned int i = 0; i < num_bb_indices; ++i)
    {
      bitmap_release (&frontiers[i]);
      bitmap_release (&unfiltered[i]);
    }
}

// Called while building SSA form using BI, with BI.current_bb being
// the entry block.
//
// Create the entry block instructions and their definitions.  The only
// useful instruction is the end instruction, which carries definitions
// for the values that are live on entry to the function.  However, it
// seems simpler to create a head instruction too, rather than force all
// users of the block information to treat the entry block as a special case.
void
function_info::add_entry_block_defs (build_info &bi)
{
  bb_info *bb = bi.current_bb;
  basic_block cfg_bb = bi.current_bb->cfg_bb ();
  auto *lr_info = DF_LR_BB_INFO (cfg_bb);

  bb->set_head_insn (append_artificial_insn (bb));
  insn_info *insn = append_artificial_insn (bb);
  bb->set_end_insn (insn);

  start_insn_accesses ();

  // Using LR to derive the liveness information means that we create an
  // entry block definition for upwards exposed registers.  These registers
  // are sometimes genuinely uninitialized.  However, some targets also
  // create a pseudo PIC base register and only initialize it later.
  // Handling that case correctly seems more important than optimizing
  // uninitialized uses.
  unsigned int regno;
  bitmap_iterator in_bi;
  EXECUTE_IF_SET_IN_BITMAP (&lr_info->out, 0, regno, in_bi)
    {
      auto *set = allocate<set_info> (insn, full_register (regno));
      append_def (set);
      m_temp_defs.safe_push (set);
      bi.record_reg_def (set);
    }

  // Create a definition that reflects the state of memory on entry to
  // the function.
  auto *set = allocate<set_info> (insn, memory);
  append_def (set);
  m_temp_defs.safe_push (set);
  bi.record_mem_def (set);

  finish_insn_accesses (insn);
}

// Lazily calculate the value of BI.ebb_live_in_for_debug for BI.current_ebb.
void
function_info::calculate_ebb_live_in_for_debug (build_info &bi)
{
  gcc_checking_assert (bitmap_empty_p (bi.tmp_ebb_live_in_for_debug));
  bi.ebb_live_in_for_debug = bi.tmp_ebb_live_in_for_debug;
  bitmap_and (bi.ebb_live_in_for_debug, bi.potential_phi_regs_for_debug,
              DF_LR_IN (bi.current_ebb->first_bb ()->cfg_bb ()));
  bitmap_tree_view (bi.ebb_live_in_for_debug);
}

// Called while building SSA form using BI.  Create phi nodes for the
// current EBB.
void
function_info::add_phi_nodes (build_info &bi)
{
  ebb_info *ebb = bi.current_ebb;
  basic_block cfg_bb = ebb->first_bb ()->cfg_bb ();

  // Create the register phis for this EBB.
  bb_phi_info &phis = bi.bb_phis[cfg_bb->index];
  unsigned int num_preds = phis.num_preds;
  unsigned int regno;
  bitmap_iterator in_bi;
  EXECUTE_IF_SET_IN_BITMAP (&phis.regs, 0, regno, in_bi)
    {
      gcc_checking_assert (bitmap_bit_p (bi.potential_phi_regs, regno));

      // Create an array of phi inputs, to be filled in later.
      auto *inputs = XOBNEWVEC (&m_obstack, access_info *, num_preds);
      memset (inputs, 0, sizeof (access_info *) * num_preds);

      // Later code works out the correct mode of the phi.  Use BLKmode
      // as a placeholder for now.
      phi_info *phi = create_phi (ebb, { E_BLKmode, regno },
                                  inputs, num_preds);
      bi.record_reg_def (phi);
    }

  bitmap_copy (bi.ebb_def_regs, &phis.regs);

  // Collect the live-in memory definitions and record whether they're
  // all the same.
  m_temp_defs.reserve (num_preds);
  set_info *mem_value = nullptr;
  bool mem_phi_is_degenerate = true;
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, cfg_bb->preds)
    {
      bb_info *pred_bb = this->bb (e->src);
      if (pred_bb && pred_bb->head_insn ())
        {
          mem_value = bi.bb_mem_live_out[pred_bb->index ()];
          m_temp_defs.quick_push (mem_value);
          if (mem_value != m_temp_defs[0])
            mem_phi_is_degenerate = false;
        }
      else
        {
          m_temp_defs.quick_push (nullptr);
          mem_phi_is_degenerate = false;
        }
    }

  // Create a phi for memory, on the assumption that something in the
  // EBB will need it.
  if (mem_phi_is_degenerate)
    {
      access_info *input[] = { mem_value };
      mem_value = create_phi (ebb, memory, input, 1);
    }
  else
    {
      obstack_grow (&m_obstack, m_temp_defs.address (),
                    num_preds * sizeof (access_info *));
      auto *inputs = static_cast<access_info **> (obstack_finish (&m_obstack));
      mem_value = create_phi (ebb, memory, inputs, num_preds);
    }
  bi.record_mem_def (mem_value);
  m_temp_defs.truncate (0);
}

// Called while building SSA form using BI.
//
// If FLAGS is DF_REF_AT_TOP, create the head insn for BI.current_bb
// and populate its uses and definitions.  If FLAGS is 0, do the same
// for the end insn.
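//
// start_block below calls this twice for each reachable non-entry block:
// once with DF_REF_AT_TOP before adding the block's real instructions and
// once with no flags afterwards, so that the artificial head and end
// instructions bracket the block's contents.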
void
function_info::add_artificial_accesses (build_info &bi, df_ref_flags flags)
{
  bb_info *bb = bi.current_bb;
  basic_block cfg_bb = bb->cfg_bb ();
  auto *lr_info = DF_LR_BB_INFO (cfg_bb);
  df_ref ref;

  insn_info *insn;
  if (flags == DF_REF_AT_TOP)
    {
      if (cfg_bb->index == EXIT_BLOCK)
        insn = append_artificial_insn (bb);
      else
        insn = append_artificial_insn (bb, bb_note (cfg_bb));
      bb->set_head_insn (insn);
    }
  else
    {
      insn = append_artificial_insn (bb);
      bb->set_end_insn (insn);
    }

  start_insn_accesses ();

  FOR_EACH_ARTIFICIAL_USE (ref, cfg_bb->index)
    if ((DF_REF_FLAGS (ref) & DF_REF_AT_TOP) == flags)
      {
        unsigned int regno = DF_REF_REGNO (ref);
        machine_mode mode = GET_MODE (DF_REF_REAL_REG (ref));

        // A definition must be available.
        gcc_checking_assert (bitmap_bit_p (&lr_info->in, regno)
                             || (flags != DF_REF_AT_TOP
                                 && bitmap_bit_p (&lr_info->def, regno)));
        m_temp_uses.safe_push (create_reg_use (bi, insn, { mode, regno }));
      }

  // Track the return value of memory by adding an artificial use of
  // memory at the end of the exit block.
  if (flags == 0 && cfg_bb->index == EXIT_BLOCK)
    {
      auto *use = allocate<use_info> (insn, memory, bi.current_mem_value ());
      add_use (use);
      m_temp_uses.safe_push (use);
    }

  FOR_EACH_ARTIFICIAL_DEF (ref, cfg_bb->index)
    if ((DF_REF_FLAGS (ref) & DF_REF_AT_TOP) == flags)
      {
        unsigned int regno = DF_REF_REGNO (ref);
        machine_mode mode = GET_MODE (DF_REF_REAL_REG (ref));
        resource_info resource { mode, regno };

        // We rely on the def set being correct.
        gcc_checking_assert (bitmap_bit_p (&lr_info->def, regno));

        // If the value isn't used later in the block and isn't live
        // on exit, we could instead represent the definition as a
        // clobber_info.  However, that case should be relatively rare
        // and set_info is in any case more compact than clobber_info.
        set_info *def = allocate<set_info> (insn, resource);
        append_def (def);
        m_temp_defs.safe_push (def);
        bi.record_reg_def (def);
      }

  // Model the effect of a memory clobber on an incoming edge by adding
  // a fake definition of memory at the start of the block.  We don't need
  // to add a use of the phi node because memory is implicitly always live.
  if (flags == DF_REF_AT_TOP && has_abnormal_call_or_eh_pred_edge_p (cfg_bb))
    {
      set_info *def = allocate<set_info> (insn, memory);
      append_def (def);
      m_temp_defs.safe_push (def);
      bi.record_mem_def (def);
    }

  finish_insn_accesses (insn);
}

// Called while building SSA form using BI.  Create insn_infos for all
// relevant instructions in BI.current_bb.
void
function_info::add_block_contents (build_info &bi)
{
  basic_block cfg_bb = bi.current_bb->cfg_bb ();
  rtx_insn *insn;
  FOR_BB_INSNS (cfg_bb, insn)
    if (INSN_P (insn))
      add_insn_to_block (bi, insn);
}

// Called while building SSA form using BI.  Record live-out register values
// in the phi inputs of successor blocks and create live-out uses where
// appropriate.  Record the live-out memory value in BI.bb_mem_live_out.
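//
// The input for the Nth phi register (counting set bits of the successor's
// phi bitmap in ascending register order) of a successor reached by an edge
// whose dest_idx is I is stored at that successor's inputs[I * num_phis + N],
// which is the layout that populate_phi_inputs expects.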
void
function_info::record_block_live_out (build_info &bi)
{
  bb_info *bb = bi.current_bb;
  ebb_info *ebb = bi.current_ebb;
  basic_block cfg_bb = bb->cfg_bb ();

  // Record the live-out register values in the phi inputs of
  // successor blocks.
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, cfg_bb->succs)
    {
      bb_phi_info &phis = bi.bb_phis[e->dest->index];
      unsigned int input_i = e->dest_idx * phis.num_phis;
      unsigned int regno;
      bitmap_iterator out_bi;
      EXECUTE_IF_SET_IN_BITMAP (&phis.regs, 0, regno, out_bi)
        {
          phis.inputs[input_i]
            = live_out_value (bb, bi.current_reg_value (regno));
          input_i += 1;
        }
    }

  // Add the set of registers that were defined in this BB to the set
  // of potentially-live registers defined in the EBB.
  bitmap_ior_into (bi.ebb_def_regs, &DF_LR_BB_INFO (cfg_bb)->def);

  // Iterate through the registers in LIVE_OUT and see whether we need
  // to add a live-out use for them.
  auto record_live_out_regs = [&](bitmap live_out)
    {
      unsigned int regno;
      bitmap_iterator out_bi;
      EXECUTE_IF_AND_IN_BITMAP (bi.ebb_def_regs, live_out, 0, regno, out_bi)
        {
          set_info *value = live_out_value (bb, bi.current_reg_value (regno));
          if (value && value->ebb () == ebb)
            add_live_out_use (bb, value);
        }
    };

  if (bb == ebb->last_bb ())
    // All live-out registers might need live-out uses.
    record_live_out_regs (DF_LR_OUT (cfg_bb));
  else
    // Registers might need live-out uses if they are live on entry
    // to a successor block in a different EBB.
    FOR_EACH_EDGE (e, ei, cfg_bb->succs)
      {
        bb_info *dest_bb = this->bb (e->dest);
        if (dest_bb->ebb () != ebb || dest_bb == ebb->first_bb ())
          record_live_out_regs (DF_LR_IN (e->dest));
      }

  // Record the live-out memory value.
  bi.bb_mem_live_out[cfg_bb->index]
    = live_out_value (bb, bi.current_mem_value ());
}

// Add BB and its contents to the SSA information.
void
function_info::start_block (build_info &bi, bb_info *bb)
{
  ebb_info *ebb = bb->ebb ();

  // We (need to) add all blocks from one EBB before moving on to the next.
  bi.current_bb = bb;
  if (bb == ebb->first_bb ())
    bi.current_ebb = ebb;
  else
    gcc_assert (bi.current_ebb == ebb);

  // Record the start of this block's definitions in the definitions stack.
  bi.old_def_stack_limit.safe_push (bi.def_stack.length ());

  // Add the block itself.
  append_bb (bb);

  // If the block starts an EBB, create the phi insn.  This insn should exist
  // for all EBBs, even if they don't (yet) need phis.
  if (bb == ebb->first_bb ())
    ebb->set_phi_insn (append_artificial_insn (bb));

  if (bb->index () == ENTRY_BLOCK)
    {
      add_entry_block_defs (bi);
      record_block_live_out (bi);
      return;
    }

  if (EDGE_COUNT (bb->cfg_bb ()->preds) == 0)
    {
      // Leave unreachable blocks empty, since there is no useful
      // liveness information for them, and anything they do will
      // be wasted work.  In a cleaned-up cfg, the only unreachable
      // block we should see is the exit block of a noreturn function.
      bb->set_head_insn (append_artificial_insn (bb));
      bb->set_end_insn (append_artificial_insn (bb));
      return;
    }

  // If the block starts an EBB, create the phi nodes.
  if (bb == ebb->first_bb ())
    add_phi_nodes (bi);

  // Process the contents of the block.
  add_artificial_accesses (bi, DF_REF_AT_TOP);
  if (bb->index () != EXIT_BLOCK)
    add_block_contents (bi);
  add_artificial_accesses (bi, df_ref_flags ());
  record_block_live_out (bi);

  // If we needed to calculate a live-in set for debug purposes,
  // reset it to null at the end of the EBB.  Convert the underlying
  // bitmap to an empty list view, ready for the next calculation.
  if (bi.ebb_live_in_for_debug && bb == ebb->last_bb ())
    {
      bitmap_clear (bi.tmp_ebb_live_in_for_debug);
      bitmap_list_view (bi.tmp_ebb_live_in_for_debug);
      bi.ebb_live_in_for_debug = nullptr;
    }
}

// Finish adding BB and the blocks that it dominates to the SSA information.
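//
// This restores bi.last_access to the state it had before start_block
// was called for BB, so that BB's siblings in the dominator tree see
// only definitions that dominate them, not definitions made inside BB.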
void
function_info::end_block (build_info &bi, bb_info *bb)
{
  // Restore the register last_access information to the state it was
  // in before we started processing BB.
  unsigned int old_limit = bi.old_def_stack_limit.pop ();
  while (bi.def_stack.length () > old_limit)
    {
      // We pushed a definition in BB if it was the first dominating
      // definition (and so the previous entry was null).  In other
      // cases we pushed the previous dominating definition.
      def_info *def = bi.def_stack.pop ();
      unsigned int regno = def->regno ();
      if (def->bb () == bb)
        def = nullptr;
      bi.last_access[regno + 1] = def;
    }
}

// Finish setting up the phi nodes for each block, now that we've added
// the contents of all blocks.
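//
// Degenerate phis created while walking the blocks already had their
// single input supplied to create_phi, so the work left here is to fill
// in the register phis that place_phis planned and the backedge inputs
// of any non-degenerate memory phi (its forward-edge inputs were already
// available when add_phi_nodes ran).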
void
function_info::populate_phi_inputs (build_info &bi)
{
  auto_vec<phi_info *, 32> sorted_phis;
  for (ebb_info *ebb : ebbs ())
    {
      if (!ebb->first_phi ())
        continue;

      // Get a sorted array of EBB's phi nodes.
      basic_block cfg_bb = ebb->first_bb ()->cfg_bb ();
      bb_phi_info &phis = bi.bb_phis[cfg_bb->index];
      sorted_phis.truncate (0);
      for (phi_info *phi : ebb->phis ())
        sorted_phis.safe_push (phi);
      std::sort (sorted_phis.address (),
                 sorted_phis.address () + sorted_phis.length (),
                 compare_access_infos);

      // Set the inputs of the non-degenerate register phis.  All inputs
      // for one edge come before all inputs for the next edge.
      set_info **inputs = phis.inputs;
      unsigned int phi_i = 0;
      bitmap_iterator bmi;
      unsigned int regno;
      EXECUTE_IF_SET_IN_BITMAP (&phis.regs, 0, regno, bmi)
        {
          // Skip intervening degenerate phis.
          while (sorted_phis[phi_i]->regno () < regno)
            phi_i += 1;
          phi_info *phi = sorted_phis[phi_i];
          gcc_assert (phi->regno () == regno);
          for (unsigned int input_i = 0; input_i < phis.num_preds; ++input_i)
            if (set_info *input = inputs[input_i * phis.num_phis])
              {
                use_info *use = phi->input_use (input_i);
                gcc_assert (!use->def ());
                use->set_def (input);
                add_use (use);
              }
          phi_i += 1;
          inputs += 1;
        }

      // Fill in the backedge inputs to any memory phi.
      phi_info *mem_phi = sorted_phis.last ();
      if (mem_phi->is_mem () && !mem_phi->is_degenerate ())
        {
          edge e;
          edge_iterator ei;
          FOR_EACH_EDGE (e, ei, cfg_bb->preds)
            {
              use_info *use = mem_phi->input_use (e->dest_idx);
              if (!use->def ())
                {
                  use->set_def (bi.bb_mem_live_out[e->src->index]);
                  add_use (use);
                }
            }
        }
    }
}

// Return true if it would be better to continue an EBB across NEW_EDGE
// rather than across OLD_EDGE, given that both edges are viable candidates.
// This is not a total ordering.
static bool
better_ebb_edge_p (edge new_edge, edge old_edge)
{
  // Prefer the likeliest edge.
  if (new_edge->probability.initialized_p ()
      && old_edge->probability.initialized_p ()
      && !(old_edge->probability == new_edge->probability))
    return old_edge->probability < new_edge->probability;

  // If both edges are equally likely, prefer a fallthru edge.
  if (new_edge->flags & EDGE_FALLTHRU)
    return true;
  if (old_edge->flags & EDGE_FALLTHRU)
    return false;

  // Otherwise just stick with OLD_EDGE.
  return false;
}

// Pick and return the next basic block in an EBB that currently ends with BB.
// Return null if the EBB must end with BB.
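//
// A successor is a viable candidate only if the edge to it is not complex,
// the successor is not the exit block, BB is its only predecessor (so the
// EBB stays single-entry), and both blocks are optimized for the same goal
// (speed vs. size).  better_ebb_edge_p then chooses between candidates.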
static basic_block
choose_next_block_in_ebb (basic_block bb)
{
  // Although there's nothing in principle wrong with having an EBB that
  // starts with the entry block and includes later blocks, there's not
  // really much point either.  Keeping the entry block separate means
  // that uses of arguments consistently occur through phi nodes, rather
  // than the arguments sometimes appearing to come from an EBB-local
  // definition instead.
  if (bb->index == ENTRY_BLOCK)
    return nullptr;

  bool optimize_for_speed_p = optimize_bb_for_speed_p (bb);
  edge best_edge = nullptr;
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (!(e->flags & EDGE_COMPLEX)
        && e->dest->index != EXIT_BLOCK
        && single_pred_p (e->dest)
        && optimize_for_speed_p == optimize_bb_for_speed_p (e->dest)
        && (!best_edge || better_ebb_edge_p (e, best_edge)))
      best_edge = e;

  return best_edge ? best_edge->dest : nullptr;
}

// Partition the function into extended basic blocks.  Create the
// associated ebb_infos and bb_infos, but don't add the bb_infos
// to the function list yet.
void
function_info::create_ebbs (build_info &bi)
{
  // Compute the starting reverse postorder.  We tweak this later to try
  // to get better EBB assignments.
  auto *postorder = new int[n_basic_blocks_for_fn (m_fn)];
  unsigned int postorder_num
    = pre_and_rev_post_order_compute (nullptr, postorder, true);
  gcc_assert (int (postorder_num) <= n_basic_blocks_for_fn (m_fn));

  // Iterate over the blocks in reverse postorder.  In cases where
  // multiple possible orders exist, prefer orders that chain blocks
  // together into EBBs.  If multiple possible EBBs exist, try to pick
  // the ones that are most likely to be profitable.
  auto_vec<bb_info *, 16> bbs;
  unsigned int next_bb_index = 0;
  for (unsigned int i = 0; i < postorder_num; ++i)
    if (!m_bbs[postorder[i]])
      {
        // Choose and create the blocks that should form the next EBB.
        basic_block cfg_bb = BASIC_BLOCK_FOR_FN (m_fn, postorder[i]);
        do
          {
            // Record the chosen block order in a new RPO.
            bi.bb_to_rpo[cfg_bb->index] = next_bb_index++;
            bbs.safe_push (create_bb_info (cfg_bb));
            cfg_bb = choose_next_block_in_ebb (cfg_bb);
          }
        while (cfg_bb);

        // Create the EBB itself.
        auto *ebb = allocate<ebb_info> (bbs[0], bbs.last ());
        for (bb_info *bb : bbs)
          bb->set_ebb (ebb);
        bbs.truncate (0);
      }

  delete[] postorder;
}

// Partition the function's blocks into EBBs and build SSA form for all
// EBBs in the function.
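//
// The bb_walker traversal uses the revised RPO recorded by create_ebbs,
// which gives consecutive numbers to the blocks of an EBB, so that those
// blocks are walked consecutively, as start_block requires.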
void
function_info::process_all_blocks ()
{
  auto temps = temp_watermark ();
  unsigned int num_bb_indices = last_basic_block_for_fn (m_fn);

  build_info bi (m_num_regs, num_bb_indices);

  calculate_potential_phi_regs (bi);
  create_ebbs (bi);
  place_phis (bi);
  bb_walker (this, bi).walk (ENTRY_BLOCK_PTR_FOR_FN (m_fn));
  populate_phi_inputs (bi);

  if (flag_checking)
    {
      // The definition stack should be empty and all register definitions
      // should be back in their original undefined state.
      gcc_assert (bi.def_stack.is_empty ()
                  && bi.old_def_stack_limit.is_empty ());
      for (unsigned int regno = 0; regno < m_num_regs; ++regno)
        gcc_assert (!bi.last_access[regno + 1]);
    }
}

// Print a description of CALL_CLOBBERS to PP.
void
rtl_ssa::pp_ebb_call_clobbers (pretty_printer *pp,
                               const ebb_call_clobbers_info *call_clobbers)
{
  if (!call_clobbers)
    pp_string (pp, "<null>");
  else
    call_clobbers->print_full (pp);
}

// Print a description of BB to PP.
void
rtl_ssa::pp_bb (pretty_printer *pp, const bb_info *bb)
{
  if (!bb)
    pp_string (pp, "<null>");
  else
    bb->print_full (pp);
}

// Print a description of EBB to PP.
void
rtl_ssa::pp_ebb (pretty_printer *pp, const ebb_info *ebb)
{
  if (!ebb)
    pp_string (pp, "<null>");
  else
    ebb->print_full (pp);
}

// Print a description of CALL_CLOBBERS to FILE.
void
dump (FILE *file, const ebb_call_clobbers_info *call_clobbers)
{
  dump_using (file, pp_ebb_call_clobbers, call_clobbers);
}

// Print a description of BB to FILE.
void
dump (FILE *file, const bb_info *bb)
{
  dump_using (file, pp_bb, bb);
}

// Print a description of EBB to FILE.
void
dump (FILE *file, const ebb_info *ebb)
{
  dump_using (file, pp_ebb, ebb);
}

// Debug interfaces to the dump routines above.
void debug (const ebb_call_clobbers_info *x) { dump (stderr, x); }
void debug (const bb_info *x) { dump (stderr, x); }
void debug (const ebb_info *x) { dump (stderr, x); }