/* Allocation for dataflow support routines.
   Copyright (C) 1999-2016 Free Software Foundation, Inc.
   Originally contributed by Michael P. Hayes
             (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
   Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
   and Kenneth Zadeck (zadeck@naturalbridge.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/*
OVERVIEW:

The files in this collection (df*.c, df.h) provide a general framework
for solving dataflow problems.  The global dataflow is performed using
a good implementation of iterative dataflow analysis.

The file df-problems.c provides problem instances for the most common
dataflow problems: reaching defs, upward exposed uses, live variables,
uninitialized variables, def-use chains, and use-def chains.  However,
the interface allows other dataflow problems to be defined as well.

Dataflow analysis is available in most of the rtl backend (the parts
between pass_df_initialize and pass_df_finish).  It is quite likely
that these boundaries will be expanded in the future.  The only
requirement is that there be a correct control flow graph.

There are three variations of the live variable problem that are
available whenever dataflow is available.  The LR problem finds the
areas that can reach a use of a variable, the UR problem finds the
areas that can be reached from a definition of a variable.  The LIVE
problem finds the intersection of these two areas.

There are several optional problems.  These can be enabled when they
are needed and disabled when they are not needed.

Dataflow problems are generally solved in three layers.  The bottom
layer is called scanning, where a data structure is built for each rtl
insn that describes the set of defs and uses of that insn.  Scanning
is generally kept up to date, i.e. as the insns change, the scanned
version of that insn changes also.  There are various mechanisms for
making this happen; they are described in the INCREMENTAL SCANNING
section.

In the middle layer, basic blocks are scanned to produce transfer
functions which describe the effects of that block on the global
dataflow solution.  The transfer functions are only rebuilt if some
instruction within the block has changed.

The top layer is the dataflow solution itself.  The dataflow solution
is computed by using an efficient iterative solver and the transfer
functions.  The dataflow solution must be recomputed whenever the
control flow changes or one of the transfer functions changes.
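For example, once df_analyze has run, the top layer solution can be
read directly off the basic blocks.  The sketch below is illustrative
only (it is not taken from an existing pass); it assumes just the
DF_LR_IN and DF_LIVE_OUT accessor macros from df.h, and the function
name is hypothetical:

      // A minimal sketch: query the LR and LIVE solutions for one block.
      static bool
      regno_live_around_block_p (basic_block bb, unsigned int regno)
      {
        // Live on entry to BB according to the LR problem.
        bool live_in = bitmap_bit_p (DF_LR_IN (bb), regno);
        // Live on exit according to the optional LIVE problem, which is
        // only present when it has been added (e.g. at -O2 and above).
        bool live_out = df_live && bitmap_bit_p (DF_LIVE_OUT (bb), regno);
        return live_in || live_out;
      }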
USAGE:

Here is an example of using the dataflow routines.

      df_[chain,live,note,rd]_add_problem (flags);

      df_set_blocks (blocks);

      df_analyze ();

      df_dump (stderr);

      df_finish_pass (false);

DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
instance of struct df_problem, to the set of problems solved in this
instance of df.  All calls to add a problem for a given instance of df
must occur before the first call to DF_ANALYZE.

Problems can be dependent on other problems.  For instance, solving
def-use or use-def chains is dependent on solving reaching
definitions.  As long as these dependencies are listed in the problem
definition, the order of adding the problems is not material.
Otherwise, the problems will be solved in the order of calls to
df_add_problem.  Note that it is not necessary to have a problem.  In
that case, df will just be used to do the scanning.

DF_SET_BLOCKS is an optional call used to define a region of the
function on which the analysis will be performed.  The normal case is
to analyze the entire function and no call to df_set_blocks is made.
DF_SET_BLOCKS only affects the blocks that are considered when
computing the transfer functions and the final solution.  The insn
level information is always kept up to date.

When a subset is given, the analysis behaves as if the function only
contains those blocks and any edges that occur directly between the
blocks in the set.  Care should be taken to call df_set_blocks right
before the call to df_analyze in order to eliminate the possibility
that optimizations that reorder blocks invalidate the bitvector.

DF_ANALYZE causes all of the defined problems to be (re)solved.  When
DF_ANALYZE completes, the IN and OUT sets for each basic block contain
the computed information.  The DF_*_BB_INFO macros can be used to
access these bitvectors.  All deferred rescans are done before the
transfer functions are recomputed.

DF_DUMP can then be called to dump the information produced to some
file.  This calls DF_DUMP_START, to print the information that is not
basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
for each block to print the basic block specific information.  These
parts can all be called separately as part of a larger dump function.

DF_FINISH_PASS causes df_remove_problem to be called on all of the
optional problems.  It also causes any insns whose scanning has been
deferred to be rescanned, and clears all of the changeable flags.
Setting the pass manager TODO_df_finish flag causes this function to
be run.  However, the pass manager will call df_finish_pass AFTER the
pass dumping has been done, so if you want to see the results of the
optional problems in the pass dumps, use the TODO flag rather than
calling the function yourself.
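Putting these calls together, the body of a (purely hypothetical) pass
might look like the sketch below.  The particular problems added, the
use of dump_file and the function name are illustrative only; the df
calls themselves are the ones described above:

      // Hypothetical pass body: add optional problems, solve, dump,
      // transform, then drop the optional problems again.
      static unsigned int
      my_df_pass_execute (void)
      {
        df_chain_add_problem (DF_DU_CHAIN);  // def-use chains (optional)
        df_note_add_problem ();              // REG_DEAD/REG_UNUSED notes
        df_analyze ();                       // (re)solve all added problems

        if (dump_file)
          df_dump (dump_file);               // dump the solved problems

        // ... transform insns here, rescanning them as they change ...

        df_finish_pass (false);              // remove the optional problems
        return 0;
      }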
INCREMENTAL SCANNING

There are four ways of doing the incremental scanning:

1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
   df_bb_delete, and df_insn_change_bb have been added to most of the
   low level service functions that maintain the cfg and change rtl.
   Calling any of these routines may cause some number of insns to be
   rescanned.

   For most modern rtl passes, this is certainly the easiest way to
   manage rescanning the insns.  This technique also has the advantage
   that the scanning information is always correct and can be relied
   upon even after changes have been made to the instructions.  This
   technique is contraindicated in several cases:

   a) If def-use chains OR use-def chains (but not both) are built,
      using this is SIMPLY WRONG.  The problem is that when a ref is
      deleted that is the target of an edge, there is not enough
      information to efficiently find the source of the edge and
      delete the edge.  This leaves a dangling reference that may
      cause problems.

   b) If def-use chains AND use-def chains are built, this may
      produce unexpected results.  The problem is that the incremental
      scanning of an insn does not know how to repair the chains that
      point into an insn when the insn changes.  So the incremental
      scanning just deletes the chains that enter and exit the insn
      being changed.  The dangling reference issue in (a) is not a
      problem here, but if the pass is depending on the chains being
      maintained after insns have been modified, this technique will
      not do the correct thing.

   c) If the pass modifies insns several times, this incremental
      updating may be expensive.

   d) If the pass modifies all of the insns, as does register
      allocation, it is simply better to rescan the entire function.

2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
   df_insn_delete do not immediately change the insn but instead make
   a note that the insn needs to be rescanned.  The next call to
   df_analyze, df_finish_pass, or df_process_deferred_rescans will
   cause all of the pending rescans to be processed.

   This is the technique of choice if either 1a, 1b, or 1c are issues
   in the pass.  In the case of 1a or 1b, a call to df_finish_pass
   (either manually or via TODO_df_finish) should be made before the
   next call to df_analyze or df_process_deferred_rescans.

   This mode is also used by a few passes that still rely on note_uses,
   note_stores and rtx iterators instead of using the DF data.  This
   can be said to fall under case 1c.

   To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN); a
   sketch of this mode appears after this list.  (This mode can be
   cleared by calling df_clear_flags (DF_DEFER_INSN_RESCAN), but this
   does not cause the deferred insns to be rescanned.)

3) Total rescanning - In this mode the rescanning is disabled.  Only
   when an insn is deleted is the df information associated with it
   also deleted.  At the end of the pass, a call must be made to
   df_insn_rescan_all.  This method is used by the register allocator
   since it generally changes each insn multiple times (once for each
   ref) and does not need to make use of the updated scanning
   information.

4) Do it yourself - In this mechanism, the pass updates the insns
   itself using the low level df primitives.  Currently no pass does
   this, but it has the advantage that it is quite efficient given
   that the pass generally has exact knowledge of what it is changing.
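As an illustration of mode 2, a pass that prefers deferred rescanning
could be structured as in the sketch below.  This is not taken from an
existing pass: the df_set_flags, df_insn_rescan,
df_process_deferred_rescans and df_clear_flags calls are the real
interface, while my_rewrite_insn and the function name are
hypothetical stand-ins:

      // Hypothetical use of deferred rescanning (mode 2 above).
      static void
      transform_insns_with_deferred_rescan (void)
      {
        basic_block bb;
        rtx_insn *insn;

        // Queue rescans instead of performing them immediately.
        df_set_flags (DF_DEFER_INSN_RESCAN);

        FOR_EACH_BB_FN (bb, cfun)
          FOR_BB_INSNS (bb, insn)
            if (INSN_P (insn) && my_rewrite_insn (insn))  // hypothetical
              df_insn_rescan (insn);   // only queues the rescan in this mode

        // Flush the queued rescans and return to immediate rescanning.
        df_process_deferred_rescans ();
        df_clear_flags (DF_DEFER_INSN_RESCAN);
      }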
DATA STRUCTURES

Scanning produces a `struct df_ref' data structure (ref) for every
register reference (def or use); the ref records the insn and bb it is
found within.  The refs are linked together in chains of uses and defs
for each insn and for each register.  Each ref also has a chain field
that links all the use refs for a def or all the def refs for a use.
This is used to create use-def or def-use chains.

Different optimizations have different needs.  Ultimately, only
register allocation and schedulers should be using the bitmaps
produced for the live register and uninitialized register problems.
The rest of the backend should be upgraded to using and maintaining
the linked information such as def-use or use-def chains.


PHILOSOPHY:

While incremental bitmaps are not worthwhile to maintain, incremental
chains may be perfectly reasonable.  The fastest way to build chains
from scratch or after significant modifications is to build reaching
definitions (RD) and build the chains from this.

However, general algorithms for maintaining use-def or def-use chains
are not practical.  The amount of work to recompute any chain after an
arbitrary change is large.  However, with a modest amount of work it
is generally possible to have the application that uses the chains
keep them up to date.  The high level knowledge of what is really
happening is essential to crafting efficient incremental algorithms.

As for the bit vector problems, there is no interface to give a set of
blocks over which to resolve the iteration.  In general, restarting a
dataflow iteration is difficult and expensive.  Again, the best way to
keep the dataflow information up to date (if this is really what is
needed) is to formulate a problem specific solution.

There are fine grained calls for creating and deleting references from
instructions in df-scan.c.  However, these are not currently connected
to the engine that resolves the dataflow equations.


DATA STRUCTURES:

The basic object is a DF_REF (reference) and this may either be a
DEF (definition) or a USE of a register.

These are linked into a variety of lists; namely reg-def, reg-use,
insn-def, insn-use, def-use, and use-def lists.  For example, the
reg-def lists contain all the locations that define a given register
while the insn-use lists contain all the locations that use a
register.

Note that the reg-def and reg-use chains are generally short for
pseudos and long for the hard registers.

ACCESSING INSNS:

1) The df insn information is kept in an array of DF_INSN_INFO objects.
   The array is indexed by insn uid, and every DF_REF points to the
   DF_INSN_INFO object of the insn that contains the reference.

2) Each insn has three sets of refs, which are linked into one of three
   lists: the insn's defs list (accessed by the DF_INSN_INFO_DEFS,
   DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
   (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
   DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
   DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
   The latter list is the list of references in REG_EQUAL or REG_EQUIV
   notes.  These macros produce a ref (or NULL); the rest of the list
   can be obtained by traversal of the NEXT_REF field (accessed by the
   DF_REF_NEXT_REF macro).  There is no significance to the ordering of
   the uses or refs in an instruction.  (A traversal sketch appears
   after this list.)

3) Each insn has a logical uid field (LUID) which is stored in the
   DF_INSN_INFO object for the insn.  The LUID field is accessed by
   the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
   When properly set, the LUID is an integer that numbers each insn in
   the basic block, in order from the start of the block.
   The numbers are only correct after a call to df_analyze.  They will
   rot after insns are added, deleted or moved around.
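As an illustration of item 2 above, the sketch below walks the uses
list of a single insn by hand.  It is illustrative only; it assumes
just the DF_INSN_USES, DF_REF_NEXT_REF and DF_REF_REGNO accessors, and
the FOR_EACH_INSN_USE / FOR_EACH_INSN_DEF iterators in df.h express
the same walk more compactly:

      // Count the uses of register REGNO inside INSN.
      static int
      count_insn_uses_of_regno (rtx_insn *insn, unsigned int regno)
      {
        int count = 0;
        // DF_INSN_USES gives the first use; DF_REF_NEXT_REF walks the rest.
        for (df_ref use = DF_INSN_USES (insn); use; use = DF_REF_NEXT_REF (use))
          if (DF_REF_REGNO (use) == regno)
            count++;
        return count;
      }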
ACCESSING REFS:

There are 4 ways to obtain access to refs:

1) References are divided into two categories, REAL and ARTIFICIAL.

   REAL refs are associated with instructions.

   ARTIFICIAL refs are associated with basic blocks.  The heads of
   these lists can be accessed by calling df_get_artificial_defs or
   df_get_artificial_uses for the particular basic block.

   Artificial defs and uses occur both at the beginning and ends of blocks.

     For blocks that are at the destination of eh edges, the
     artificial uses and defs occur at the beginning.  The defs relate
     to the registers specified in EH_RETURN_DATA_REGNO and the uses
     relate to the registers specified in EH_USES.  Logically these
     defs and uses should really occur along the eh edge, but there is
     no convenient way to do this.  Artificial refs that occur at the
     beginning of the block have the DF_REF_AT_TOP flag set.

     Artificial uses occur at the end of all blocks.  These arise from
     the hard registers that are always live, such as the stack
     register, and are put there to keep the code from forgetting about
     them.

     Artificial defs occur at the end of the entry block.  These arise
     from registers that are live at entry to the function.

2) There are three types of refs: defs, uses and eq_uses.  (Eq_uses are
   uses that appear inside a REG_EQUAL or REG_EQUIV note.)

   All of the eq_uses, uses and defs associated with each pseudo or
   hard register may be linked in a bidirectional chain.  These are
   called reg-use or reg-def chains.  If the changeable flag
   DF_EQ_NOTES is set when the chains are built, the eq_uses will be
   treated like uses.  If it is not set they are ignored.

   The first use, eq_use or def for a register can be obtained using
   the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
   macros.  Subsequent uses for the same regno can be obtained by
   following the next_reg field of the ref.  The number of elements in
   each of the chains can be found by using the DF_REG_USE_COUNT,
   DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.  (A traversal
   sketch appears after this list.)

   In previous versions of this code, these chains were ordered.  It
   has not been practical to continue this practice.

3) If def-use or use-def chains are built, these can be traversed to
   get to other refs.  If the flag DF_EQ_NOTES has been set, the chains
   include the eq_uses.  Otherwise these are ignored when building the
   chains.

4) An array of all of the uses (and an array of all of the defs) can
   be built.  These arrays are indexed by the value in the id
   structure.  These arrays are only lazily kept up to date, and that
   process can be expensive.  To have these arrays built, call
   df_reorganize_defs or df_reorganize_uses.  If the flag DF_EQ_NOTES
   has been set the array will contain the eq_uses.  Otherwise these
   are ignored when building the array and assigning the ids.  Note
   that the values in the id field of a ref may change across calls to
   df_analyze or df_reorganize_defs or df_reorganize_uses.

   If the only use of this array is to find all of the refs, it is
   better to traverse all of the registers and then traverse the
   reg-use or reg-def chains.
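As an illustration of item 2 above, the reg-def chain of a register
can be walked with the DF_REG_DEF_CHAIN and DF_REF_NEXT_REG accessors.
The sketch below is illustrative only and assumes just those macros
plus DF_REF_IS_ARTIFICIAL; the function name is hypothetical:

      // Count the defs of REGNO that sit in real insns (artificial
      // defs belong to basic blocks rather than insns).
      static int
      count_real_defs_of_regno (unsigned int regno)
      {
        int count = 0;
        for (df_ref def = DF_REG_DEF_CHAIN (regno); def;
             def = DF_REF_NEXT_REG (def))
          if (!DF_REF_IS_ARTIFICIAL (def))
            count++;
        return count;
      }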
NOTES:

Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
both a use and a def.  These are both marked read/write to show that they
are dependent.  For example, (set (reg 40) (mem (post_inc (reg 42))))
will generate a use of reg 42 followed by a def of reg 42 (both marked
read/write).  Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
generates a use of reg 41 then a def of reg 41 (both marked read/write),
even though reg 41 is decremented before it is used for the memory
address in this second example.

A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
for which the number of word_mode units covered by the outer mode is
smaller than that covered by the inner mode, invokes a read-modify-write
operation.  We generate both a use and a def and again mark them
read/write.

Paradoxical subreg writes do not leave a trace of the old content, so they
are write-only operations.
*/


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "df.h"
#include "emit-rtl.h"
#include "cfganal.h"
#include "tree-pass.h"
#include "cfgloop.h"

static void *df_get_bb_info (struct dataflow *, unsigned int);
static void df_set_bb_info (struct dataflow *, unsigned int, void *);
static void df_clear_bb_info (struct dataflow *, unsigned int);
#ifdef DF_DEBUG_CFG
static void df_set_clean_cfg (void);
#endif

/* The obstack on which regsets are allocated.  */
struct bitmap_obstack reg_obstack;

/* An obstack for bitmaps not related to specific dataflow problems.
   This obstack should e.g. be used for bitmaps with a short life time
   such as temporary bitmaps.  */

bitmap_obstack df_bitmap_obstack;


/*----------------------------------------------------------------------------
  Functions to create, destroy and manipulate an instance of df.
----------------------------------------------------------------------------*/

struct df_d *df;

/* Add PROBLEM (and any dependent problems) to the DF instance.  */

void
df_add_problem (struct df_problem *problem)
{
  struct dataflow *dflow;
  int i;

  /* First try to add the dependent problem.  */
  if (problem->dependent_problem)
    df_add_problem (problem->dependent_problem);

  /* Check to see if this problem has already been defined.  If it
     has, just return that instance; if not, add it to the end of the
     vector.  */
  dflow = df->problems_by_index[problem->id];
  if (dflow)
    return;

  /* Make a new one and add it to the end.  */
  dflow = XCNEW (struct dataflow);
  dflow->problem = problem;
  dflow->computed = false;
  dflow->solutions_dirty = true;
  df->problems_by_index[dflow->problem->id] = dflow;

  /* Keep the defined problems ordered by index.  This solves the
     problem that RI will use the information from UREC if UREC has
     been defined, or from LIVE if LIVE is defined and otherwise LR.
     However for this to work, the computation of RI must be pushed
     after whichever of those problems is defined, but we do not
     require any of those except for LR to have actually been
     defined.
*/ 444 df->num_problems_defined++; 445 for (i = df->num_problems_defined - 2; i >= 0; i--) 446 { 447 if (problem->id < df->problems_in_order[i]->problem->id) 448 df->problems_in_order[i+1] = df->problems_in_order[i]; 449 else 450 { 451 df->problems_in_order[i+1] = dflow; 452 return; 453 } 454 } 455 df->problems_in_order[0] = dflow; 456 } 457 458 459 /* Set the MASK flags in the DFLOW problem. The old flags are 460 returned. If a flag is not allowed to be changed this will fail if 461 checking is enabled. */ 462 int 463 df_set_flags (int changeable_flags) 464 { 465 int old_flags = df->changeable_flags; 466 df->changeable_flags |= changeable_flags; 467 return old_flags; 468 } 469 470 471 /* Clear the MASK flags in the DFLOW problem. The old flags are 472 returned. If a flag is not allowed to be changed this will fail if 473 checking is enabled. */ 474 int 475 df_clear_flags (int changeable_flags) 476 { 477 int old_flags = df->changeable_flags; 478 df->changeable_flags &= ~changeable_flags; 479 return old_flags; 480 } 481 482 483 /* Set the blocks that are to be considered for analysis. If this is 484 not called or is called with null, the entire function in 485 analyzed. */ 486 487 void 488 df_set_blocks (bitmap blocks) 489 { 490 if (blocks) 491 { 492 if (dump_file) 493 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n"); 494 if (df->blocks_to_analyze) 495 { 496 /* This block is called to change the focus from one subset 497 to another. */ 498 int p; 499 bitmap_head diff; 500 bitmap_initialize (&diff, &df_bitmap_obstack); 501 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks); 502 for (p = 0; p < df->num_problems_defined; p++) 503 { 504 struct dataflow *dflow = df->problems_in_order[p]; 505 if (dflow->optional_p && dflow->problem->reset_fun) 506 dflow->problem->reset_fun (df->blocks_to_analyze); 507 else if (dflow->problem->free_blocks_on_set_blocks) 508 { 509 bitmap_iterator bi; 510 unsigned int bb_index; 511 512 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi) 513 { 514 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index); 515 if (bb) 516 { 517 void *bb_info = df_get_bb_info (dflow, bb_index); 518 dflow->problem->free_bb_fun (bb, bb_info); 519 df_clear_bb_info (dflow, bb_index); 520 } 521 } 522 } 523 } 524 525 bitmap_clear (&diff); 526 } 527 else 528 { 529 /* This block of code is executed to change the focus from 530 the entire function to a subset. */ 531 bitmap_head blocks_to_reset; 532 bool initialized = false; 533 int p; 534 for (p = 0; p < df->num_problems_defined; p++) 535 { 536 struct dataflow *dflow = df->problems_in_order[p]; 537 if (dflow->optional_p && dflow->problem->reset_fun) 538 { 539 if (!initialized) 540 { 541 basic_block bb; 542 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack); 543 FOR_ALL_BB_FN (bb, cfun) 544 { 545 bitmap_set_bit (&blocks_to_reset, bb->index); 546 } 547 } 548 dflow->problem->reset_fun (&blocks_to_reset); 549 } 550 } 551 if (initialized) 552 bitmap_clear (&blocks_to_reset); 553 554 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack); 555 } 556 bitmap_copy (df->blocks_to_analyze, blocks); 557 df->analyze_subset = true; 558 } 559 else 560 { 561 /* This block is executed to reset the focus to the entire 562 function. 
*/ 563 if (dump_file) 564 fprintf (dump_file, "clearing blocks_to_analyze\n"); 565 if (df->blocks_to_analyze) 566 { 567 BITMAP_FREE (df->blocks_to_analyze); 568 df->blocks_to_analyze = NULL; 569 } 570 df->analyze_subset = false; 571 } 572 573 /* Setting the blocks causes the refs to be unorganized since only 574 the refs in the blocks are seen. */ 575 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE); 576 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE); 577 df_mark_solutions_dirty (); 578 } 579 580 581 /* Delete a DFLOW problem (and any problems that depend on this 582 problem). */ 583 584 void 585 df_remove_problem (struct dataflow *dflow) 586 { 587 struct df_problem *problem; 588 int i; 589 590 if (!dflow) 591 return; 592 593 problem = dflow->problem; 594 gcc_assert (problem->remove_problem_fun); 595 596 /* Delete any problems that depended on this problem first. */ 597 for (i = 0; i < df->num_problems_defined; i++) 598 if (df->problems_in_order[i]->problem->dependent_problem == problem) 599 df_remove_problem (df->problems_in_order[i]); 600 601 /* Now remove this problem. */ 602 for (i = 0; i < df->num_problems_defined; i++) 603 if (df->problems_in_order[i] == dflow) 604 { 605 int j; 606 for (j = i + 1; j < df->num_problems_defined; j++) 607 df->problems_in_order[j-1] = df->problems_in_order[j]; 608 df->problems_in_order[j-1] = NULL; 609 df->num_problems_defined--; 610 break; 611 } 612 613 (problem->remove_problem_fun) (); 614 df->problems_by_index[problem->id] = NULL; 615 } 616 617 618 /* Remove all of the problems that are not permanent. Scanning, LR 619 and (at -O2 or higher) LIVE are permanent, the rest are removable. 620 Also clear all of the changeable_flags. */ 621 622 void 623 df_finish_pass (bool verify ATTRIBUTE_UNUSED) 624 { 625 int i; 626 627 #ifdef ENABLE_DF_CHECKING 628 int saved_flags; 629 #endif 630 631 if (!df) 632 return; 633 634 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE); 635 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE); 636 637 #ifdef ENABLE_DF_CHECKING 638 saved_flags = df->changeable_flags; 639 #endif 640 641 /* We iterate over problems by index as each problem removed will 642 lead to problems_in_order to be reordered. */ 643 for (i = 0; i < DF_LAST_PROBLEM_PLUS1; i++) 644 { 645 struct dataflow *dflow = df->problems_by_index[i]; 646 647 if (dflow && dflow->optional_p) 648 df_remove_problem (dflow); 649 } 650 651 /* Clear all of the flags. */ 652 df->changeable_flags = 0; 653 df_process_deferred_rescans (); 654 655 /* Set the focus back to the whole function. */ 656 if (df->blocks_to_analyze) 657 { 658 BITMAP_FREE (df->blocks_to_analyze); 659 df->blocks_to_analyze = NULL; 660 df_mark_solutions_dirty (); 661 df->analyze_subset = false; 662 } 663 664 #ifdef ENABLE_DF_CHECKING 665 /* Verification will fail in DF_NO_INSN_RESCAN. */ 666 if (!(saved_flags & DF_NO_INSN_RESCAN)) 667 { 668 df_lr_verify_transfer_functions (); 669 if (df_live) 670 df_live_verify_transfer_functions (); 671 } 672 673 #ifdef DF_DEBUG_CFG 674 df_set_clean_cfg (); 675 #endif 676 #endif 677 678 if (flag_checking && verify) 679 df->changeable_flags |= DF_VERIFY_SCHEDULED; 680 } 681 682 683 /* Set up the dataflow instance for the entire back end. */ 684 685 static unsigned int 686 rest_of_handle_df_initialize (void) 687 { 688 gcc_assert (!df); 689 df = XCNEW (struct df_d); 690 df->changeable_flags = 0; 691 692 bitmap_obstack_initialize (&df_bitmap_obstack); 693 694 /* Set this to a conservative value. Stack_ptr_mod will compute it 695 correctly later. 
*/ 696 crtl->sp_is_unchanging = 0; 697 698 df_scan_add_problem (); 699 df_scan_alloc (NULL); 700 701 /* These three problems are permanent. */ 702 df_lr_add_problem (); 703 if (optimize > 1) 704 df_live_add_problem (); 705 706 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun)); 707 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun)); 708 df->n_blocks = post_order_compute (df->postorder, true, true); 709 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted); 710 gcc_assert (df->n_blocks == df->n_blocks_inverted); 711 712 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER); 713 714 df_hard_reg_init (); 715 /* After reload, some ports add certain bits to regs_ever_live so 716 this cannot be reset. */ 717 df_compute_regs_ever_live (true); 718 df_scan_blocks (); 719 df_compute_regs_ever_live (false); 720 return 0; 721 } 722 723 724 namespace { 725 726 const pass_data pass_data_df_initialize_opt = 727 { 728 RTL_PASS, /* type */ 729 "dfinit", /* name */ 730 OPTGROUP_NONE, /* optinfo_flags */ 731 TV_DF_SCAN, /* tv_id */ 732 0, /* properties_required */ 733 0, /* properties_provided */ 734 0, /* properties_destroyed */ 735 0, /* todo_flags_start */ 736 0, /* todo_flags_finish */ 737 }; 738 739 class pass_df_initialize_opt : public rtl_opt_pass 740 { 741 public: 742 pass_df_initialize_opt (gcc::context *ctxt) 743 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt) 744 {} 745 746 /* opt_pass methods: */ 747 virtual bool gate (function *) { return optimize > 0; } 748 virtual unsigned int execute (function *) 749 { 750 return rest_of_handle_df_initialize (); 751 } 752 753 }; // class pass_df_initialize_opt 754 755 } // anon namespace 756 757 rtl_opt_pass * 758 make_pass_df_initialize_opt (gcc::context *ctxt) 759 { 760 return new pass_df_initialize_opt (ctxt); 761 } 762 763 764 namespace { 765 766 const pass_data pass_data_df_initialize_no_opt = 767 { 768 RTL_PASS, /* type */ 769 "no-opt dfinit", /* name */ 770 OPTGROUP_NONE, /* optinfo_flags */ 771 TV_DF_SCAN, /* tv_id */ 772 0, /* properties_required */ 773 0, /* properties_provided */ 774 0, /* properties_destroyed */ 775 0, /* todo_flags_start */ 776 0, /* todo_flags_finish */ 777 }; 778 779 class pass_df_initialize_no_opt : public rtl_opt_pass 780 { 781 public: 782 pass_df_initialize_no_opt (gcc::context *ctxt) 783 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt) 784 {} 785 786 /* opt_pass methods: */ 787 virtual bool gate (function *) { return optimize == 0; } 788 virtual unsigned int execute (function *) 789 { 790 return rest_of_handle_df_initialize (); 791 } 792 793 }; // class pass_df_initialize_no_opt 794 795 } // anon namespace 796 797 rtl_opt_pass * 798 make_pass_df_initialize_no_opt (gcc::context *ctxt) 799 { 800 return new pass_df_initialize_no_opt (ctxt); 801 } 802 803 804 /* Free all the dataflow info and the DF structure. This should be 805 called from the df_finish macro which also NULLs the parm. 
*/ 806 807 static unsigned int 808 rest_of_handle_df_finish (void) 809 { 810 int i; 811 812 gcc_assert (df); 813 814 for (i = 0; i < df->num_problems_defined; i++) 815 { 816 struct dataflow *dflow = df->problems_in_order[i]; 817 dflow->problem->free_fun (); 818 } 819 820 free (df->postorder); 821 free (df->postorder_inverted); 822 free (df->hard_regs_live_count); 823 free (df); 824 df = NULL; 825 826 bitmap_obstack_release (&df_bitmap_obstack); 827 return 0; 828 } 829 830 831 namespace { 832 833 const pass_data pass_data_df_finish = 834 { 835 RTL_PASS, /* type */ 836 "dfinish", /* name */ 837 OPTGROUP_NONE, /* optinfo_flags */ 838 TV_NONE, /* tv_id */ 839 0, /* properties_required */ 840 0, /* properties_provided */ 841 0, /* properties_destroyed */ 842 0, /* todo_flags_start */ 843 0, /* todo_flags_finish */ 844 }; 845 846 class pass_df_finish : public rtl_opt_pass 847 { 848 public: 849 pass_df_finish (gcc::context *ctxt) 850 : rtl_opt_pass (pass_data_df_finish, ctxt) 851 {} 852 853 /* opt_pass methods: */ 854 virtual unsigned int execute (function *) 855 { 856 return rest_of_handle_df_finish (); 857 } 858 859 }; // class pass_df_finish 860 861 } // anon namespace 862 863 rtl_opt_pass * 864 make_pass_df_finish (gcc::context *ctxt) 865 { 866 return new pass_df_finish (ctxt); 867 } 868 869 870 871 872 873 /*---------------------------------------------------------------------------- 874 The general data flow analysis engine. 875 ----------------------------------------------------------------------------*/ 876 877 /* Return time BB when it was visited for last time. */ 878 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux) 879 880 /* Helper function for df_worklist_dataflow. 881 Propagate the dataflow forward. 882 Given a BB_INDEX, do the dataflow propagation 883 and set bits on for successors in PENDING 884 if the out set of the dataflow has changed. 885 886 AGE specify time when BB was visited last time. 887 AGE of 0 means we are visiting for first time and need to 888 compute transfer function to initialize datastructures. 889 Otherwise we re-do transfer function only if something change 890 while computing confluence functions. 891 We need to compute confluence only of basic block that are younger 892 then last visit of the BB. 893 894 Return true if BB info has changed. This is always the case 895 in the first visit. */ 896 897 static bool 898 df_worklist_propagate_forward (struct dataflow *dataflow, 899 unsigned bb_index, 900 unsigned *bbindex_to_postorder, 901 bitmap pending, 902 sbitmap considered, 903 ptrdiff_t age) 904 { 905 edge e; 906 edge_iterator ei; 907 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index); 908 bool changed = !age; 909 910 /* Calculate <conf_op> of incoming edges. */ 911 if (EDGE_COUNT (bb->preds) > 0) 912 FOR_EACH_EDGE (e, ei, bb->preds) 913 { 914 if (age <= BB_LAST_CHANGE_AGE (e->src) 915 && bitmap_bit_p (considered, e->src->index)) 916 changed |= dataflow->problem->con_fun_n (e); 917 } 918 else if (dataflow->problem->con_fun_0) 919 dataflow->problem->con_fun_0 (bb); 920 921 if (changed 922 && dataflow->problem->trans_fun (bb_index)) 923 { 924 /* The out set of this block has changed. 925 Propagate to the outgoing blocks. */ 926 FOR_EACH_EDGE (e, ei, bb->succs) 927 { 928 unsigned ob_index = e->dest->index; 929 930 if (bitmap_bit_p (considered, ob_index)) 931 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]); 932 } 933 return true; 934 } 935 return false; 936 } 937 938 939 /* Helper function for df_worklist_dataflow. 
940 Propagate the dataflow backward. */ 941 942 static bool 943 df_worklist_propagate_backward (struct dataflow *dataflow, 944 unsigned bb_index, 945 unsigned *bbindex_to_postorder, 946 bitmap pending, 947 sbitmap considered, 948 ptrdiff_t age) 949 { 950 edge e; 951 edge_iterator ei; 952 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index); 953 bool changed = !age; 954 955 /* Calculate <conf_op> of incoming edges. */ 956 if (EDGE_COUNT (bb->succs) > 0) 957 FOR_EACH_EDGE (e, ei, bb->succs) 958 { 959 if (age <= BB_LAST_CHANGE_AGE (e->dest) 960 && bitmap_bit_p (considered, e->dest->index)) 961 changed |= dataflow->problem->con_fun_n (e); 962 } 963 else if (dataflow->problem->con_fun_0) 964 dataflow->problem->con_fun_0 (bb); 965 966 if (changed 967 && dataflow->problem->trans_fun (bb_index)) 968 { 969 /* The out set of this block has changed. 970 Propagate to the outgoing blocks. */ 971 FOR_EACH_EDGE (e, ei, bb->preds) 972 { 973 unsigned ob_index = e->src->index; 974 975 if (bitmap_bit_p (considered, ob_index)) 976 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]); 977 } 978 return true; 979 } 980 return false; 981 } 982 983 /* Main dataflow solver loop. 984 985 DATAFLOW is problem we are solving, PENDING is worklist of basic blocks we 986 need to visit. 987 BLOCK_IN_POSTORDER is array of size N_BLOCKS specifying postorder in BBs and 988 BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder position. 989 PENDING will be freed. 990 991 The worklists are bitmaps indexed by postorder positions. 992 993 The function implements standard algorithm for dataflow solving with two 994 worklists (we are processing WORKLIST and storing new BBs to visit in 995 PENDING). 996 997 As an optimization we maintain ages when BB was changed (stored in bb->aux) 998 and when it was last visited (stored in last_visit_age). This avoids need 999 to re-do confluence function for edges to basic blocks whose source 1000 did not change since destination was visited last time. */ 1001 1002 static void 1003 df_worklist_dataflow_doublequeue (struct dataflow *dataflow, 1004 bitmap pending, 1005 sbitmap considered, 1006 int *blocks_in_postorder, 1007 unsigned *bbindex_to_postorder, 1008 int n_blocks) 1009 { 1010 enum df_flow_dir dir = dataflow->problem->dir; 1011 int dcount = 0; 1012 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack); 1013 int age = 0; 1014 bool changed; 1015 vec<int> last_visit_age = vNULL; 1016 int prev_age; 1017 basic_block bb; 1018 int i; 1019 1020 last_visit_age.safe_grow_cleared (n_blocks); 1021 1022 /* Double-queueing. Worklist is for the current iteration, 1023 and pending is for the next. 
*/ 1024 while (!bitmap_empty_p (pending)) 1025 { 1026 bitmap_iterator bi; 1027 unsigned int index; 1028 1029 std::swap (pending, worklist); 1030 1031 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi) 1032 { 1033 unsigned bb_index; 1034 dcount++; 1035 1036 bitmap_clear_bit (pending, index); 1037 bb_index = blocks_in_postorder[index]; 1038 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index); 1039 prev_age = last_visit_age[index]; 1040 if (dir == DF_FORWARD) 1041 changed = df_worklist_propagate_forward (dataflow, bb_index, 1042 bbindex_to_postorder, 1043 pending, considered, 1044 prev_age); 1045 else 1046 changed = df_worklist_propagate_backward (dataflow, bb_index, 1047 bbindex_to_postorder, 1048 pending, considered, 1049 prev_age); 1050 last_visit_age[index] = ++age; 1051 if (changed) 1052 bb->aux = (void *)(ptrdiff_t)age; 1053 } 1054 bitmap_clear (worklist); 1055 } 1056 for (i = 0; i < n_blocks; i++) 1057 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL; 1058 1059 BITMAP_FREE (worklist); 1060 BITMAP_FREE (pending); 1061 last_visit_age.release (); 1062 1063 /* Dump statistics. */ 1064 if (dump_file) 1065 fprintf (dump_file, "df_worklist_dataflow_doublequeue:" 1066 "n_basic_blocks %d n_edges %d" 1067 " count %d (%5.2g)\n", 1068 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun), 1069 dcount, dcount / (float)n_basic_blocks_for_fn (cfun)); 1070 } 1071 1072 /* Worklist-based dataflow solver. It uses sbitmap as a worklist, 1073 with "n"-th bit representing the n-th block in the reverse-postorder order. 1074 The solver is a double-queue algorithm similar to the "double stack" solver 1075 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited". 1076 The only significant difference is that the worklist in this implementation 1077 is always sorted in RPO of the CFG visiting direction. */ 1078 1079 void 1080 df_worklist_dataflow (struct dataflow *dataflow, 1081 bitmap blocks_to_consider, 1082 int *blocks_in_postorder, 1083 int n_blocks) 1084 { 1085 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack); 1086 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun)); 1087 bitmap_iterator bi; 1088 unsigned int *bbindex_to_postorder; 1089 int i; 1090 unsigned int index; 1091 enum df_flow_dir dir = dataflow->problem->dir; 1092 1093 gcc_assert (dir != DF_NONE); 1094 1095 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */ 1096 bbindex_to_postorder = XNEWVEC (unsigned int, 1097 last_basic_block_for_fn (cfun)); 1098 1099 /* Initialize the array to an out-of-bound value. */ 1100 for (i = 0; i < last_basic_block_for_fn (cfun); i++) 1101 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun); 1102 1103 /* Initialize the considered map. */ 1104 bitmap_clear (considered); 1105 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi) 1106 { 1107 bitmap_set_bit (considered, index); 1108 } 1109 1110 /* Initialize the mapping of block index to postorder. */ 1111 for (i = 0; i < n_blocks; i++) 1112 { 1113 bbindex_to_postorder[blocks_in_postorder[i]] = i; 1114 /* Add all blocks to the worklist. */ 1115 bitmap_set_bit (pending, i); 1116 } 1117 1118 /* Initialize the problem. */ 1119 if (dataflow->problem->init_fun) 1120 dataflow->problem->init_fun (blocks_to_consider); 1121 1122 /* Solve it. 
*/ 1123 df_worklist_dataflow_doublequeue (dataflow, pending, considered, 1124 blocks_in_postorder, 1125 bbindex_to_postorder, 1126 n_blocks); 1127 sbitmap_free (considered); 1128 free (bbindex_to_postorder); 1129 } 1130 1131 1132 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving 1133 the order of the remaining entries. Returns the length of the resulting 1134 list. */ 1135 1136 static unsigned 1137 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks) 1138 { 1139 unsigned act, last; 1140 1141 for (act = 0, last = 0; act < len; act++) 1142 if (bitmap_bit_p (blocks, list[act])) 1143 list[last++] = list[act]; 1144 1145 return last; 1146 } 1147 1148 1149 /* Execute dataflow analysis on a single dataflow problem. 1150 1151 BLOCKS_TO_CONSIDER are the blocks whose solution can either be 1152 examined or will be computed. For calls from DF_ANALYZE, this is 1153 the set of blocks that has been passed to DF_SET_BLOCKS. 1154 */ 1155 1156 void 1157 df_analyze_problem (struct dataflow *dflow, 1158 bitmap blocks_to_consider, 1159 int *postorder, int n_blocks) 1160 { 1161 timevar_push (dflow->problem->tv_id); 1162 1163 /* (Re)Allocate the datastructures necessary to solve the problem. */ 1164 if (dflow->problem->alloc_fun) 1165 dflow->problem->alloc_fun (blocks_to_consider); 1166 1167 #ifdef ENABLE_DF_CHECKING 1168 if (dflow->problem->verify_start_fun) 1169 dflow->problem->verify_start_fun (); 1170 #endif 1171 1172 /* Set up the problem and compute the local information. */ 1173 if (dflow->problem->local_compute_fun) 1174 dflow->problem->local_compute_fun (blocks_to_consider); 1175 1176 /* Solve the equations. */ 1177 if (dflow->problem->dataflow_fun) 1178 dflow->problem->dataflow_fun (dflow, blocks_to_consider, 1179 postorder, n_blocks); 1180 1181 /* Massage the solution. */ 1182 if (dflow->problem->finalize_fun) 1183 dflow->problem->finalize_fun (blocks_to_consider); 1184 1185 #ifdef ENABLE_DF_CHECKING 1186 if (dflow->problem->verify_end_fun) 1187 dflow->problem->verify_end_fun (); 1188 #endif 1189 1190 timevar_pop (dflow->problem->tv_id); 1191 1192 dflow->computed = true; 1193 } 1194 1195 1196 /* Analyze dataflow info. */ 1197 1198 static void 1199 df_analyze_1 (void) 1200 { 1201 int i; 1202 1203 /* These should be the same. */ 1204 gcc_assert (df->n_blocks == df->n_blocks_inverted); 1205 1206 /* We need to do this before the df_verify_all because this is 1207 not kept incrementally up to date. */ 1208 df_compute_regs_ever_live (false); 1209 df_process_deferred_rescans (); 1210 1211 if (dump_file) 1212 fprintf (dump_file, "df_analyze called\n"); 1213 1214 #ifndef ENABLE_DF_CHECKING 1215 if (df->changeable_flags & DF_VERIFY_SCHEDULED) 1216 #endif 1217 df_verify (); 1218 1219 /* Skip over the DF_SCAN problem. */ 1220 for (i = 1; i < df->num_problems_defined; i++) 1221 { 1222 struct dataflow *dflow = df->problems_in_order[i]; 1223 if (dflow->solutions_dirty) 1224 { 1225 if (dflow->problem->dir == DF_FORWARD) 1226 df_analyze_problem (dflow, 1227 df->blocks_to_analyze, 1228 df->postorder_inverted, 1229 df->n_blocks_inverted); 1230 else 1231 df_analyze_problem (dflow, 1232 df->blocks_to_analyze, 1233 df->postorder, 1234 df->n_blocks); 1235 } 1236 } 1237 1238 if (!df->analyze_subset) 1239 { 1240 BITMAP_FREE (df->blocks_to_analyze); 1241 df->blocks_to_analyze = NULL; 1242 } 1243 1244 #ifdef DF_DEBUG_CFG 1245 df_set_clean_cfg (); 1246 #endif 1247 } 1248 1249 /* Analyze dataflow info. 
*/ 1250 1251 void 1252 df_analyze (void) 1253 { 1254 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack); 1255 int i; 1256 1257 free (df->postorder); 1258 free (df->postorder_inverted); 1259 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun)); 1260 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun)); 1261 df->n_blocks = post_order_compute (df->postorder, true, true); 1262 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted); 1263 1264 for (i = 0; i < df->n_blocks; i++) 1265 bitmap_set_bit (current_all_blocks, df->postorder[i]); 1266 1267 if (flag_checking) 1268 { 1269 /* Verify that POSTORDER_INVERTED only contains blocks reachable from 1270 the ENTRY block. */ 1271 for (i = 0; i < df->n_blocks_inverted; i++) 1272 gcc_assert (bitmap_bit_p (current_all_blocks, 1273 df->postorder_inverted[i])); 1274 } 1275 1276 /* Make sure that we have pruned any unreachable blocks from these 1277 sets. */ 1278 if (df->analyze_subset) 1279 { 1280 bitmap_and_into (df->blocks_to_analyze, current_all_blocks); 1281 df->n_blocks = df_prune_to_subcfg (df->postorder, 1282 df->n_blocks, df->blocks_to_analyze); 1283 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted, 1284 df->n_blocks_inverted, 1285 df->blocks_to_analyze); 1286 BITMAP_FREE (current_all_blocks); 1287 } 1288 else 1289 { 1290 df->blocks_to_analyze = current_all_blocks; 1291 current_all_blocks = NULL; 1292 } 1293 1294 df_analyze_1 (); 1295 } 1296 1297 /* Compute the reverse top sort order of the sub-CFG specified by LOOP. 1298 Returns the number of blocks which is always loop->num_nodes. */ 1299 1300 static int 1301 loop_post_order_compute (int *post_order, struct loop *loop) 1302 { 1303 edge_iterator *stack; 1304 int sp; 1305 int post_order_num = 0; 1306 bitmap visited; 1307 1308 /* Allocate stack for back-tracking up CFG. */ 1309 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1); 1310 sp = 0; 1311 1312 /* Allocate bitmap to track nodes that have been visited. */ 1313 visited = BITMAP_ALLOC (NULL); 1314 1315 /* Push the first edge on to the stack. */ 1316 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs); 1317 1318 while (sp) 1319 { 1320 edge_iterator ei; 1321 basic_block src; 1322 basic_block dest; 1323 1324 /* Look at the edge on the top of the stack. */ 1325 ei = stack[sp - 1]; 1326 src = ei_edge (ei)->src; 1327 dest = ei_edge (ei)->dest; 1328 1329 /* Check if the edge destination has been visited yet and mark it 1330 if not so. */ 1331 if (flow_bb_inside_loop_p (loop, dest) 1332 && bitmap_set_bit (visited, dest->index)) 1333 { 1334 if (EDGE_COUNT (dest->succs) > 0) 1335 /* Since the DEST node has been visited for the first 1336 time, check its successors. */ 1337 stack[sp++] = ei_start (dest->succs); 1338 else 1339 post_order[post_order_num++] = dest->index; 1340 } 1341 else 1342 { 1343 if (ei_one_before_end_p (ei) 1344 && src != loop_preheader_edge (loop)->src) 1345 post_order[post_order_num++] = src->index; 1346 1347 if (!ei_one_before_end_p (ei)) 1348 ei_next (&stack[sp - 1]); 1349 else 1350 sp--; 1351 } 1352 } 1353 1354 free (stack); 1355 BITMAP_FREE (visited); 1356 1357 return post_order_num; 1358 } 1359 1360 /* Compute the reverse top sort order of the inverted sub-CFG specified 1361 by LOOP. Returns the number of blocks which is always loop->num_nodes. 
*/ 1362 1363 static int 1364 loop_inverted_post_order_compute (int *post_order, struct loop *loop) 1365 { 1366 basic_block bb; 1367 edge_iterator *stack; 1368 int sp; 1369 int post_order_num = 0; 1370 bitmap visited; 1371 1372 /* Allocate stack for back-tracking up CFG. */ 1373 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1); 1374 sp = 0; 1375 1376 /* Allocate bitmap to track nodes that have been visited. */ 1377 visited = BITMAP_ALLOC (NULL); 1378 1379 /* Put all latches into the initial work list. In theory we'd want 1380 to start from loop exits but then we'd have the special case of 1381 endless loops. It doesn't really matter for DF iteration order and 1382 handling latches last is probably even better. */ 1383 stack[sp++] = ei_start (loop->header->preds); 1384 bitmap_set_bit (visited, loop->header->index); 1385 1386 /* The inverted traversal loop. */ 1387 while (sp) 1388 { 1389 edge_iterator ei; 1390 basic_block pred; 1391 1392 /* Look at the edge on the top of the stack. */ 1393 ei = stack[sp - 1]; 1394 bb = ei_edge (ei)->dest; 1395 pred = ei_edge (ei)->src; 1396 1397 /* Check if the predecessor has been visited yet and mark it 1398 if not so. */ 1399 if (flow_bb_inside_loop_p (loop, pred) 1400 && bitmap_set_bit (visited, pred->index)) 1401 { 1402 if (EDGE_COUNT (pred->preds) > 0) 1403 /* Since the predecessor node has been visited for the first 1404 time, check its predecessors. */ 1405 stack[sp++] = ei_start (pred->preds); 1406 else 1407 post_order[post_order_num++] = pred->index; 1408 } 1409 else 1410 { 1411 if (flow_bb_inside_loop_p (loop, bb) 1412 && ei_one_before_end_p (ei)) 1413 post_order[post_order_num++] = bb->index; 1414 1415 if (!ei_one_before_end_p (ei)) 1416 ei_next (&stack[sp - 1]); 1417 else 1418 sp--; 1419 } 1420 } 1421 1422 free (stack); 1423 BITMAP_FREE (visited); 1424 return post_order_num; 1425 } 1426 1427 1428 /* Analyze dataflow info for the basic blocks contained in LOOP. */ 1429 1430 void 1431 df_analyze_loop (struct loop *loop) 1432 { 1433 free (df->postorder); 1434 free (df->postorder_inverted); 1435 1436 df->postorder = XNEWVEC (int, loop->num_nodes); 1437 df->postorder_inverted = XNEWVEC (int, loop->num_nodes); 1438 df->n_blocks = loop_post_order_compute (df->postorder, loop); 1439 df->n_blocks_inverted 1440 = loop_inverted_post_order_compute (df->postorder_inverted, loop); 1441 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes); 1442 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes); 1443 1444 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack); 1445 for (int i = 0; i < df->n_blocks; ++i) 1446 bitmap_set_bit (blocks, df->postorder[i]); 1447 df_set_blocks (blocks); 1448 BITMAP_FREE (blocks); 1449 1450 df_analyze_1 (); 1451 } 1452 1453 1454 /* Return the number of basic blocks from the last call to df_analyze. */ 1455 1456 int 1457 df_get_n_blocks (enum df_flow_dir dir) 1458 { 1459 gcc_assert (dir != DF_NONE); 1460 1461 if (dir == DF_FORWARD) 1462 { 1463 gcc_assert (df->postorder_inverted); 1464 return df->n_blocks_inverted; 1465 } 1466 1467 gcc_assert (df->postorder); 1468 return df->n_blocks; 1469 } 1470 1471 1472 /* Return a pointer to the array of basic blocks in the reverse postorder. 1473 Depending on the direction of the dataflow problem, 1474 it returns either the usual reverse postorder array 1475 or the reverse postorder of inverted traversal. 
*/ 1476 int * 1477 df_get_postorder (enum df_flow_dir dir) 1478 { 1479 gcc_assert (dir != DF_NONE); 1480 1481 if (dir == DF_FORWARD) 1482 { 1483 gcc_assert (df->postorder_inverted); 1484 return df->postorder_inverted; 1485 } 1486 gcc_assert (df->postorder); 1487 return df->postorder; 1488 } 1489 1490 static struct df_problem user_problem; 1491 static struct dataflow user_dflow; 1492 1493 /* Interface for calling iterative dataflow with user defined 1494 confluence and transfer functions. All that is necessary is to 1495 supply DIR, a direction, CONF_FUN_0, a confluence function for 1496 blocks with no logical preds (or NULL), CONF_FUN_N, the normal 1497 confluence function, TRANS_FUN, the basic block transfer function, 1498 and BLOCKS, the set of blocks to examine, POSTORDER the blocks in 1499 postorder, and N_BLOCKS, the number of blocks in POSTORDER. */ 1500 1501 void 1502 df_simple_dataflow (enum df_flow_dir dir, 1503 df_init_function init_fun, 1504 df_confluence_function_0 con_fun_0, 1505 df_confluence_function_n con_fun_n, 1506 df_transfer_function trans_fun, 1507 bitmap blocks, int * postorder, int n_blocks) 1508 { 1509 memset (&user_problem, 0, sizeof (struct df_problem)); 1510 user_problem.dir = dir; 1511 user_problem.init_fun = init_fun; 1512 user_problem.con_fun_0 = con_fun_0; 1513 user_problem.con_fun_n = con_fun_n; 1514 user_problem.trans_fun = trans_fun; 1515 user_dflow.problem = &user_problem; 1516 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks); 1517 } 1518 1519 1520 1521 /*---------------------------------------------------------------------------- 1522 Functions to support limited incremental change. 1523 ----------------------------------------------------------------------------*/ 1524 1525 1526 /* Get basic block info. */ 1527 1528 static void * 1529 df_get_bb_info (struct dataflow *dflow, unsigned int index) 1530 { 1531 if (dflow->block_info == NULL) 1532 return NULL; 1533 if (index >= dflow->block_info_size) 1534 return NULL; 1535 return (void *)((char *)dflow->block_info 1536 + index * dflow->problem->block_info_elt_size); 1537 } 1538 1539 1540 /* Set basic block info. */ 1541 1542 static void 1543 df_set_bb_info (struct dataflow *dflow, unsigned int index, 1544 void *bb_info) 1545 { 1546 gcc_assert (dflow->block_info); 1547 memcpy ((char *)dflow->block_info 1548 + index * dflow->problem->block_info_elt_size, 1549 bb_info, dflow->problem->block_info_elt_size); 1550 } 1551 1552 1553 /* Clear basic block info. */ 1554 1555 static void 1556 df_clear_bb_info (struct dataflow *dflow, unsigned int index) 1557 { 1558 gcc_assert (dflow->block_info); 1559 gcc_assert (dflow->block_info_size > index); 1560 memset ((char *)dflow->block_info 1561 + index * dflow->problem->block_info_elt_size, 1562 0, dflow->problem->block_info_elt_size); 1563 } 1564 1565 1566 /* Mark the solutions as being out of date. */ 1567 1568 void 1569 df_mark_solutions_dirty (void) 1570 { 1571 if (df) 1572 { 1573 int p; 1574 for (p = 1; p < df->num_problems_defined; p++) 1575 df->problems_in_order[p]->solutions_dirty = true; 1576 } 1577 } 1578 1579 1580 /* Return true if BB needs it's transfer functions recomputed. */ 1581 1582 bool 1583 df_get_bb_dirty (basic_block bb) 1584 { 1585 return bitmap_bit_p ((df_live 1586 ? df_live : df_lr)->out_of_date_transfer_functions, 1587 bb->index); 1588 } 1589 1590 1591 /* Mark BB as needing it's transfer functions as being out of 1592 date. 
*/ 1593 1594 void 1595 df_set_bb_dirty (basic_block bb) 1596 { 1597 bb->flags |= BB_MODIFIED; 1598 if (df) 1599 { 1600 int p; 1601 for (p = 1; p < df->num_problems_defined; p++) 1602 { 1603 struct dataflow *dflow = df->problems_in_order[p]; 1604 if (dflow->out_of_date_transfer_functions) 1605 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index); 1606 } 1607 df_mark_solutions_dirty (); 1608 } 1609 } 1610 1611 1612 /* Grow the bb_info array. */ 1613 1614 void 1615 df_grow_bb_info (struct dataflow *dflow) 1616 { 1617 unsigned int new_size = last_basic_block_for_fn (cfun) + 1; 1618 if (dflow->block_info_size < new_size) 1619 { 1620 new_size += new_size / 4; 1621 dflow->block_info 1622 = (void *)XRESIZEVEC (char, (char *)dflow->block_info, 1623 new_size 1624 * dflow->problem->block_info_elt_size); 1625 memset ((char *)dflow->block_info 1626 + dflow->block_info_size 1627 * dflow->problem->block_info_elt_size, 1628 0, 1629 (new_size - dflow->block_info_size) 1630 * dflow->problem->block_info_elt_size); 1631 dflow->block_info_size = new_size; 1632 } 1633 } 1634 1635 1636 /* Clear the dirty bits. This is called from places that delete 1637 blocks. */ 1638 static void 1639 df_clear_bb_dirty (basic_block bb) 1640 { 1641 int p; 1642 for (p = 1; p < df->num_problems_defined; p++) 1643 { 1644 struct dataflow *dflow = df->problems_in_order[p]; 1645 if (dflow->out_of_date_transfer_functions) 1646 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index); 1647 } 1648 } 1649 1650 /* Called from the rtl_compact_blocks to reorganize the problems basic 1651 block info. */ 1652 1653 void 1654 df_compact_blocks (void) 1655 { 1656 int i, p; 1657 basic_block bb; 1658 void *problem_temps; 1659 bitmap_head tmp; 1660 1661 bitmap_initialize (&tmp, &df_bitmap_obstack); 1662 for (p = 0; p < df->num_problems_defined; p++) 1663 { 1664 struct dataflow *dflow = df->problems_in_order[p]; 1665 1666 /* Need to reorganize the out_of_date_transfer_functions for the 1667 dflow problem. */ 1668 if (dflow->out_of_date_transfer_functions) 1669 { 1670 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions); 1671 bitmap_clear (dflow->out_of_date_transfer_functions); 1672 if (bitmap_bit_p (&tmp, ENTRY_BLOCK)) 1673 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK); 1674 if (bitmap_bit_p (&tmp, EXIT_BLOCK)) 1675 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK); 1676 1677 i = NUM_FIXED_BLOCKS; 1678 FOR_EACH_BB_FN (bb, cfun) 1679 { 1680 if (bitmap_bit_p (&tmp, bb->index)) 1681 bitmap_set_bit (dflow->out_of_date_transfer_functions, i); 1682 i++; 1683 } 1684 } 1685 1686 /* Now shuffle the block info for the problem. */ 1687 if (dflow->problem->free_bb_fun) 1688 { 1689 int size = (last_basic_block_for_fn (cfun) 1690 * dflow->problem->block_info_elt_size); 1691 problem_temps = XNEWVAR (char, size); 1692 df_grow_bb_info (dflow); 1693 memcpy (problem_temps, dflow->block_info, size); 1694 1695 /* Copy the bb info from the problem tmps to the proper 1696 place in the block_info vector. Null out the copied 1697 item. The entry and exit blocks never move. 
*/ 1698 i = NUM_FIXED_BLOCKS; 1699 FOR_EACH_BB_FN (bb, cfun) 1700 { 1701 df_set_bb_info (dflow, i, 1702 (char *)problem_temps 1703 + bb->index * dflow->problem->block_info_elt_size); 1704 i++; 1705 } 1706 memset ((char *)dflow->block_info 1707 + i * dflow->problem->block_info_elt_size, 0, 1708 (last_basic_block_for_fn (cfun) - i) 1709 * dflow->problem->block_info_elt_size); 1710 free (problem_temps); 1711 } 1712 } 1713 1714 /* Shuffle the bits in the basic_block indexed arrays. */ 1715 1716 if (df->blocks_to_analyze) 1717 { 1718 if (bitmap_bit_p (&tmp, ENTRY_BLOCK)) 1719 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK); 1720 if (bitmap_bit_p (&tmp, EXIT_BLOCK)) 1721 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK); 1722 bitmap_copy (&tmp, df->blocks_to_analyze); 1723 bitmap_clear (df->blocks_to_analyze); 1724 i = NUM_FIXED_BLOCKS; 1725 FOR_EACH_BB_FN (bb, cfun) 1726 { 1727 if (bitmap_bit_p (&tmp, bb->index)) 1728 bitmap_set_bit (df->blocks_to_analyze, i); 1729 i++; 1730 } 1731 } 1732 1733 bitmap_clear (&tmp); 1734 1735 i = NUM_FIXED_BLOCKS; 1736 FOR_EACH_BB_FN (bb, cfun) 1737 { 1738 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb); 1739 bb->index = i; 1740 i++; 1741 } 1742 1743 gcc_assert (i == n_basic_blocks_for_fn (cfun)); 1744 1745 for (; i < last_basic_block_for_fn (cfun); i++) 1746 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL); 1747 1748 #ifdef DF_DEBUG_CFG 1749 if (!df_lr->solutions_dirty) 1750 df_set_clean_cfg (); 1751 #endif 1752 } 1753 1754 1755 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a 1756 block. There is no excuse for people to do this kind of thing. */ 1757 1758 void 1759 df_bb_replace (int old_index, basic_block new_block) 1760 { 1761 int new_block_index = new_block->index; 1762 int p; 1763 1764 if (dump_file) 1765 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index); 1766 1767 gcc_assert (df); 1768 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL); 1769 1770 for (p = 0; p < df->num_problems_defined; p++) 1771 { 1772 struct dataflow *dflow = df->problems_in_order[p]; 1773 if (dflow->block_info) 1774 { 1775 df_grow_bb_info (dflow); 1776 df_set_bb_info (dflow, old_index, 1777 df_get_bb_info (dflow, new_block_index)); 1778 } 1779 } 1780 1781 df_clear_bb_dirty (new_block); 1782 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block); 1783 new_block->index = old_index; 1784 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index)); 1785 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL); 1786 } 1787 1788 1789 /* Free all of the per basic block dataflow from all of the problems. 1790 This is typically called before a basic block is deleted and the 1791 problem will be reanalyzed. */ 1792 1793 void 1794 df_bb_delete (int bb_index) 1795 { 1796 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index); 1797 int i; 1798 1799 if (!df) 1800 return; 1801 1802 for (i = 0; i < df->num_problems_defined; i++) 1803 { 1804 struct dataflow *dflow = df->problems_in_order[i]; 1805 if (dflow->problem->free_bb_fun) 1806 { 1807 void *bb_info = df_get_bb_info (dflow, bb_index); 1808 if (bb_info) 1809 { 1810 dflow->problem->free_bb_fun (bb, bb_info); 1811 df_clear_bb_info (dflow, bb_index); 1812 } 1813 } 1814 } 1815 df_clear_bb_dirty (bb); 1816 df_mark_solutions_dirty (); 1817 } 1818 1819 1820 /* Verify that there is a place for everything and everything is in 1821 its place. This is too expensive to run after every pass in the 1822 mainline. However this is an excellent debugging tool if the 1823 dataflow information is not being updated properly. 
/* Verify that there is a place for everything and everything is in
   its place.  This is too expensive to run after every pass in the
   mainline.  However this is an excellent debugging tool if the
   dataflow information is not being updated properly.  You can just
   sprinkle calls in until you find the place that is changing an
   underlying structure without calling the proper updating
   routine.  */

void
df_verify (void)
{
  df_scan_verify ();
#ifdef ENABLE_DF_CHECKING
  df_lr_verify_transfer_functions ();
  if (df_live)
    df_live_verify_transfer_functions ();
#endif
}

#ifdef DF_DEBUG_CFG

/* Compute an array of ints that describes the cfg.  This can be used
   to discover places where the cfg is modified without the
   appropriate calls having been made to keep df informed.  The
   internals of this are unexciting, the key is that two instances of
   this can be compared to see if any changes have been made to the
   cfg.  */

static int *
df_compute_cfg_image (void)
{
  basic_block bb;
  int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
  int i;
  int * map;

  FOR_ALL_BB_FN (bb, cfun)
    {
      size += EDGE_COUNT (bb->succs);
    }

  map = XNEWVEC (int, size);
  map[0] = size;
  i = 1;
  FOR_ALL_BB_FN (bb, cfun)
    {
      edge_iterator ei;
      edge e;

      map[i++] = bb->index;
      FOR_EACH_EDGE (e, ei, bb->succs)
        map[i++] = e->dest->index;
      map[i++] = -1;
    }
  map[i] = -1;
  return map;
}

static int *saved_cfg = NULL;


/* This function compares the saved version of the cfg with the
   current cfg and aborts if the two differ.  The function silently
   returns if the cfg has been marked as dirty or the two are the
   same.  */

void
df_check_cfg_clean (void)
{
  int *new_map;

  if (!df)
    return;

  if (df_lr->solutions_dirty)
    return;

  if (saved_cfg == NULL)
    return;

  new_map = df_compute_cfg_image ();
  gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
  free (new_map);
}


/* This function builds a cfg fingerprint and squirrels it away in
   saved_cfg.  */

static void
df_set_clean_cfg (void)
{
  free (saved_cfg);
  saved_cfg = df_compute_cfg_image ();
}

#endif /* DF_DEBUG_CFG */

/*----------------------------------------------------------------------------
   PUBLIC INTERFACES TO QUERY INFORMATION.
----------------------------------------------------------------------------*/


/* Return the first def of REGNO within BB.  */

df_ref
df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
{
  rtx_insn *insn;
  df_ref def;

  FOR_BB_INSNS (bb, insn)
    {
      if (!INSN_P (insn))
        continue;

      FOR_EACH_INSN_DEF (def, insn)
        if (DF_REF_REGNO (def) == regno)
          return def;
    }
  return NULL;
}


/* Return the last def of REGNO within BB.  */

df_ref
df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
{
  rtx_insn *insn;
  df_ref def;

  FOR_BB_INSNS_REVERSE (bb, insn)
    {
      if (!INSN_P (insn))
        continue;

      FOR_EACH_INSN_DEF (def, insn)
        if (DF_REF_REGNO (def) == regno)
          return def;
    }

  return NULL;
}
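
/* Illustrative sketch: the two routines above walk the real insns of
   BB only (artificial refs attached to the block itself are never
   returned).  A minimal use, assuming REG is a plain REG and the
   wrapper name is hypothetical, is finding the insn that provides the
   last definition of REG within BB:

     static rtx_insn *
     example_last_setter (basic_block bb, rtx reg)
     {
       df_ref def = df_bb_regno_last_def_find (bb, REGNO (reg));
       return def ? DF_REF_INSN (def) : NULL;
     }
*/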
/* Finds the reference corresponding to the definition of REG in INSN.  */

df_ref
df_find_def (rtx_insn *insn, rtx reg)
{
  df_ref def;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  FOR_EACH_INSN_DEF (def, insn)
    if (DF_REF_REGNO (def) == REGNO (reg))
      return def;

  return NULL;
}


/* Return true if REG is defined in INSN, false otherwise.  */

bool
df_reg_defined (rtx_insn *insn, rtx reg)
{
  return df_find_def (insn, reg) != NULL;
}


/* Finds the reference corresponding to the use of REG in INSN.  */

df_ref
df_find_use (rtx_insn *insn, rtx reg)
{
  df_ref use;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    if (DF_REF_REGNO (use) == REGNO (reg))
      return use;
  if (df->changeable_flags & DF_EQ_NOTES)
    FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
      if (DF_REF_REGNO (use) == REGNO (reg))
        return use;
  return NULL;
}


/* Return true if REG is referenced in INSN, false otherwise.  */

bool
df_reg_used (rtx_insn *insn, rtx reg)
{
  return df_find_use (insn, reg) != NULL;
}


/*----------------------------------------------------------------------------
   Debugging and printing functions.
----------------------------------------------------------------------------*/

/* Write information about registers and basic blocks into FILE.
   This is part of making a debugging dump.  */

void
dump_regset (regset r, FILE *outf)
{
  unsigned i;
  reg_set_iterator rsi;

  if (r == NULL)
    {
      fputs (" (nil)", outf);
      return;
    }

  EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
    {
      fprintf (outf, " %d", i);
      if (i < FIRST_PSEUDO_REGISTER)
        fprintf (outf, " [%s]",
                 reg_names[i]);
    }
}

/* Print a human-readable representation of R on the standard error
   stream.  This function is designed to be used from within the
   debugger.  */
extern void debug_regset (regset);
DEBUG_FUNCTION void
debug_regset (regset r)
{
  dump_regset (r, stderr);
  putc ('\n', stderr);
}

/* Write information about registers and basic blocks into FILE.
   This is part of making a debugging dump.  */

void
df_print_regset (FILE *file, bitmap r)
{
  unsigned int i;
  bitmap_iterator bi;

  if (r == NULL)
    fputs (" (nil)", file);
  else
    {
      EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
        {
          fprintf (file, " %d", i);
          if (i < FIRST_PSEUDO_REGISTER)
            fprintf (file, " [%s]", reg_names[i]);
        }
    }
  fprintf (file, "\n");
}
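
/* Illustrative sketch: df_find_use only consults the uses recorded in
   REG_EQUAL/REG_EQUIV notes when the DF_EQ_NOTES changeable flag is
   set, so a query that should see note uses as well might be wrapped
   like this (the surrounding pass logic is hypothetical):

     int old_flags = df_set_flags (DF_EQ_NOTES);
     bool used = df_reg_used (insn, reg);   /* also sees note uses now */
     if (!(old_flags & DF_EQ_NOTES))
       df_clear_flags (DF_EQ_NOTES);

   df_set_flags and df_clear_flags both return the previous set of
   changeable flags, which is what makes this save/restore idiom
   possible.  */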
/* Write information about registers and basic blocks into FILE.  The
   bitmap is in the form used by df_word_lr.  This is part of making a
   debugging dump.  */

void
df_print_word_regset (FILE *file, bitmap r)
{
  unsigned int max_reg = max_reg_num ();

  if (r == NULL)
    fputs (" (nil)", file);
  else
    {
      unsigned int i;
      for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
        {
          bool found = (bitmap_bit_p (r, 2 * i)
                        || bitmap_bit_p (r, 2 * i + 1));
          if (found)
            {
              int word;
              const char * sep = "";
              fprintf (file, " %d", i);
              fprintf (file, "(");
              for (word = 0; word < 2; word++)
                if (bitmap_bit_p (r, 2 * i + word))
                  {
                    fprintf (file, "%s%d", sep, word);
                    sep = ", ";
                  }
              fprintf (file, ")");
            }
        }
    }
  fprintf (file, "\n");
}


/* Dump dataflow info.  */

void
df_dump (FILE *file)
{
  basic_block bb;
  df_dump_start (file);

  FOR_ALL_BB_FN (bb, cfun)
    {
      df_print_bb_index (bb, file);
      df_dump_top (bb, file);
      df_dump_bottom (bb, file);
    }

  fprintf (file, "\n");
}


/* Dump dataflow info for df->blocks_to_analyze.  */

void
df_dump_region (FILE *file)
{
  if (df->blocks_to_analyze)
    {
      bitmap_iterator bi;
      unsigned int bb_index;

      fprintf (file, "\n\nstarting region dump\n");
      df_dump_start (file);

      EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
          dump_bb (file, bb, 0, TDF_DETAILS);
        }
      fprintf (file, "\n");
    }
  else
    df_dump (file);
}


/* Dump the introductory information for each problem defined.  */

void
df_dump_start (FILE *file)
{
  int i;

  if (!df || !file)
    return;

  fprintf (file, "\n\n%s\n", current_function_name ());
  fprintf (file, "\nDataflow summary:\n");
  if (df->blocks_to_analyze)
    fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
             DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());

  for (i = 0; i < df->num_problems_defined; i++)
    {
      struct dataflow *dflow = df->problems_in_order[i];
      if (dflow->computed)
        {
          df_dump_problem_function fun = dflow->problem->dump_start_fun;
          if (fun)
            fun (file);
        }
    }
}


/* Dump the top or bottom of the block information for BB.  */
static void
df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
{
  int i;

  if (!df || !file)
    return;

  for (i = 0; i < df->num_problems_defined; i++)
    {
      struct dataflow *dflow = df->problems_in_order[i];
      if (dflow->computed)
        {
          df_dump_bb_problem_function bbfun;

          if (top)
            bbfun = dflow->problem->dump_top_fun;
          else
            bbfun = dflow->problem->dump_bottom_fun;

          if (bbfun)
            bbfun (bb, file);
        }
    }
}

/* Dump the top of the block information for BB.  */

void
df_dump_top (basic_block bb, FILE *file)
{
  df_dump_bb_problem_data (bb, file, /*top=*/true);
}

/* Dump the bottom of the block information for BB.  */

void
df_dump_bottom (basic_block bb, FILE *file)
{
  df_dump_bb_problem_data (bb, file, /*top=*/false);
}
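
/* Illustrative sketch: as the USAGE comment at the top of this file
   notes, the pieces of df_dump can also be called separately from a
   larger dump routine.  A pass-specific dumper might interleave its
   own per-block output like this (the wrapper itself is
   hypothetical):

     static void
     example_dump_func (FILE *file)
     {
       basic_block bb;
       df_dump_start (file);
       FOR_EACH_BB_FN (bb, cfun)
         {
           df_print_bb_index (bb, file);
           df_dump_top (bb, file);
           /* ... pass-specific per-block output here ...  */
           df_dump_bottom (bb, file);
         }
     }
*/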
/* Dump information about INSN just before or after dumping INSN itself.  */
static void
df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
{
  int i;

  if (!df || !file)
    return;

  for (i = 0; i < df->num_problems_defined; i++)
    {
      struct dataflow *dflow = df->problems_in_order[i];
      if (dflow->computed)
        {
          df_dump_insn_problem_function insnfun;

          if (top)
            insnfun = dflow->problem->dump_insn_top_fun;
          else
            insnfun = dflow->problem->dump_insn_bottom_fun;

          if (insnfun)
            insnfun (insn, file);
        }
    }
}

/* Dump information about INSN before dumping INSN itself.  */

void
df_dump_insn_top (const rtx_insn *insn, FILE *file)
{
  df_dump_insn_problem_data (insn, file, /*top=*/true);
}

/* Dump information about INSN after dumping INSN itself.  */

void
df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
{
  df_dump_insn_problem_data (insn, file, /*top=*/false);
}
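
/* Illustrative sketch: df_dump_insn_top and df_dump_insn_bottom are
   meant to bracket the printing of the insn itself, e.g.:

     static void
     example_dump_insn (FILE *file, rtx_insn *insn)
     {
       df_dump_insn_top (insn, file);
       print_rtl_single (file, insn);
       df_dump_insn_bottom (insn, file);
     }

   print_rtl_single is just one way to print the insn; any rtl printer
   could sit between the two calls.  */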
/* Dump a single ref REF in a compact form.  */

static void
df_ref_dump (df_ref ref, FILE *file)
{
  fprintf (file, "%c%d(%d)",
           DF_REF_REG_DEF_P (ref)
           ? 'd'
           : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
           DF_REF_ID (ref),
           DF_REF_REGNO (ref));
}

/* Dump the list of refs starting at REF.  If FOLLOW_CHAIN, also dump
   the def-use or use-def chain attached to each ref.  */

void
df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
{
  fprintf (file, "{ ");
  for (; ref; ref = DF_REF_NEXT_LOC (ref))
    {
      df_ref_dump (ref, file);
      if (follow_chain)
        df_chain_dump (DF_REF_CHAIN (ref), file);
    }
  fprintf (file, "}");
}


/* Dump either a reg-def or reg-use chain.  */

void
df_regs_chain_dump (df_ref ref, FILE *file)
{
  fprintf (file, "{ ");
  while (ref)
    {
      df_ref_dump (ref, file);
      ref = DF_REF_NEXT_REG (ref);
    }
  fprintf (file, "}");
}


static void
df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
{
  for (; mws; mws = DF_MWS_NEXT (mws))
    fprintf (file, "mw %c r[%d..%d]\n",
             DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
             mws->start_regno, mws->end_regno);
}


static void
df_insn_uid_debug (unsigned int uid,
                   bool follow_chain, FILE *file)
{
  fprintf (file, "insn %d luid %d",
           uid, DF_INSN_UID_LUID (uid));

  if (DF_INSN_UID_DEFS (uid))
    {
      fprintf (file, " defs ");
      df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
    }

  if (DF_INSN_UID_USES (uid))
    {
      fprintf (file, " uses ");
      df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
    }

  if (DF_INSN_UID_EQ_USES (uid))
    {
      fprintf (file, " eq uses ");
      df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
    }

  if (DF_INSN_UID_MWS (uid))
    {
      fprintf (file, " mws ");
      df_mws_dump (DF_INSN_UID_MWS (uid), file);
    }
  fprintf (file, "\n");
}


DEBUG_FUNCTION void
df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
{
  df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
}

DEBUG_FUNCTION void
df_insn_debug_regno (rtx_insn *insn, FILE *file)
{
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

  fprintf (file, "insn %d bb %d luid %d defs ",
           INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
           DF_INSN_INFO_LUID (insn_info));
  df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);

  fprintf (file, " uses ");
  df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);

  fprintf (file, " eq_uses ");
  df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
  fprintf (file, "\n");
}

DEBUG_FUNCTION void
df_regno_debug (unsigned int regno, FILE *file)
{
  fprintf (file, "reg %d defs ", regno);
  df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
  fprintf (file, " uses ");
  df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
  fprintf (file, " eq_uses ");
  df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
  fprintf (file, "\n");
}


DEBUG_FUNCTION void
df_ref_debug (df_ref ref, FILE *file)
{
  fprintf (file, "%c%d ",
           DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
           DF_REF_ID (ref));
  fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
           DF_REF_REGNO (ref),
           DF_REF_BBNO (ref),
           DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
           DF_REF_FLAGS (ref),
           DF_REF_TYPE (ref));
  if (DF_REF_LOC (ref))
    {
      if (flag_dump_noaddr)
        fprintf (file, "loc #(#) chain ");
      else
        fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
                 (void *)*DF_REF_LOC (ref));
    }
  else
    fprintf (file, "chain ");
  df_chain_dump (DF_REF_CHAIN (ref), file);
  fprintf (file, "\n");
}

/* Functions for debugging from GDB.  */

DEBUG_FUNCTION void
debug_df_insn (rtx_insn *insn)
{
  df_insn_debug (insn, true, stderr);
  debug_rtx (insn);
}


DEBUG_FUNCTION void
debug_df_reg (rtx reg)
{
  df_regno_debug (REGNO (reg), stderr);
}


DEBUG_FUNCTION void
debug_df_regno (unsigned int regno)
{
  df_regno_debug (regno, stderr);
}


DEBUG_FUNCTION void
debug_df_ref (df_ref ref)
{
  df_ref_debug (ref, stderr);
}


DEBUG_FUNCTION void
debug_df_defno (unsigned int defno)
{
  df_ref_debug (DF_DEFS_GET (defno), stderr);
}


DEBUG_FUNCTION void
debug_df_useno (unsigned int defno)
{
  df_ref_debug (DF_USES_GET (defno), stderr);
}


DEBUG_FUNCTION void
debug_df_chain (struct df_link *link)
{
  df_chain_dump (link, stderr);
  fputc ('\n', stderr);
}
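
/* Illustrative sketch: the entry points above are intended to be
   called by hand from the debugger rather than from GCC itself.  For
   example, from a gdb session (the insn, ref and regno values shown
   are of course hypothetical):

     (gdb) call debug_df_insn (insn)
     (gdb) call debug_df_regno (42)
     (gdb) call debug_df_ref (ref)

   Each one writes its report to stderr, so no output stream needs to
   be supplied.  */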