/* SSA Jump Threading
   Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-threadbackward.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
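
/* For illustration (the block and SSA numbers below are hypothetical,
   not from any particular test case): a block that passes the tests
   above typically looks like

     bb4 (preds: bb2, bb3):
       x_3 = PHI <0(2), 1(3)>
       if (x_3 == 0) goto bb5; else goto bb6;

   When bb4 is reached via bb2 we know x_3 is 0, so the edge bb2->bb4
   can be threaded directly to bb5, bypassing the runtime test.  */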

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
	  && gimple_assign_single_p (use_stmt)
	  && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e,
					 const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 may result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}
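
/* A small (hypothetical) example of the recording above: traversing
   the edge bb2->bb4 where bb4 contains

     x_3 = PHI <0(2), x_2(3)>

   records the temporary equivalence x_3 == 0.  The equivalence is
   context sensitive (valid only while reasoning about paths through
   bb2->bb4) and is unwound via CONST_AND_COPIES when we are done.  */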

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences into
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
    const_and_copies *const_and_copies,
    avail_exprs_stack *avail_exprs_stack,
    pfn_simplify simplify)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we can not thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the block.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     Try to fold the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  /* ??? The DOM callback below can be changed to setting
	     the mprts_hook around the call to thread_across_edge,
	     avoiding the use substitution.  The VRP hook should be
	     changed to properly valueize operands itself using
	     SSA_NAME_VALUE in addition to its own lattice.  */
	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						       threadedge_valueize);
	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    {
	      /* We're going to temporarily copy propagate the operands
		 and see if that allows us to simplify this statement.  */
	      tree *copy;
	      ssa_op_iter iter;
	      use_operand_p use_p;
	      unsigned int num, i = 0;

	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	      copy = XALLOCAVEC (tree, num);

	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
		 the operands.  */
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		{
		  tree tmp = NULL;
		  tree use = USE_FROM_PTR (use_p);

		  copy[i++] = use;
		  if (TREE_CODE (use) == SSA_NAME)
		    tmp = SSA_NAME_VALUE (use);
		  if (tmp)
		    SET_USE (use_p, tmp);
		}

	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

	      /* Restore the statement's original uses/defs.  */
	      i = 0;
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		SET_USE (use_p, copy[i++]);
	    }
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
						cached_lhs);
    }
  return stmt;
}
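
/* An illustrative (hypothetical) use of the function above: given the
   temporary equivalence x_3 == 0 from a PHI, a statement in E->dest
   such as

     a_5 = x_3 + 1;

   folds to 1 via threadedge_valueize, so a_5 == 1 is recorded as
   another context sensitive equivalence, which may in turn let the
   conditional at the end of the block simplify to a constant.  */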

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple *stmt,
				 class avail_exprs_stack *avail_exprs_stack,
				 gcond *dummy_cond,
				 pfn_simplify simplify,
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  cond_code = swap_tree_comparison (cond_code);
	  std::swap (op0, op1);
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);

      /* If we were testing an integer/pointer against a constant, then
	 we can use the FSM code to trace the value of the SSA_NAME.  If
	 a value is found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths found by the FSM
	     optimizer.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to
     handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
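
/* A worked (hypothetical) example for the function above: with the
   equivalences x_3 == 0 and a_5 == 1 recorded along the path, the
   condition

     if (a_5 != x_3)

   valueizes to 1 != 0, which fold_binary collapses to boolean true;
   the caller can then use find_taken_edge to statically select the
   outgoing edge.  Note also the two-iteration limit on the value
   chain walks above: SSA_NAME_VALUE chains can contain cycles when
   threading a loop backedge, so the walk is bounded rather than
   followed to a fixed point.  */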

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}
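
/* An illustrative note on the function above (the binding below is
   hypothetical): if a bypassed predecessor of DEST contains

     # DEBUG x => x_3

   that binding would no longer reach the duplicated path once DEST
   gains new predecessors and PHIs.  Copying the debug binds from the
   single-predecessor chain into DEST keeps variable tracking accurate
   after the CFG is updated.  */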

/* See if TAKEN_EDGE->dest is a threadable block with no side effects
   (ie, it need not be duplicated as part of the CFG/SSA updating
   process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    class avail_exprs_stack *avail_exprs_stack,
			    bool handle_dominating_asserts,
			    pfn_simplify simplify,
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 avail_exprs_stack,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
					  avail_exprs_stack, dummy_cond,
					  simplify,
					  handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  avail_exprs_stack,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}
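
/* Sketch of the case the function above handles (block numbers
   hypothetical):

     bb4:  (no PHIs, no statements)
       goto bb5;
     bb5:
       if (x_3 != 0) goto bb6; else goto bb7;

   bb4 is recorded on the path as EDGE_NO_COPY_SRC_BLOCK since it need
   not be duplicated, and the walk continues into bb5, where the
   condition may simplify using the equivalences gathered so far.  */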

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification
   as scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path;
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     bool handle_dominating_asserts,
			     const_and_copies *const_and_copies,
			     avail_exprs_stack *avail_exprs_stack,
			     pfn_simplify simplify,
			     vec<jump_thread_edge *> *path,
			     bitmap visited)
{
  /* We want to record any equivalences created by traversing E.  */
  if (!handle_dominating_asserts)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
							avail_exprs_stack,
							simplify);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
					      dummy_cond, simplify,
					      handle_dominating_asserts);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      avail_exprs_stack,
				      handle_dominating_asserts,
				      simplify,
				      visited,
				      path);
	  return 1;
	}
    }
  return 0;
}
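
/* For illustration: a successful (positive) return from the function
   above leaves PATH looking like

     EDGE_START_JUMP_THREAD   E itself
     EDGE_COPY_SRC_BLOCK      the known-taken edge out of E->dest
     EDGE_NO_COPY_SRC_BLOCK   zero or more empty blocks threaded
			      through by thread_around_empty_blocks

   which register_jump_thread later converts into the actual CFG and
   SSA updates.  */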

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   DUMMY_COND is a shared cond_expr used by condition simplification
   as scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    class const_and_copies *const_and_copies,
		    class avail_exprs_stack *avail_exprs_stack,
		    tree (*simplify) (gimple *, gimple *,
				      class avail_exprs_stack *))
{
  bitmap visited = BITMAP_ALLOC (NULL);

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  int threaded;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (e, dummy_cond,
					    handle_dominating_asserts,
					    const_and_copies,
					    avail_exprs_stack,
					    simplify, path,
					    visited);
  else
    threaded = 0;

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      find_jump_threads_backwards (e);

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  const_and_copies->pop_to_marker ();
	  return;
	}
    }

  /* We were unable to determine which outgoing edge of E->dest is taken.
     However, we might still be able to thread through successors of
     E->dest.  This often occurs when E->dest is a joiner block which
     then fans back out based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  const_and_copies->pop_to_marker ();
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through
       it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  {
	    find_jump_threads_backwards (taken_edge);
	    continue;
	  }

	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	const_and_copies->push_marker ();
	if (avail_exprs_stack)
	  avail_exprs_stack->push_marker ();

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
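
	/* At this point the path is E (EDGE_START_JUMP_THREAD) followed
	   by TAKEN_EDGE (EDGE_COPY_SRC_JOINER_BLOCK); the calls below
	   try to extend it through empty blocks and, failing that, a
	   normal block, before deciding whether to register it.  */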
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    avail_exprs_stack,
					    handle_dominating_asserts,
					    simplify,
					    visited,
					    path);

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       handle_dominating_asserts,
					       const_and_copies,
					       avail_exprs_stack,
					       simplify, path,
					       visited) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    find_jump_threads_backwards (path->last ()->e);
	    delete_jump_thread_path (path);
	  }

	/* And unwind the equivalence table.  */
	if (avail_exprs_stack)
	  avail_exprs_stack->pop_to_marker ();
	const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}
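
/* A hedged usage sketch, not part of this file's interface
   documentation (the caller-side names are hypothetical, and the
   caller is assumed to manage its own avail_exprs_stack markers): a
   client pass is expected to build the scratch condition once, push
   an unwind marker, and call thread_across_edge per candidate edge,
   roughly:

     gcond *dummy_cond = gimple_build_cond (NE_EXPR,
					    integer_zero_node,
					    integer_zero_node,
					    NULL, NULL);
     const_and_copies->push_marker ();
     thread_across_edge (dummy_cond, e, handle_asserts,
			 const_and_copies, avail_exprs_stack, simplify);

   thread_across_edge pops const_and_copies back to the marker on
   every return path above, so the caller need not unwind it again.  */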