/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tree-inline.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"
#include "alloc-pool.h"
#include "tree-vrp.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* This file implements optimizations on the dominator tree.  */

/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures.  */
class edge_info
{
 public:
  typedef std::pair <tree, tree> equiv_pair;
  edge_info (edge);
  ~edge_info ();

  /* Record a simple LHS = RHS equivalence.  This may trigger
     calls to derive_equivalences.  */
  void record_simple_equiv (tree, tree);

  /* If traversing this edge creates simple equivalences, we store
     them as LHS/RHS pairs within this vector.  */
  vec<equiv_pair> simple_equivalences;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;

 private:
  /* Derive equivalences by walking the use-def chains.  */
  void derive_equivalences (tree, tree, int);
};

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
						    class const_and_copies *,
						    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
					      class const_and_copies *,
					      class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
					   class avail_exprs_stack *);
static void dump_dominator_optimization_stats (FILE *file,
					       hash_table<expr_elt_hasher> *);

/* Constructor for EDGE_INFO.  An EDGE_INFO instance is always
   associated with an edge E.  */

edge_info::edge_info (edge e)
{
  /* Free the old one associated with E, if it exists and
     associate our new object with E.  */
  free_dom_edge_info (e);
  e->aux = this;

  /* And initialize the embedded vectors.  */
  simple_equivalences = vNULL;
  cond_equivalences = vNULL;
}

/* Destructor just needs to release the vectors.  */

edge_info::~edge_info (void)
{
  this->cond_equivalences.release ();
  this->simple_equivalences.release ();
}

/* NAME is known to have the value VALUE, which must be a constant.

   Walk through its use-def chain to see if there are other equivalences
   we might be able to derive.

   RECURSION_LIMIT controls how far back we recurse through the use-def
   chains.  */
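/* As an illustration (names here are hypothetical), given

     a_2 = b_3 | c_4;
     x_5 = a_2 + 1;
     if (x_5 == 1) ...

   learning x_5 == 1 on the true edge lets us derive a_2 == 0 by
   inverting the PLUS_EXPR, and from that b_3 == 0 and c_4 == 0 via
   the BIT_IOR_EXPR handling below.  */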
void
edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
{
  if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
    return;

  /* This records the equivalence for the toplevel object.  Do
     this before checking the recursion limit.  */
  simple_equivalences.safe_push (equiv_pair (name, value));

  /* Limit how far up the use-def chains we are willing to walk.  */
  if (recursion_limit == 0)
    return;

  /* We can walk up the use-def chains to potentially find more
     equivalences.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  if (is_gimple_assign (def_stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (def_stmt);
      switch (code)
	{
	/* If the result of an OR is zero, then its operands are, too.  */
	case BIT_IOR_EXPR:
	  if (integer_zerop (value))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      tree rhs2 = gimple_assign_rhs2 (def_stmt);

	      value = build_zero_cst (TREE_TYPE (rhs1));
	      derive_equivalences (rhs1, value, recursion_limit - 1);
	      value = build_zero_cst (TREE_TYPE (rhs2));
	      derive_equivalences (rhs2, value, recursion_limit - 1);
	    }
	  break;

	/* If the result of an AND is nonzero, then its operands are, too.  */
	case BIT_AND_EXPR:
	  if (!integer_zerop (value))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      tree rhs2 = gimple_assign_rhs2 (def_stmt);

	      /* If either operand has a boolean range, then we
		 know its value must be one; otherwise we just know it
		 is nonzero.  The former is clearly useful; I haven't
		 seen cases where the latter is helpful yet.  */
	      if (TREE_CODE (rhs1) == SSA_NAME)
		{
		  if (ssa_name_has_boolean_range (rhs1))
		    {
		      value = build_one_cst (TREE_TYPE (rhs1));
		      derive_equivalences (rhs1, value, recursion_limit - 1);
		    }
		}
	      if (TREE_CODE (rhs2) == SSA_NAME)
		{
		  if (ssa_name_has_boolean_range (rhs2))
		    {
		      value = build_one_cst (TREE_TYPE (rhs2));
		      derive_equivalences (rhs2, value, recursion_limit - 1);
		    }
		}
	    }
	  break;

	/* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	   set via a widening type conversion, then we may be able to record
	   additional equivalences.  */
	case NOP_EXPR:
	case CONVERT_EXPR:
	  {
	    tree rhs = gimple_assign_rhs1 (def_stmt);
	    tree rhs_type = TREE_TYPE (rhs);
	    if (INTEGRAL_TYPE_P (rhs_type)
		&& (TYPE_PRECISION (TREE_TYPE (name))
		    >= TYPE_PRECISION (rhs_type))
		&& int_fits_type_p (value, rhs_type))
	      derive_equivalences (rhs,
				   fold_convert (rhs_type, value),
				   recursion_limit - 1);
	    break;
	  }

	/* We can invert the operation of these codes trivially if
	   one of the RHS operands is a constant to produce a known
	   value for the other RHS operand.  */
	case POINTER_PLUS_EXPR:
	case PLUS_EXPR:
	  {
	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
	    tree rhs2 = gimple_assign_rhs2 (def_stmt);

	    /* If either argument is a constant, then we can compute
	       a constant value for the nonconstant argument.  */
	    if (TREE_CODE (rhs1) == INTEGER_CST
		&& TREE_CODE (rhs2) == SSA_NAME)
	      derive_equivalences (rhs2,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						value, rhs1),
				   recursion_limit - 1);
	    else if (TREE_CODE (rhs2) == INTEGER_CST
		     && TREE_CODE (rhs1) == SSA_NAME)
	      derive_equivalences (rhs1,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						value, rhs2),
				   recursion_limit - 1);
	    break;
	  }

	/* If one of the operands is a constant, then we can compute
	   the value of the other operand.  If both operands are
	   SSA_NAMEs, then they must be equal if the result is zero.  */
	case MINUS_EXPR:
	  {
	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
	    tree rhs2 = gimple_assign_rhs2 (def_stmt);

	    /* If either argument is a constant, then we can compute
	       a constant value for the nonconstant argument.  */
	    if (TREE_CODE (rhs1) == INTEGER_CST
		&& TREE_CODE (rhs2) == SSA_NAME)
	      derive_equivalences (rhs2,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						rhs1, value),
				   recursion_limit - 1);
	    else if (TREE_CODE (rhs2) == INTEGER_CST
		     && TREE_CODE (rhs1) == SSA_NAME)
	      derive_equivalences (rhs1,
				   fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
						value, rhs2),
				   recursion_limit - 1);
	    else if (integer_zerop (value))
	      {
		tree cond = build2 (EQ_EXPR, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		record_conditions (&this->cond_equivalences, cond, inverted);
	      }
	    break;
	  }

	case EQ_EXPR:
	case NE_EXPR:
	  {
	    if ((code == EQ_EXPR && integer_onep (value))
		|| (code == NE_EXPR && integer_zerop (value)))
	      {
		tree rhs1 = gimple_assign_rhs1 (def_stmt);
		tree rhs2 = gimple_assign_rhs2 (def_stmt);

		/* If either argument is a constant, then record the
		   other argument as being the same as that constant.

		   If neither operand is a constant, then we have a
		   conditional name == name equivalence.  */
		if (TREE_CODE (rhs1) == INTEGER_CST)
		  derive_equivalences (rhs2, rhs1, recursion_limit - 1);
		else if (TREE_CODE (rhs2) == INTEGER_CST)
		  derive_equivalences (rhs1, rhs2, recursion_limit - 1);
	      }
	    else
	      {
		tree cond = build2 (code, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		if (integer_zerop (value))
		  std::swap (cond, inverted);
		record_conditions (&this->cond_equivalences, cond, inverted);
	      }
	    break;
	  }

	/* For BIT_NOT and NEGATE, we can just apply the operation to the
	   VALUE to get the new equivalence.  It will always be a constant
	   so we can recurse.  */
	case BIT_NOT_EXPR:
	case NEGATE_EXPR:
	  {
	    tree rhs = gimple_assign_rhs1 (def_stmt);
	    tree res;
	    /* If this is a NOT and the operand has a boolean range, then we
	       know its value must be zero or one.  We are not supposed to
	       have a BIT_NOT_EXPR for boolean types with precision > 1 in
	       the general case, see e.g. the handling of TRUTH_NOT_EXPR in
	       the gimplifier, but it can be generated by match.pd out of
	       a BIT_XOR_EXPR wrapped in a BIT_AND_EXPR.  Now the handling
	       of BIT_AND_EXPR above already forces a specific semantics for
	       boolean types with precision > 1 so we must do the same here,
	       otherwise we could change the semantics of TRUTH_NOT_EXPR for
	       boolean types with precision > 1.  */
	    if (code == BIT_NOT_EXPR
		&& TREE_CODE (rhs) == SSA_NAME
		&& ssa_name_has_boolean_range (rhs))
	      {
		if ((TREE_INT_CST_LOW (value) & 1) == 0)
		  res = build_one_cst (TREE_TYPE (rhs));
		else
		  res = build_zero_cst (TREE_TYPE (rhs));
	      }
	    else
	      res = fold_build1 (code, TREE_TYPE (rhs), value);
	    derive_equivalences (rhs, res, recursion_limit - 1);
	    break;
	  }

	default:
	  {
	    if (TREE_CODE_CLASS (code) == tcc_comparison)
	      {
		tree cond = build2 (code, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		if (integer_zerop (value))
		  std::swap (cond, inverted);
		record_conditions (&this->cond_equivalences, cond, inverted);
		break;
	      }
	    break;
	  }
	}
    }
}

void
edge_info::record_simple_equiv (tree lhs, tree rhs)
{
  /* If the RHS is a constant, then we may be able to derive
     further equivalences.  Else just record the name = name
     equivalence.  */
  if (TREE_CODE (rhs) == INTEGER_CST)
    derive_equivalences (lhs, rhs, 4);
  else
    simple_equivalences.safe_push (equiv_pair (lhs, rhs));
}

/* Free the edge_info data attached to E, if it exists.  */

void
free_dom_edge_info (edge e)
{
  class edge_info *edge_info = (struct edge_info *)e->aux;

  if (edge_info)
    delete edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  free_dom_edge_info (e);
	  e->aux = NULL;
	}
    }
}
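/* As an illustration (hypothetical names), for

     if (x_1 < y_2)

   taking the true edge implies not only x_1 < y_2 but also
   x_1 <= y_2 and x_1 != y_2; record_conditions captures such
   derived conditions on the edge as cond_equivalences.  */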
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  class edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
	  tree index = gimple_switch_index (switch_stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (switch_stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (switch_stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = new class edge_info (e);
		      edge_info->record_simple_equiv (index, x);
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.

	     However, don't do this if the constant isn't zero or one.
	     Such conditionals will get optimized more thoroughly during
	     the domwalk.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && ssa_name_has_boolean_range (op0)
	      && is_gimple_min_invariant (op1)
	      && (integer_zerop (op1) || integer_onep (op1)))
	    {
	      tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
	      tree false_val = constant_boolean_node (false, TREE_TYPE (op0));

	      if (code == EQ_EXPR)
		{
		  edge_info = new class edge_info (true_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? false_val : true_val));
		  edge_info = new class edge_info (false_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? true_val : false_val));
		}
	      else
		{
		  edge_info = new class edge_info (true_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? true_val : false_val));
		  edge_info = new class edge_info (false_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? false_val : true_val));
		}
	    }
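	  /* For example, given if (b_1 == 0) where b_1 has a boolean
	     range, the code above records b_1 = 0 on the true edge and
	     b_1 = 1 on the false edge, rather than recording the
	     condition itself.  */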
	  /* This can show up in the IL as a result of copy propagation;
	     it will eventually be canonicalized, but we have to cope
	     with this case within the pass.  */
	  else if (is_gimple_min_invariant (op0)
		   && TREE_CODE (op1) == SSA_NAME)
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op0)
		    && real_zerop (op0));
	      struct edge_info *edge_info;

	      edge_info = new class edge_info (true_edge);
	      record_conditions (&edge_info->cond_equivalences, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		edge_info->record_simple_equiv (op1, op0);

	      edge_info = new class edge_info (false_edge);
	      record_conditions (&edge_info->cond_equivalences, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		edge_info->record_simple_equiv (op1, op0);
	    }

	  else if (TREE_CODE (op0) == SSA_NAME
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op1)
		    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
	      struct edge_info *edge_info;

	      edge_info = new class edge_info (true_edge);
	      record_conditions (&edge_info->cond_equivalences, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		edge_info->record_simple_equiv (op0, op1);

	      edge_info = new class edge_info (false_edge);
	      record_conditions (&edge_info->cond_equivalences, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		edge_info->record_simple_equiv (op0, op1);
	    }
	}
    }
}


class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction,
		      class const_and_copies *const_and_copies,
		      class avail_exprs_stack *avail_exprs_stack,
		      gcond *dummy_cond)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (dummy_cond) { }

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* Unwindable equivalences, both const/copy and expression varieties.  */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* VRP data.  */
  class evrp_range_analyzer evrp_range_analyzer;

  /* Dummy condition to avoid creating lots of throw away statements.  */
  gcond *m_dummy_cond;

  /* Optimize a single statement within a basic block using the
     various tables maintained by DOM.  Returns the taken edge if
     the statement is a conditional with a statically determined
     value.  */
  edge optimize_stmt (basic_block, gimple_stmt_iterator *, bool *);
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time.  */
  bool may_peel_loop_headers_p;
}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
					 integer_zero_node, NULL, NULL);
  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
			     avail_exprs_stack, dummy_cond);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  if (cfg_altered)
    {
      FOR_EACH_BB_FN (bb, fun)
	{
	  edge_iterator ei;
	  edge e;

	  /* First see if there are any edges without EDGE_EXECUTABLE
	     set.  */
	  bool found = false;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_EXECUTABLE) == 0)
		{
		  found = true;
		  break;
		}
	    }

	  /* If there were any such edges found, then remove jump threads
	     containing any edge leaving BB.  */
	  if (found)
	    FOR_EACH_EDGE (e, ei, bb->succs)
	      remove_jump_threads_including (e);
	}
    }

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags
		     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }
  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading has finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* A hack until we remove threading from tree-vrp.c and bring the
   simplification routine into the dom_opt_dom_walker class.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple *stmt,
				  gimple *within_stmt ATTRIBUTE_UNUSED,
				  class avail_exprs_stack *avail_exprs_stack,
				  basic_block bb ATTRIBUTE_UNUSED)
{
  /* First query our hash table to see if the expression is available
     there.  A non-NULL return value will be either a constant or another
     SSA_NAME.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs)
    return cached_lhs;

  /* If the hash table query failed, query VRP information.  This is
     essentially the same as tree-vrp's simplification routine.  The
     copy in tree-vrp is scheduled for removal in gcc-9.  */
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      cached_lhs
	= x_vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						 gimple_cond_lhs (cond_stmt),
						 gimple_cond_rhs (cond_stmt),
						 within_stmt);
      return cached_lhs;
    }
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      value_range *vr = x_vr_values->get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return NULL_TREE;

      if (vr->type == VR_RANGE)
	{
	  size_t i, j;

	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
		     && tree_int_cst_equal (vr->min, vr->max)))
		return label;
	    }

	  /* If no case label intersects the range, the default label is
	     taken.  Note this check must be outside the I == J block
	     above, since I > J can never hold there.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->type == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}
      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr = VR_INITIALIZER;
	  x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
						&dummy_tree, &new_vr);
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }
  return NULL;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}
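/* For example (hypothetical names), if we have just recorded x_1 = 0
   for edge E and a block dominating E->dest contains y_2 = x_1 + 1,
   then y_2 = 1 can also be recorded as holding on E.  */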
/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E.  */
static void
back_propagate_equivalences (tree lhs, edge e,
			     class const_and_copies *const_and_copies)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  bitmap domby = NULL;
  basic_block dest = e->dest;

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ???  If the code gets re-organized to a worklist to catch more
     indirect opportunities and it is made to handle PHIs then this
     should only consider use_stmts in basic-blocks we have already visited.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
	 This is cheaper than the dominator set tests below.  */
      if (dest == gimple_bb (use_stmt))
	continue;

      /* Filter out statements that can never produce a useful
	 equivalence.  */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
	continue;

      /* Profiling has shown the domination tests here can be fairly
	 expensive.  We get significant improvements by building the
	 set of blocks that dominate BB.  We can then just test
	 for set membership below.

	 We also initialize the set lazily since often the only uses
	 are going to be in the same block as DEST.  */
      if (!domby)
	{
	  domby = BITMAP_ALLOC (NULL);
	  basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
	  while (bb)
	    {
	      bitmap_set_bit (domby, bb->index);
	      bb = get_immediate_dominator (CDI_DOMINATORS, bb);
	    }
	}

      /* This tests if USE_STMT does not dominate DEST.  */
      if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
	continue;

      /* At this point USE_STMT dominates DEST and may result in a
	 useful equivalence.  Try to simplify its RHS to a constant
	 or SSA_NAME.  */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
						 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
	record_equality (lhs2, res, const_and_copies);
    }

  if (domby)
    BITMAP_FREE (domby);
}

/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
void
record_temporary_equivalences (edge e,
			       class const_and_copies *const_and_copies,
			       class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  class edge_info *edge_info = (class edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	avail_exprs_stack->record_cond (eq);

      edge_info::equiv_pair *seq;
      for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
	{
	  tree lhs = seq->first;
	  if (!lhs || TREE_CODE (lhs) != SSA_NAME)
	    continue;

	  /* Record the simple NAME = VALUE equivalence.  */
	  tree rhs = seq->second;

	  /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
	     cheaper to compute than the other, then set up the equivalence
	     such that we replace the expensive one with the cheap one.

	     If they are the same cost to compute, then do not record
	     anything.  */
	  if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
	      int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);

	      gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
	      int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);

	      if (rhs_cost > lhs_cost)
		record_equality (rhs, lhs, const_and_copies);
	      else if (rhs_cost < lhs_cost)
		record_equality (lhs, rhs, const_and_copies);
	    }
	  else
	    record_equality (lhs, rhs, const_and_copies);

	  /* Any equivalence found for LHS may result in additional
	     equivalences for other uses of LHS that we have already
	     processed.  */
	  back_propagate_equivalences (lhs, e, const_and_copies);
	}
    }
}
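/* For instance, x_3 = PHI <y_2 (3), y_2 (4), x_3 (5)> can only ever
   have the value y_2 (alternatives equal to the result are ignored),
   so x_3 may be treated as equivalent to y_2.  */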
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If the associated edge is not marked as executable, then it
	     can be ignored.  */
	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
	    continue;

	  t = dom_valueize (t);

	  /* If T is an SSA_NAME and its associated edge is a backedge,
	     then quit as we cannot utilize this equivalence.  */
	  if (TREE_CODE (t) == SSA_NAME
	      && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
	    break;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}

/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
   incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb,
    class const_and_copies *const_and_copies,
    class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_pred_edge_ignoring_loop_edges (bb, true);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}
/* Dump SSA statistics on FILE.  */

static void
dump_dominator_optimization_stats (FILE *file,
				   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as an assignment.  */

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
	 X.  This will preserve its single use properly and if this
	 conditional is eliminated, the computation of X can be
	 eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
	std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
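/* For example (hypothetical names), when record_equality is asked to
   record x_1 = y_2 and y_2 already has the known constant value 5, the
   logic above canonicalizes on the constant and records x_1 = 5
   instead.  */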
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_k)
   [...]
   i_j = i_{j-1} for each j : 2 <= j <= k-1
   [...]
   i_k = i_{k-1} +/- ...  */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  while (gimple_code (phi) != GIMPLE_PHI)
    {
      /* Follow trivial copies, but not the DEF used in a back edge,
	 so that we don't prevent coalescing.  */
      if (!gimple_assign_ssa_name_copy_p (phi))
	return false;
      preinc = gimple_assign_rhs1 (phi);
      phi = SSA_NAME_DEF_STMT (preinc);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
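/* E.g. (hypothetical names), i_14 = i_4 + 1 is a simple iv increment
   when i_4 = PHI <i_0 (2), i_14 (3)>, possibly with trivial copies
   between the increment's operand and the PHI result.  */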
/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb,
			   class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we cannot propagate it into non-dominated blocks, we can
	 propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      class edge_info *edge_info = (class edge_info *) e->aux;

      if (edge_info)
	{
	  edge_info::equiv_pair *seq;
	  for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
	    {
	      tree lhs = seq->first;
	      tree rhs = seq->second;

	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
		const_and_copies->record_const_or_copy (lhs, rhs);
	    }

	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gphi *phi = gsi.phi ();

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      const_and_copies->pop_to_marker ();
    }
}

edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  evrp_range_analyzer.enter (bb);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb, m_const_and_copies,
					  m_avail_exprs_stack);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
				      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  /* Initialize the visited flag ahead of us; it has undefined state on
     pass entry.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    gimple_set_visited (gsi_stmt (gsi), false);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
    {
      /* Do not optimize a stmt twice, substitution might end up with
	 _3 = _3 which is not valid.  */
      if (gimple_visited_p (gsi_stmt (gsi)))
	{
	  gsi_next (&gsi);
	  continue;
	}

      /* Compute range information and optimize the stmt.  */
      evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
      bool removed_p = false;
      taken_edge = this->optimize_stmt (bb, &gsi, &removed_p);
      if (!removed_p)
	gimple_set_visited (gsi_stmt (gsi), true);

      /* Go back and visit stmts inserted by folding after substituting
	 into the stmt at gsi.  */
      if (gsi_end_p (gsi))
	{
	  gcc_checking_assert (removed_p);
	  gsi = gsi_last_bb (bb);
	  while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)))
	    gsi_prev (&gsi);
	}
      else
	{
	  do
	    {
	      gsi_prev (&gsi);
	    }
	  while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)));
	}
      if (gsi_end_p (gsi))
	gsi = gsi_start_bb (bb);
      else
	gsi_next (&gsi);
    }

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, m_const_and_copies);
  if (taken_edge && !dbg_cnt (dom_unreachable_edges))
    return NULL;

  return taken_edge;
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  x_vr_values = evrp_range_analyzer.get_vr_values ();
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack,
			 &evrp_range_analyzer,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  /* These remove expressions local to BB from the tables.  */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
  evrp_range_analyzer.leave (bb);
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES.  */
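/* For example (hypothetical names), if a dominating block computed
   t_5 = a_1 + b_2 and STMT is x_9 = a_1 + b_2, the hash table lookup
   below finds t_5 and the statement is rewritten as x_9 = t_5.  */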
static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
				  class const_and_copies *const_and_copies,
				  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
	const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
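/* E.g. (hypothetical names), x_1 = y_2 registers y_2 as the cached
   value of x_1, while a store *p_3 = x_1 additionally makes the load
   x_1 = *p_3 available for subsequent redundant-load elimination.  */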
static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
			       class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space.  */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  rhs = dom_valueize (rhs);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "==== ASGN ");
	      print_generic_expr (dump_file, lhs);
	      fprintf (dump_file, " = ");
	      print_generic_expr (dump_file, rhs);
	      fprintf (dump_file, "\n");
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* Make sure we can propagate &x + CST.  */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
	= build_fold_addr_expr (fold_build2 (MEM_REF,
					     TREE_TYPE (TREE_TYPE (op0)),
					     unshare_expr (op0),
					     fold_convert (ptr_type_node,
							   op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "==== ASGN ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, new_rhs);
	  fprintf (dump_file, "\n");
	}

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG.  */
	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
	 table.  */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}
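/* For instance (hypothetical names), if SSA_NAME_VALUE (j_5) is the
   constant 10, a use of j_5 in STMT is replaced by 10 below, subject
   to the propagation restrictions that follow.  */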
      if (TREE_CODE (val) != INTEGER_CST)
	{
	  gimple *def = SSA_NAME_DEF_STMT (op);
	  if (gimple_code (def) == GIMPLE_PHI
	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
	    return;
	}

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced '");
	  print_generic_expr (dump_file, op, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, val, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      if (TREE_CODE (val) != SSA_NAME)
	opt_stats.num_const_prop++;
      else
	opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
	 safe, even if we changed virtual operands since we will
	 rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple *stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;
  tree last_copy_propagated_op = NULL;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    {
      tree old_op = USE_FROM_PTR (op_p);

      /* If we have A = B and B = A in the copy propagation tables
	 (due to an equality comparison), avoid substituting B for A
	 then A for B in the trivially discovered cases.  This allows
	 optimization of statements where A and B appear as input
	 operands.  */
      if (old_op != last_copy_propagated_op)
	{
	  cprop_operand (stmt, op_p);

	  tree new_op = USE_FROM_PTR (op_p);
	  if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
	    last_copy_propagated_op = new_op;
	}
    }
}

/* If STMT contains a relational test, try to convert it into an
   equality test if there is only a single value which can ever
   make the test true.

   For example, if the expression hash table contains:

    TRUE = (i <= 1)

   And we have a test within statement of i >= 1, then we can safely
   rewrite the test as i == 1 since there is only a single value where
   the test is true.

   This is similar to code in VRP.  */

static void
test_for_singularity (gimple *stmt, gcond *dummy_cond,
		      avail_exprs_stack *avail_exprs_stack)
{
  /* We want to support gimple conditionals as well as assignments
     where the RHS contains a conditional.  */
  if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code code = ERROR_MARK;
      tree lhs, rhs;

      /* Extract the condition of interest from both forms we support.  */
      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  lhs = gimple_assign_rhs1 (stmt);
	  rhs = gimple_assign_rhs2 (stmt);
	}
      else if (gimple_code (stmt) == GIMPLE_COND)
	{
	  code = gimple_cond_code (as_a <gcond *> (stmt));
	  lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
	  rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
	}

      /* We're looking for a relational test using LE/GE.  Also note we can
	 canonicalize LT/GT tests against constants into LE/GE tests.  */
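      /* For instance (illustrative): on an integral type, a test
	 i_2 < 10 is handled as i_2 <= 9, and i_2 > 10 as i_2 >= 11,
	 which is exactly the RHS adjustment performed just below.  */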
      if (code == LE_EXPR || code == GE_EXPR
	  || ((code == LT_EXPR || code == GT_EXPR)
	      && TREE_CODE (rhs) == INTEGER_CST))
	{
	  /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR.  */
	  if (code == LT_EXPR)
	    rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
			       rhs, build_int_cst (TREE_TYPE (rhs), 1));

	  if (code == GT_EXPR)
	    rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
			       rhs, build_int_cst (TREE_TYPE (rhs), 1));

	  /* Determine the code we want to check for in the hash table.  */
	  enum tree_code test_code;
	  if (code == GE_EXPR || code == GT_EXPR)
	    test_code = LE_EXPR;
	  else
	    test_code = GE_EXPR;

	  /* Update the dummy statement so we can query the hash tables.  */
	  gimple_cond_set_code (dummy_cond, test_code);
	  gimple_cond_set_lhs (dummy_cond, lhs);
	  gimple_cond_set_rhs (dummy_cond, rhs);
	  tree cached_lhs
	    = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);

	  /* If the lookup returned 1 (true), then the expression we
	     queried was in the hash table.  As a result there is only
	     one value that makes the original conditional true.  Update
	     STMT accordingly.  */
	  if (cached_lhs && integer_onep (cached_lhs))
	    {
	      if (is_gimple_assign (stmt))
		{
		  gimple_assign_set_rhs_code (stmt, EQ_EXPR);
		  gimple_assign_set_rhs2 (stmt, rhs);
		  gimple_set_modified (stmt, true);
		}
	      else
		{
		  gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
		  gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
		  gimple_set_modified (stmt, true);
		}
	    }
	}
    }
}

/* Optimize the statement in block BB pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.

   3- Very simple redundant store elimination is performed.

   4- We can simplify a condition to a constant or from a relational
      condition to an equality condition.  */

edge
dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator *si,
				   bool *removed_p)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (*si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
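  /* E.g. (illustrative): if cprop_into_stmt rewrote x_3 = a_1 + b_2
     into x_3 = 2 + 3, the fold below reduces it to x_3 = 5 before we
     query the available-expression table.  */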
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date.  */
      if (fold_stmt (si))
	{
	  stmt = gsi_stmt (*si);
	  gimple_set_modified (stmt, true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  Folded to: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
	rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	/* This should never be an ADDR_EXPR.  */
	rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
		    && (is_gimple_assign (stmt)
			|| (is_gimple_call (stmt)
			    && gimple_call_lhs (stmt) != NULL_TREE)
			|| gimple_code (stmt) == GIMPLE_COND
			|| gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant.  */
	  tree callee = gimple_call_fndecl (stmt);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
	    {
	      propagate_tree_value_into_stmt (si, integer_zero_node);
	      stmt = gsi_stmt (*si);
	    }
	}

      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  tree lhs = gimple_cond_lhs (stmt);
	  tree rhs = gimple_cond_rhs (stmt);

	  /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
	     then this conditional is computable at compile time.  We can
	     just shove either 0 or 1 into the LHS, mark the statement as
	     modified and all the right things will just happen below.

	     Note this would apply to any case where LHS has a range
	     narrower than its type implies and RHS is outside that
	     narrower range.  Future work.  */
	  if (TREE_CODE (lhs) == SSA_NAME
	      && ssa_name_has_boolean_range (lhs)
	      && TREE_CODE (rhs) == INTEGER_CST
	      && ! (integer_zerop (rhs) || integer_onep (rhs)))
	    {
	      gimple_cond_set_lhs (as_a <gcond *> (stmt),
				   fold_convert (TREE_TYPE (lhs),
						 integer_zero_node));
	      gimple_set_modified (stmt, true);
	    }
	  else if (TREE_CODE (lhs) == SSA_NAME)
	    {
	      /* Exploiting EVRP data is not yet fully integrated into DOM
		 but we need to do something for this case to avoid regressing
		 udr4.f90 and new1.C which have unexecutable blocks with
		 undefined behavior that get diagnosed if they're left in the
		 IL because we've attached range information to new
		 SSA_NAMES.  */
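	      /* E.g. (purely illustrative): if range analysis recorded
		 i_4 in [1, 5] on the current path and STMT is
		 if (i_4 > 0), the visit below resolves the test to the
		 true edge and the conditional is folded accordingly.  */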
	      update_stmt_if_modified (stmt);
	      edge taken_edge = NULL;
	      evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
						       &taken_edge);
	      if (taken_edge)
		{
		  if (taken_edge->flags & EDGE_TRUE_VALUE)
		    gimple_cond_make_true (as_a <gcond *> (stmt));
		  else if (taken_edge->flags & EDGE_FALSE_VALUE)
		    gimple_cond_make_false (as_a <gcond *> (stmt));
		  else
		    gcc_unreachable ();
		  gimple_set_modified (stmt, true);
		  update_stmt (stmt);
		  cfg_altered = true;
		  return taken_edge;
		}
	    }
	}

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (si, m_const_and_copies,
					m_avail_exprs_stack);
      stmt = gsi_stmt (*si);

      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree cached_lhs;
	  gassign *new_stmt;
	  rhs = dom_valueize (rhs);
	  /* Build a new statement with the RHS and LHS exchanged.  */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
							       false);
	  if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
	    {
	      basic_block bb = gimple_bb (stmt);
	      unlink_stmt_vdef (stmt);
	      if (gsi_remove (si, true))
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
		}
	      release_defs (stmt);
	      *removed_p = true;
	      return retval;
	    }
	}

      /* If this statement was not redundant, we may still be able to simplify
	 it, which may in turn allow other parts of DOM or other passes to do
	 a better job.  */
      test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (stmt),
			       gimple_cond_code (stmt), boolean_type_node,
			       gimple_cond_lhs (stmt),
			       gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
	{
	  retval = find_taken_edge (bb, val);
	  if (retval)
	    {
	      /* Fix the condition to be either true or false.  */
	      if (gimple_code (stmt) == GIMPLE_COND)
		{
		  if (integer_zerop (val))
		    gimple_cond_make_false (as_a <gcond *> (stmt));
		  else if (integer_onep (val))
		    gimple_cond_make_true (as_a <gcond *> (stmt));
		  else
		    gcc_unreachable ();

		  gimple_set_modified (stmt, true);
		}

	      /* Further simplifications may be possible.  */
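	      /* Worked instance (illustrative): if cprop replaced x_1
		 with 2 in if (x_1 > 4), VAL folds to 0, the conditional
		 becomes if (0), and RETVAL is the edge taken when the
		 condition is false; the dead true edge can then be
		 removed by later CFG cleanup.  */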
	      cfg_altered = true;
	    }
	}

      update_stmt_if_modified (stmt);

      /* If we simplified a statement in such a way that it can be shown
	 not to trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
	}

      if (!was_noreturn
	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
	need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}