1 /* Data references and dependences detectors. 2 Copyright (C) 2003-2019 Free Software Foundation, Inc. 3 Contributed by Sebastian Pop <pop@cri.ensmp.fr> 4 5 This file is part of GCC. 6 7 GCC is free software; you can redistribute it and/or modify it under 8 the terms of the GNU General Public License as published by the Free 9 Software Foundation; either version 3, or (at your option) any later 10 version. 11 12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY 13 WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with GCC; see the file COPYING3. If not see 19 <http://www.gnu.org/licenses/>. */ 20 21 /* This pass walks a given loop structure searching for array 22 references. The information about the array accesses is recorded 23 in DATA_REFERENCE structures. 24 25 The basic test for determining the dependences is: 26 given two access functions chrec1 and chrec2 to a same array, and 27 x and y two vectors from the iteration domain, the same element of 28 the array is accessed twice at iterations x and y if and only if: 29 | chrec1 (x) == chrec2 (y). 30 31 The goals of this analysis are: 32 33 - to determine the independence: the relation between two 34 independent accesses is qualified with the chrec_known (this 35 information allows a loop parallelization), 36 37 - when two data references access the same data, to qualify the 38 dependence relation with classic dependence representations: 39 40 - distance vectors 41 - direction vectors 42 - loop carried level dependence 43 - polyhedron dependence 44 or with the chains of recurrences based representation, 45 46 - to define a knowledge base for storing the data dependence 47 information, 48 49 - to define an interface to access this data. 50 51 52 Definitions: 53 54 - subscript: given two array accesses a subscript is the tuple 55 composed of the access functions for a given dimension. Example: 56 Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts: 57 (f1, g1), (f2, g2), (f3, g3). 58 59 - Diophantine equation: an equation whose coefficients and 60 solutions are integer constants, for example the equation 61 | 3*x + 2*y = 1 62 has an integer solution x = 1 and y = -1. 63 64 References: 65 66 - "Advanced Compilation for High Performance Computing" by Randy 67 Allen and Ken Kennedy. 68 http://citeseer.ist.psu.edu/goff91practical.html 69 70 - "Loop Transformations for Restructuring Compilers - The Foundations" 71 by Utpal Banerjee. 72 73 74 */ 75 76 #include "config.h" 77 #include "system.h" 78 #include "coretypes.h" 79 #include "backend.h" 80 #include "rtl.h" 81 #include "tree.h" 82 #include "gimple.h" 83 #include "gimple-pretty-print.h" 84 #include "alias.h" 85 #include "fold-const.h" 86 #include "expr.h" 87 #include "gimple-iterator.h" 88 #include "tree-ssa-loop-niter.h" 89 #include "tree-ssa-loop.h" 90 #include "tree-ssa.h" 91 #include "cfgloop.h" 92 #include "tree-data-ref.h" 93 #include "tree-scalar-evolution.h" 94 #include "dumpfile.h" 95 #include "tree-affine.h" 96 #include "params.h" 97 #include "builtins.h" 98 #include "tree-eh.h" 99 #include "ssa.h" 100 101 static struct datadep_stats 102 { 103 int num_dependence_tests; 104 int num_dependence_dependent; 105 int num_dependence_independent; 106 int num_dependence_undetermined; 107 108 int num_subscript_tests; 109 int num_subscript_undetermined; 110 int num_same_subscript_function; 111 112 int num_ziv; 113 int num_ziv_independent; 114 int num_ziv_dependent; 115 int num_ziv_unimplemented; 116 117 int num_siv; 118 int num_siv_independent; 119 int num_siv_dependent; 120 int num_siv_unimplemented; 121 122 int num_miv; 123 int num_miv_independent; 124 int num_miv_dependent; 125 int num_miv_unimplemented; 126 } dependence_stats; 127 128 static bool subscript_dependence_tester_1 (struct data_dependence_relation *, 129 unsigned int, unsigned int, 130 struct loop *); 131 /* Returns true iff A divides B. */ 132 133 static inline bool 134 tree_fold_divides_p (const_tree a, const_tree b) 135 { 136 gcc_assert (TREE_CODE (a) == INTEGER_CST); 137 gcc_assert (TREE_CODE (b) == INTEGER_CST); 138 return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a)); 139 } 140 141 /* Returns true iff A divides B. */ 142 143 static inline bool 144 int_divides_p (int a, int b) 145 { 146 return ((b % a) == 0); 147 } 148 149 /* Return true if reference REF contains a union access. */ 150 151 static bool 152 ref_contains_union_access_p (tree ref) 153 { 154 while (handled_component_p (ref)) 155 { 156 ref = TREE_OPERAND (ref, 0); 157 if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE 158 || TREE_CODE (TREE_TYPE (ref)) == QUAL_UNION_TYPE) 159 return true; 160 } 161 return false; 162 } 163 164 165 166 /* Dump into FILE all the data references from DATAREFS. */ 167 168 static void 169 dump_data_references (FILE *file, vec<data_reference_p> datarefs) 170 { 171 unsigned int i; 172 struct data_reference *dr; 173 174 FOR_EACH_VEC_ELT (datarefs, i, dr) 175 dump_data_reference (file, dr); 176 } 177 178 /* Unified dump into FILE all the data references from DATAREFS. */ 179 180 DEBUG_FUNCTION void 181 debug (vec<data_reference_p> &ref) 182 { 183 dump_data_references (stderr, ref); 184 } 185 186 DEBUG_FUNCTION void 187 debug (vec<data_reference_p> *ptr) 188 { 189 if (ptr) 190 debug (*ptr); 191 else 192 fprintf (stderr, "<nil>\n"); 193 } 194 195 196 /* Dump into STDERR all the data references from DATAREFS. */ 197 198 DEBUG_FUNCTION void 199 debug_data_references (vec<data_reference_p> datarefs) 200 { 201 dump_data_references (stderr, datarefs); 202 } 203 204 /* Print to STDERR the data_reference DR. */ 205 206 DEBUG_FUNCTION void 207 debug_data_reference (struct data_reference *dr) 208 { 209 dump_data_reference (stderr, dr); 210 } 211 212 /* Dump function for a DATA_REFERENCE structure. */ 213 214 void 215 dump_data_reference (FILE *outf, 216 struct data_reference *dr) 217 { 218 unsigned int i; 219 220 fprintf (outf, "#(Data Ref: \n"); 221 fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index); 222 fprintf (outf, "# stmt: "); 223 print_gimple_stmt (outf, DR_STMT (dr), 0); 224 fprintf (outf, "# ref: "); 225 print_generic_stmt (outf, DR_REF (dr)); 226 fprintf (outf, "# base_object: "); 227 print_generic_stmt (outf, DR_BASE_OBJECT (dr)); 228 229 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++) 230 { 231 fprintf (outf, "# Access function %d: ", i); 232 print_generic_stmt (outf, DR_ACCESS_FN (dr, i)); 233 } 234 fprintf (outf, "#)\n"); 235 } 236 237 /* Unified dump function for a DATA_REFERENCE structure. */ 238 239 DEBUG_FUNCTION void 240 debug (data_reference &ref) 241 { 242 dump_data_reference (stderr, &ref); 243 } 244 245 DEBUG_FUNCTION void 246 debug (data_reference *ptr) 247 { 248 if (ptr) 249 debug (*ptr); 250 else 251 fprintf (stderr, "<nil>\n"); 252 } 253 254 255 /* Dumps the affine function described by FN to the file OUTF. */ 256 257 DEBUG_FUNCTION void 258 dump_affine_function (FILE *outf, affine_fn fn) 259 { 260 unsigned i; 261 tree coef; 262 263 print_generic_expr (outf, fn[0], TDF_SLIM); 264 for (i = 1; fn.iterate (i, &coef); i++) 265 { 266 fprintf (outf, " + "); 267 print_generic_expr (outf, coef, TDF_SLIM); 268 fprintf (outf, " * x_%u", i); 269 } 270 } 271 272 /* Dumps the conflict function CF to the file OUTF. */ 273 274 DEBUG_FUNCTION void 275 dump_conflict_function (FILE *outf, conflict_function *cf) 276 { 277 unsigned i; 278 279 if (cf->n == NO_DEPENDENCE) 280 fprintf (outf, "no dependence"); 281 else if (cf->n == NOT_KNOWN) 282 fprintf (outf, "not known"); 283 else 284 { 285 for (i = 0; i < cf->n; i++) 286 { 287 if (i != 0) 288 fprintf (outf, " "); 289 fprintf (outf, "["); 290 dump_affine_function (outf, cf->fns[i]); 291 fprintf (outf, "]"); 292 } 293 } 294 } 295 296 /* Dump function for a SUBSCRIPT structure. */ 297 298 DEBUG_FUNCTION void 299 dump_subscript (FILE *outf, struct subscript *subscript) 300 { 301 conflict_function *cf = SUB_CONFLICTS_IN_A (subscript); 302 303 fprintf (outf, "\n (subscript \n"); 304 fprintf (outf, " iterations_that_access_an_element_twice_in_A: "); 305 dump_conflict_function (outf, cf); 306 if (CF_NONTRIVIAL_P (cf)) 307 { 308 tree last_iteration = SUB_LAST_CONFLICT (subscript); 309 fprintf (outf, "\n last_conflict: "); 310 print_generic_expr (outf, last_iteration); 311 } 312 313 cf = SUB_CONFLICTS_IN_B (subscript); 314 fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: "); 315 dump_conflict_function (outf, cf); 316 if (CF_NONTRIVIAL_P (cf)) 317 { 318 tree last_iteration = SUB_LAST_CONFLICT (subscript); 319 fprintf (outf, "\n last_conflict: "); 320 print_generic_expr (outf, last_iteration); 321 } 322 323 fprintf (outf, "\n (Subscript distance: "); 324 print_generic_expr (outf, SUB_DISTANCE (subscript)); 325 fprintf (outf, " ))\n"); 326 } 327 328 /* Print the classic direction vector DIRV to OUTF. */ 329 330 DEBUG_FUNCTION void 331 print_direction_vector (FILE *outf, 332 lambda_vector dirv, 333 int length) 334 { 335 int eq; 336 337 for (eq = 0; eq < length; eq++) 338 { 339 enum data_dependence_direction dir = ((enum data_dependence_direction) 340 dirv[eq]); 341 342 switch (dir) 343 { 344 case dir_positive: 345 fprintf (outf, " +"); 346 break; 347 case dir_negative: 348 fprintf (outf, " -"); 349 break; 350 case dir_equal: 351 fprintf (outf, " ="); 352 break; 353 case dir_positive_or_equal: 354 fprintf (outf, " +="); 355 break; 356 case dir_positive_or_negative: 357 fprintf (outf, " +-"); 358 break; 359 case dir_negative_or_equal: 360 fprintf (outf, " -="); 361 break; 362 case dir_star: 363 fprintf (outf, " *"); 364 break; 365 default: 366 fprintf (outf, "indep"); 367 break; 368 } 369 } 370 fprintf (outf, "\n"); 371 } 372 373 /* Print a vector of direction vectors. */ 374 375 DEBUG_FUNCTION void 376 print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects, 377 int length) 378 { 379 unsigned j; 380 lambda_vector v; 381 382 FOR_EACH_VEC_ELT (dir_vects, j, v) 383 print_direction_vector (outf, v, length); 384 } 385 386 /* Print out a vector VEC of length N to OUTFILE. */ 387 388 DEBUG_FUNCTION void 389 print_lambda_vector (FILE * outfile, lambda_vector vector, int n) 390 { 391 int i; 392 393 for (i = 0; i < n; i++) 394 fprintf (outfile, "%3d ", (int)vector[i]); 395 fprintf (outfile, "\n"); 396 } 397 398 /* Print a vector of distance vectors. */ 399 400 DEBUG_FUNCTION void 401 print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects, 402 int length) 403 { 404 unsigned j; 405 lambda_vector v; 406 407 FOR_EACH_VEC_ELT (dist_vects, j, v) 408 print_lambda_vector (outf, v, length); 409 } 410 411 /* Dump function for a DATA_DEPENDENCE_RELATION structure. */ 412 413 DEBUG_FUNCTION void 414 dump_data_dependence_relation (FILE *outf, 415 struct data_dependence_relation *ddr) 416 { 417 struct data_reference *dra, *drb; 418 419 fprintf (outf, "(Data Dep: \n"); 420 421 if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) 422 { 423 if (ddr) 424 { 425 dra = DDR_A (ddr); 426 drb = DDR_B (ddr); 427 if (dra) 428 dump_data_reference (outf, dra); 429 else 430 fprintf (outf, " (nil)\n"); 431 if (drb) 432 dump_data_reference (outf, drb); 433 else 434 fprintf (outf, " (nil)\n"); 435 } 436 fprintf (outf, " (don't know)\n)\n"); 437 return; 438 } 439 440 dra = DDR_A (ddr); 441 drb = DDR_B (ddr); 442 dump_data_reference (outf, dra); 443 dump_data_reference (outf, drb); 444 445 if (DDR_ARE_DEPENDENT (ddr) == chrec_known) 446 fprintf (outf, " (no dependence)\n"); 447 448 else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 449 { 450 unsigned int i; 451 struct loop *loopi; 452 453 subscript *sub; 454 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub) 455 { 456 fprintf (outf, " access_fn_A: "); 457 print_generic_stmt (outf, SUB_ACCESS_FN (sub, 0)); 458 fprintf (outf, " access_fn_B: "); 459 print_generic_stmt (outf, SUB_ACCESS_FN (sub, 1)); 460 dump_subscript (outf, sub); 461 } 462 463 fprintf (outf, " inner loop index: %d\n", DDR_INNER_LOOP (ddr)); 464 fprintf (outf, " loop nest: ("); 465 FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi) 466 fprintf (outf, "%d ", loopi->num); 467 fprintf (outf, ")\n"); 468 469 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++) 470 { 471 fprintf (outf, " distance_vector: "); 472 print_lambda_vector (outf, DDR_DIST_VECT (ddr, i), 473 DDR_NB_LOOPS (ddr)); 474 } 475 476 for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++) 477 { 478 fprintf (outf, " direction_vector: "); 479 print_direction_vector (outf, DDR_DIR_VECT (ddr, i), 480 DDR_NB_LOOPS (ddr)); 481 } 482 } 483 484 fprintf (outf, ")\n"); 485 } 486 487 /* Debug version. */ 488 489 DEBUG_FUNCTION void 490 debug_data_dependence_relation (struct data_dependence_relation *ddr) 491 { 492 dump_data_dependence_relation (stderr, ddr); 493 } 494 495 /* Dump into FILE all the dependence relations from DDRS. */ 496 497 DEBUG_FUNCTION void 498 dump_data_dependence_relations (FILE *file, 499 vec<ddr_p> ddrs) 500 { 501 unsigned int i; 502 struct data_dependence_relation *ddr; 503 504 FOR_EACH_VEC_ELT (ddrs, i, ddr) 505 dump_data_dependence_relation (file, ddr); 506 } 507 508 DEBUG_FUNCTION void 509 debug (vec<ddr_p> &ref) 510 { 511 dump_data_dependence_relations (stderr, ref); 512 } 513 514 DEBUG_FUNCTION void 515 debug (vec<ddr_p> *ptr) 516 { 517 if (ptr) 518 debug (*ptr); 519 else 520 fprintf (stderr, "<nil>\n"); 521 } 522 523 524 /* Dump to STDERR all the dependence relations from DDRS. */ 525 526 DEBUG_FUNCTION void 527 debug_data_dependence_relations (vec<ddr_p> ddrs) 528 { 529 dump_data_dependence_relations (stderr, ddrs); 530 } 531 532 /* Dumps the distance and direction vectors in FILE. DDRS contains 533 the dependence relations, and VECT_SIZE is the size of the 534 dependence vectors, or in other words the number of loops in the 535 considered nest. */ 536 537 DEBUG_FUNCTION void 538 dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs) 539 { 540 unsigned int i, j; 541 struct data_dependence_relation *ddr; 542 lambda_vector v; 543 544 FOR_EACH_VEC_ELT (ddrs, i, ddr) 545 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr)) 546 { 547 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v) 548 { 549 fprintf (file, "DISTANCE_V ("); 550 print_lambda_vector (file, v, DDR_NB_LOOPS (ddr)); 551 fprintf (file, ")\n"); 552 } 553 554 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v) 555 { 556 fprintf (file, "DIRECTION_V ("); 557 print_direction_vector (file, v, DDR_NB_LOOPS (ddr)); 558 fprintf (file, ")\n"); 559 } 560 } 561 562 fprintf (file, "\n\n"); 563 } 564 565 /* Dumps the data dependence relations DDRS in FILE. */ 566 567 DEBUG_FUNCTION void 568 dump_ddrs (FILE *file, vec<ddr_p> ddrs) 569 { 570 unsigned int i; 571 struct data_dependence_relation *ddr; 572 573 FOR_EACH_VEC_ELT (ddrs, i, ddr) 574 dump_data_dependence_relation (file, ddr); 575 576 fprintf (file, "\n\n"); 577 } 578 579 DEBUG_FUNCTION void 580 debug_ddrs (vec<ddr_p> ddrs) 581 { 582 dump_ddrs (stderr, ddrs); 583 } 584 585 static void 586 split_constant_offset (tree exp, tree *var, tree *off, 587 hash_map<tree, std::pair<tree, tree> > &cache, 588 unsigned *limit); 589 590 /* Helper function for split_constant_offset. Expresses OP0 CODE OP1 591 (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero 592 constant of type ssizetype, and returns true. If we cannot do this 593 with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false 594 is returned. */ 595 596 static bool 597 split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1, 598 tree *var, tree *off, 599 hash_map<tree, std::pair<tree, tree> > &cache, 600 unsigned *limit) 601 { 602 tree var0, var1; 603 tree off0, off1; 604 enum tree_code ocode = code; 605 606 *var = NULL_TREE; 607 *off = NULL_TREE; 608 609 switch (code) 610 { 611 case INTEGER_CST: 612 *var = build_int_cst (type, 0); 613 *off = fold_convert (ssizetype, op0); 614 return true; 615 616 case POINTER_PLUS_EXPR: 617 ocode = PLUS_EXPR; 618 /* FALLTHROUGH */ 619 case PLUS_EXPR: 620 case MINUS_EXPR: 621 if (TREE_CODE (op1) == INTEGER_CST) 622 { 623 split_constant_offset (op0, &var0, &off0, cache, limit); 624 *var = var0; 625 *off = size_binop (ocode, off0, fold_convert (ssizetype, op1)); 626 return true; 627 } 628 split_constant_offset (op0, &var0, &off0, cache, limit); 629 split_constant_offset (op1, &var1, &off1, cache, limit); 630 *var = fold_build2 (code, type, var0, var1); 631 *off = size_binop (ocode, off0, off1); 632 return true; 633 634 case MULT_EXPR: 635 if (TREE_CODE (op1) != INTEGER_CST) 636 return false; 637 638 split_constant_offset (op0, &var0, &off0, cache, limit); 639 *var = fold_build2 (MULT_EXPR, type, var0, op1); 640 *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1)); 641 return true; 642 643 case ADDR_EXPR: 644 { 645 tree base, poffset; 646 poly_int64 pbitsize, pbitpos, pbytepos; 647 machine_mode pmode; 648 int punsignedp, preversep, pvolatilep; 649 650 op0 = TREE_OPERAND (op0, 0); 651 base 652 = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode, 653 &punsignedp, &preversep, &pvolatilep); 654 655 if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos)) 656 return false; 657 base = build_fold_addr_expr (base); 658 off0 = ssize_int (pbytepos); 659 660 if (poffset) 661 { 662 split_constant_offset (poffset, &poffset, &off1, cache, limit); 663 off0 = size_binop (PLUS_EXPR, off0, off1); 664 if (POINTER_TYPE_P (TREE_TYPE (base))) 665 base = fold_build_pointer_plus (base, poffset); 666 else 667 base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base, 668 fold_convert (TREE_TYPE (base), poffset)); 669 } 670 671 var0 = fold_convert (type, base); 672 673 /* If variable length types are involved, punt, otherwise casts 674 might be converted into ARRAY_REFs in gimplify_conversion. 675 To compute that ARRAY_REF's element size TYPE_SIZE_UNIT, which 676 possibly no longer appears in current GIMPLE, might resurface. 677 This perhaps could run 678 if (CONVERT_EXPR_P (var0)) 679 { 680 gimplify_conversion (&var0); 681 // Attempt to fill in any within var0 found ARRAY_REF's 682 // element size from corresponding op embedded ARRAY_REF, 683 // if unsuccessful, just punt. 684 } */ 685 while (POINTER_TYPE_P (type)) 686 type = TREE_TYPE (type); 687 if (int_size_in_bytes (type) < 0) 688 return false; 689 690 *var = var0; 691 *off = off0; 692 return true; 693 } 694 695 case SSA_NAME: 696 { 697 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0)) 698 return false; 699 700 gimple *def_stmt = SSA_NAME_DEF_STMT (op0); 701 enum tree_code subcode; 702 703 if (gimple_code (def_stmt) != GIMPLE_ASSIGN) 704 return false; 705 706 subcode = gimple_assign_rhs_code (def_stmt); 707 708 /* We are using a cache to avoid un-CSEing large amounts of code. */ 709 bool use_cache = false; 710 if (!has_single_use (op0) 711 && (subcode == POINTER_PLUS_EXPR 712 || subcode == PLUS_EXPR 713 || subcode == MINUS_EXPR 714 || subcode == MULT_EXPR 715 || subcode == ADDR_EXPR 716 || CONVERT_EXPR_CODE_P (subcode))) 717 { 718 use_cache = true; 719 bool existed; 720 std::pair<tree, tree> &e = cache.get_or_insert (op0, &existed); 721 if (existed) 722 { 723 if (integer_zerop (e.second)) 724 return false; 725 *var = e.first; 726 *off = e.second; 727 return true; 728 } 729 e = std::make_pair (op0, ssize_int (0)); 730 } 731 732 if (*limit == 0) 733 return false; 734 --*limit; 735 736 var0 = gimple_assign_rhs1 (def_stmt); 737 var1 = gimple_assign_rhs2 (def_stmt); 738 739 bool res = split_constant_offset_1 (type, var0, subcode, var1, 740 var, off, cache, limit); 741 if (res && use_cache) 742 *cache.get (op0) = std::make_pair (*var, *off); 743 return res; 744 } 745 CASE_CONVERT: 746 { 747 /* We must not introduce undefined overflow, and we must not change 748 the value. Hence we're okay if the inner type doesn't overflow 749 to start with (pointer or signed), the outer type also is an 750 integer or pointer and the outer precision is at least as large 751 as the inner. */ 752 tree itype = TREE_TYPE (op0); 753 if ((POINTER_TYPE_P (itype) 754 || (INTEGRAL_TYPE_P (itype) && !TYPE_OVERFLOW_TRAPS (itype))) 755 && TYPE_PRECISION (type) >= TYPE_PRECISION (itype) 756 && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))) 757 { 758 if (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_WRAPS (itype)) 759 { 760 /* Split the unconverted operand and try to prove that 761 wrapping isn't a problem. */ 762 tree tmp_var, tmp_off; 763 split_constant_offset (op0, &tmp_var, &tmp_off, cache, limit); 764 765 /* See whether we have an SSA_NAME whose range is known 766 to be [A, B]. */ 767 if (TREE_CODE (tmp_var) != SSA_NAME) 768 return false; 769 wide_int var_min, var_max; 770 value_range_kind vr_type = get_range_info (tmp_var, &var_min, 771 &var_max); 772 wide_int var_nonzero = get_nonzero_bits (tmp_var); 773 signop sgn = TYPE_SIGN (itype); 774 if (intersect_range_with_nonzero_bits (vr_type, &var_min, 775 &var_max, var_nonzero, 776 sgn) != VR_RANGE) 777 return false; 778 779 /* See whether the range of OP0 (i.e. TMP_VAR + TMP_OFF) 780 is known to be [A + TMP_OFF, B + TMP_OFF], with all 781 operations done in ITYPE. The addition must overflow 782 at both ends of the range or at neither. */ 783 wi::overflow_type overflow[2]; 784 unsigned int prec = TYPE_PRECISION (itype); 785 wide_int woff = wi::to_wide (tmp_off, prec); 786 wide_int op0_min = wi::add (var_min, woff, sgn, &overflow[0]); 787 wi::add (var_max, woff, sgn, &overflow[1]); 788 if ((overflow[0] != wi::OVF_NONE) != (overflow[1] != wi::OVF_NONE)) 789 return false; 790 791 /* Calculate (ssizetype) OP0 - (ssizetype) TMP_VAR. */ 792 widest_int diff = (widest_int::from (op0_min, sgn) 793 - widest_int::from (var_min, sgn)); 794 var0 = tmp_var; 795 *off = wide_int_to_tree (ssizetype, diff); 796 } 797 else 798 split_constant_offset (op0, &var0, off, cache, limit); 799 *var = fold_convert (type, var0); 800 return true; 801 } 802 return false; 803 } 804 805 default: 806 return false; 807 } 808 } 809 810 /* Expresses EXP as VAR + OFF, where off is a constant. The type of OFF 811 will be ssizetype. */ 812 813 static void 814 split_constant_offset (tree exp, tree *var, tree *off, 815 hash_map<tree, std::pair<tree, tree> > &cache, 816 unsigned *limit) 817 { 818 tree type = TREE_TYPE (exp), op0, op1, e, o; 819 enum tree_code code; 820 821 *var = exp; 822 *off = ssize_int (0); 823 824 if (tree_is_chrec (exp) 825 || get_gimple_rhs_class (TREE_CODE (exp)) == GIMPLE_TERNARY_RHS) 826 return; 827 828 code = TREE_CODE (exp); 829 extract_ops_from_tree (exp, &code, &op0, &op1); 830 if (split_constant_offset_1 (type, op0, code, op1, &e, &o, cache, limit)) 831 { 832 *var = e; 833 *off = o; 834 } 835 } 836 837 void 838 split_constant_offset (tree exp, tree *var, tree *off) 839 { 840 unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT); 841 static hash_map<tree, std::pair<tree, tree> > *cache; 842 if (!cache) 843 cache = new hash_map<tree, std::pair<tree, tree> > (37); 844 split_constant_offset (exp, var, off, *cache, &limit); 845 cache->empty (); 846 } 847 848 /* Returns the address ADDR of an object in a canonical shape (without nop 849 casts, and with type of pointer to the object). */ 850 851 static tree 852 canonicalize_base_object_address (tree addr) 853 { 854 tree orig = addr; 855 856 STRIP_NOPS (addr); 857 858 /* The base address may be obtained by casting from integer, in that case 859 keep the cast. */ 860 if (!POINTER_TYPE_P (TREE_TYPE (addr))) 861 return orig; 862 863 if (TREE_CODE (addr) != ADDR_EXPR) 864 return addr; 865 866 return build_fold_addr_expr (TREE_OPERAND (addr, 0)); 867 } 868 869 /* Analyze the behavior of memory reference REF within STMT. 870 There are two modes: 871 872 - BB analysis. In this case we simply split the address into base, 873 init and offset components, without reference to any containing loop. 874 The resulting base and offset are general expressions and they can 875 vary arbitrarily from one iteration of the containing loop to the next. 876 The step is always zero. 877 878 - loop analysis. In this case we analyze the reference both wrt LOOP 879 and on the basis that the reference occurs (is "used") in LOOP; 880 see the comment above analyze_scalar_evolution_in_loop for more 881 information about this distinction. The base, init, offset and 882 step fields are all invariant in LOOP. 883 884 Perform BB analysis if LOOP is null, or if LOOP is the function's 885 dummy outermost loop. In other cases perform loop analysis. 886 887 Return true if the analysis succeeded and store the results in DRB if so. 888 BB analysis can only fail for bitfield or reversed-storage accesses. */ 889 890 opt_result 891 dr_analyze_innermost (innermost_loop_behavior *drb, tree ref, 892 struct loop *loop, const gimple *stmt) 893 { 894 poly_int64 pbitsize, pbitpos; 895 tree base, poffset; 896 machine_mode pmode; 897 int punsignedp, preversep, pvolatilep; 898 affine_iv base_iv, offset_iv; 899 tree init, dinit, step; 900 bool in_loop = (loop && loop->num); 901 902 if (dump_file && (dump_flags & TDF_DETAILS)) 903 fprintf (dump_file, "analyze_innermost: "); 904 905 base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode, 906 &punsignedp, &preversep, &pvolatilep); 907 gcc_assert (base != NULL_TREE); 908 909 poly_int64 pbytepos; 910 if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos)) 911 return opt_result::failure_at (stmt, 912 "failed: bit offset alignment.\n"); 913 914 if (preversep) 915 return opt_result::failure_at (stmt, 916 "failed: reverse storage order.\n"); 917 918 /* Calculate the alignment and misalignment for the inner reference. */ 919 unsigned int HOST_WIDE_INT bit_base_misalignment; 920 unsigned int bit_base_alignment; 921 get_object_alignment_1 (base, &bit_base_alignment, &bit_base_misalignment); 922 923 /* There are no bitfield references remaining in BASE, so the values 924 we got back must be whole bytes. */ 925 gcc_assert (bit_base_alignment % BITS_PER_UNIT == 0 926 && bit_base_misalignment % BITS_PER_UNIT == 0); 927 unsigned int base_alignment = bit_base_alignment / BITS_PER_UNIT; 928 poly_int64 base_misalignment = bit_base_misalignment / BITS_PER_UNIT; 929 930 if (TREE_CODE (base) == MEM_REF) 931 { 932 if (!integer_zerop (TREE_OPERAND (base, 1))) 933 { 934 /* Subtract MOFF from the base and add it to POFFSET instead. 935 Adjust the misalignment to reflect the amount we subtracted. */ 936 poly_offset_int moff = mem_ref_offset (base); 937 base_misalignment -= moff.force_shwi (); 938 tree mofft = wide_int_to_tree (sizetype, moff); 939 if (!poffset) 940 poffset = mofft; 941 else 942 poffset = size_binop (PLUS_EXPR, poffset, mofft); 943 } 944 base = TREE_OPERAND (base, 0); 945 } 946 else 947 base = build_fold_addr_expr (base); 948 949 if (in_loop) 950 { 951 if (!simple_iv (loop, loop, base, &base_iv, true)) 952 return opt_result::failure_at 953 (stmt, "failed: evolution of base is not affine.\n"); 954 } 955 else 956 { 957 base_iv.base = base; 958 base_iv.step = ssize_int (0); 959 base_iv.no_overflow = true; 960 } 961 962 if (!poffset) 963 { 964 offset_iv.base = ssize_int (0); 965 offset_iv.step = ssize_int (0); 966 } 967 else 968 { 969 if (!in_loop) 970 { 971 offset_iv.base = poffset; 972 offset_iv.step = ssize_int (0); 973 } 974 else if (!simple_iv (loop, loop, poffset, &offset_iv, true)) 975 return opt_result::failure_at 976 (stmt, "failed: evolution of offset is not affine.\n"); 977 } 978 979 init = ssize_int (pbytepos); 980 981 /* Subtract any constant component from the base and add it to INIT instead. 982 Adjust the misalignment to reflect the amount we subtracted. */ 983 split_constant_offset (base_iv.base, &base_iv.base, &dinit); 984 init = size_binop (PLUS_EXPR, init, dinit); 985 base_misalignment -= TREE_INT_CST_LOW (dinit); 986 987 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit); 988 init = size_binop (PLUS_EXPR, init, dinit); 989 990 step = size_binop (PLUS_EXPR, 991 fold_convert (ssizetype, base_iv.step), 992 fold_convert (ssizetype, offset_iv.step)); 993 994 base = canonicalize_base_object_address (base_iv.base); 995 996 /* See if get_pointer_alignment can guarantee a higher alignment than 997 the one we calculated above. */ 998 unsigned int HOST_WIDE_INT alt_misalignment; 999 unsigned int alt_alignment; 1000 get_pointer_alignment_1 (base, &alt_alignment, &alt_misalignment); 1001 1002 /* As above, these values must be whole bytes. */ 1003 gcc_assert (alt_alignment % BITS_PER_UNIT == 0 1004 && alt_misalignment % BITS_PER_UNIT == 0); 1005 alt_alignment /= BITS_PER_UNIT; 1006 alt_misalignment /= BITS_PER_UNIT; 1007 1008 if (base_alignment < alt_alignment) 1009 { 1010 base_alignment = alt_alignment; 1011 base_misalignment = alt_misalignment; 1012 } 1013 1014 drb->base_address = base; 1015 drb->offset = fold_convert (ssizetype, offset_iv.base); 1016 drb->init = init; 1017 drb->step = step; 1018 if (known_misalignment (base_misalignment, base_alignment, 1019 &drb->base_misalignment)) 1020 drb->base_alignment = base_alignment; 1021 else 1022 { 1023 drb->base_alignment = known_alignment (base_misalignment); 1024 drb->base_misalignment = 0; 1025 } 1026 drb->offset_alignment = highest_pow2_factor (offset_iv.base); 1027 drb->step_alignment = highest_pow2_factor (step); 1028 1029 if (dump_file && (dump_flags & TDF_DETAILS)) 1030 fprintf (dump_file, "success.\n"); 1031 1032 return opt_result::success (); 1033 } 1034 1035 /* Return true if OP is a valid component reference for a DR access 1036 function. This accepts a subset of what handled_component_p accepts. */ 1037 1038 static bool 1039 access_fn_component_p (tree op) 1040 { 1041 switch (TREE_CODE (op)) 1042 { 1043 case REALPART_EXPR: 1044 case IMAGPART_EXPR: 1045 case ARRAY_REF: 1046 return true; 1047 1048 case COMPONENT_REF: 1049 return TREE_CODE (TREE_TYPE (TREE_OPERAND (op, 0))) == RECORD_TYPE; 1050 1051 default: 1052 return false; 1053 } 1054 } 1055 1056 /* Determines the base object and the list of indices of memory reference 1057 DR, analyzed in LOOP and instantiated before NEST. */ 1058 1059 static void 1060 dr_analyze_indices (struct data_reference *dr, edge nest, loop_p loop) 1061 { 1062 vec<tree> access_fns = vNULL; 1063 tree ref, op; 1064 tree base, off, access_fn; 1065 1066 /* If analyzing a basic-block there are no indices to analyze 1067 and thus no access functions. */ 1068 if (!nest) 1069 { 1070 DR_BASE_OBJECT (dr) = DR_REF (dr); 1071 DR_ACCESS_FNS (dr).create (0); 1072 return; 1073 } 1074 1075 ref = DR_REF (dr); 1076 1077 /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses 1078 into a two element array with a constant index. The base is 1079 then just the immediate underlying object. */ 1080 if (TREE_CODE (ref) == REALPART_EXPR) 1081 { 1082 ref = TREE_OPERAND (ref, 0); 1083 access_fns.safe_push (integer_zero_node); 1084 } 1085 else if (TREE_CODE (ref) == IMAGPART_EXPR) 1086 { 1087 ref = TREE_OPERAND (ref, 0); 1088 access_fns.safe_push (integer_one_node); 1089 } 1090 1091 /* Analyze access functions of dimensions we know to be independent. 1092 The list of component references handled here should be kept in 1093 sync with access_fn_component_p. */ 1094 while (handled_component_p (ref)) 1095 { 1096 if (TREE_CODE (ref) == ARRAY_REF) 1097 { 1098 op = TREE_OPERAND (ref, 1); 1099 access_fn = analyze_scalar_evolution (loop, op); 1100 access_fn = instantiate_scev (nest, loop, access_fn); 1101 access_fns.safe_push (access_fn); 1102 } 1103 else if (TREE_CODE (ref) == COMPONENT_REF 1104 && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE) 1105 { 1106 /* For COMPONENT_REFs of records (but not unions!) use the 1107 FIELD_DECL offset as constant access function so we can 1108 disambiguate a[i].f1 and a[i].f2. */ 1109 tree off = component_ref_field_offset (ref); 1110 off = size_binop (PLUS_EXPR, 1111 size_binop (MULT_EXPR, 1112 fold_convert (bitsizetype, off), 1113 bitsize_int (BITS_PER_UNIT)), 1114 DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1))); 1115 access_fns.safe_push (off); 1116 } 1117 else 1118 /* If we have an unhandled component we could not translate 1119 to an access function stop analyzing. We have determined 1120 our base object in this case. */ 1121 break; 1122 1123 ref = TREE_OPERAND (ref, 0); 1124 } 1125 1126 /* If the address operand of a MEM_REF base has an evolution in the 1127 analyzed nest, add it as an additional independent access-function. */ 1128 if (TREE_CODE (ref) == MEM_REF) 1129 { 1130 op = TREE_OPERAND (ref, 0); 1131 access_fn = analyze_scalar_evolution (loop, op); 1132 access_fn = instantiate_scev (nest, loop, access_fn); 1133 if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC) 1134 { 1135 tree orig_type; 1136 tree memoff = TREE_OPERAND (ref, 1); 1137 base = initial_condition (access_fn); 1138 orig_type = TREE_TYPE (base); 1139 STRIP_USELESS_TYPE_CONVERSION (base); 1140 split_constant_offset (base, &base, &off); 1141 STRIP_USELESS_TYPE_CONVERSION (base); 1142 /* Fold the MEM_REF offset into the evolutions initial 1143 value to make more bases comparable. */ 1144 if (!integer_zerop (memoff)) 1145 { 1146 off = size_binop (PLUS_EXPR, off, 1147 fold_convert (ssizetype, memoff)); 1148 memoff = build_int_cst (TREE_TYPE (memoff), 0); 1149 } 1150 /* Adjust the offset so it is a multiple of the access type 1151 size and thus we separate bases that can possibly be used 1152 to produce partial overlaps (which the access_fn machinery 1153 cannot handle). */ 1154 wide_int rem; 1155 if (TYPE_SIZE_UNIT (TREE_TYPE (ref)) 1156 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST 1157 && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref)))) 1158 rem = wi::mod_trunc 1159 (wi::to_wide (off), 1160 wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))), 1161 SIGNED); 1162 else 1163 /* If we can't compute the remainder simply force the initial 1164 condition to zero. */ 1165 rem = wi::to_wide (off); 1166 off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem); 1167 memoff = wide_int_to_tree (TREE_TYPE (memoff), rem); 1168 /* And finally replace the initial condition. */ 1169 access_fn = chrec_replace_initial_condition 1170 (access_fn, fold_convert (orig_type, off)); 1171 /* ??? This is still not a suitable base object for 1172 dr_may_alias_p - the base object needs to be an 1173 access that covers the object as whole. With 1174 an evolution in the pointer this cannot be 1175 guaranteed. 1176 As a band-aid, mark the access so we can special-case 1177 it in dr_may_alias_p. */ 1178 tree old = ref; 1179 ref = fold_build2_loc (EXPR_LOCATION (ref), 1180 MEM_REF, TREE_TYPE (ref), 1181 base, memoff); 1182 MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old); 1183 MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old); 1184 DR_UNCONSTRAINED_BASE (dr) = true; 1185 access_fns.safe_push (access_fn); 1186 } 1187 } 1188 else if (DECL_P (ref)) 1189 { 1190 /* Canonicalize DR_BASE_OBJECT to MEM_REF form. */ 1191 ref = build2 (MEM_REF, TREE_TYPE (ref), 1192 build_fold_addr_expr (ref), 1193 build_int_cst (reference_alias_ptr_type (ref), 0)); 1194 } 1195 1196 DR_BASE_OBJECT (dr) = ref; 1197 DR_ACCESS_FNS (dr) = access_fns; 1198 } 1199 1200 /* Extracts the alias analysis information from the memory reference DR. */ 1201 1202 static void 1203 dr_analyze_alias (struct data_reference *dr) 1204 { 1205 tree ref = DR_REF (dr); 1206 tree base = get_base_address (ref), addr; 1207 1208 if (INDIRECT_REF_P (base) 1209 || TREE_CODE (base) == MEM_REF) 1210 { 1211 addr = TREE_OPERAND (base, 0); 1212 if (TREE_CODE (addr) == SSA_NAME) 1213 DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr); 1214 } 1215 } 1216 1217 /* Frees data reference DR. */ 1218 1219 void 1220 free_data_ref (data_reference_p dr) 1221 { 1222 DR_ACCESS_FNS (dr).release (); 1223 free (dr); 1224 } 1225 1226 /* Analyze memory reference MEMREF, which is accessed in STMT. 1227 The reference is a read if IS_READ is true, otherwise it is a write. 1228 IS_CONDITIONAL_IN_STMT indicates that the reference is conditional 1229 within STMT, i.e. that it might not occur even if STMT is executed 1230 and runs to completion. 1231 1232 Return the data_reference description of MEMREF. NEST is the outermost 1233 loop in which the reference should be instantiated, LOOP is the loop 1234 in which the data reference should be analyzed. */ 1235 1236 struct data_reference * 1237 create_data_ref (edge nest, loop_p loop, tree memref, gimple *stmt, 1238 bool is_read, bool is_conditional_in_stmt) 1239 { 1240 struct data_reference *dr; 1241 1242 if (dump_file && (dump_flags & TDF_DETAILS)) 1243 { 1244 fprintf (dump_file, "Creating dr for "); 1245 print_generic_expr (dump_file, memref, TDF_SLIM); 1246 fprintf (dump_file, "\n"); 1247 } 1248 1249 dr = XCNEW (struct data_reference); 1250 DR_STMT (dr) = stmt; 1251 DR_REF (dr) = memref; 1252 DR_IS_READ (dr) = is_read; 1253 DR_IS_CONDITIONAL_IN_STMT (dr) = is_conditional_in_stmt; 1254 1255 dr_analyze_innermost (&DR_INNERMOST (dr), memref, 1256 nest != NULL ? loop : NULL, stmt); 1257 dr_analyze_indices (dr, nest, loop); 1258 dr_analyze_alias (dr); 1259 1260 if (dump_file && (dump_flags & TDF_DETAILS)) 1261 { 1262 unsigned i; 1263 fprintf (dump_file, "\tbase_address: "); 1264 print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM); 1265 fprintf (dump_file, "\n\toffset from base address: "); 1266 print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM); 1267 fprintf (dump_file, "\n\tconstant offset from base address: "); 1268 print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM); 1269 fprintf (dump_file, "\n\tstep: "); 1270 print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM); 1271 fprintf (dump_file, "\n\tbase alignment: %d", DR_BASE_ALIGNMENT (dr)); 1272 fprintf (dump_file, "\n\tbase misalignment: %d", 1273 DR_BASE_MISALIGNMENT (dr)); 1274 fprintf (dump_file, "\n\toffset alignment: %d", 1275 DR_OFFSET_ALIGNMENT (dr)); 1276 fprintf (dump_file, "\n\tstep alignment: %d", DR_STEP_ALIGNMENT (dr)); 1277 fprintf (dump_file, "\n\tbase_object: "); 1278 print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM); 1279 fprintf (dump_file, "\n"); 1280 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++) 1281 { 1282 fprintf (dump_file, "\tAccess function %d: ", i); 1283 print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM); 1284 } 1285 } 1286 1287 return dr; 1288 } 1289 1290 /* A helper function computes order between two tree epxressions T1 and T2. 1291 This is used in comparator functions sorting objects based on the order 1292 of tree expressions. The function returns -1, 0, or 1. */ 1293 1294 int 1295 data_ref_compare_tree (tree t1, tree t2) 1296 { 1297 int i, cmp; 1298 enum tree_code code; 1299 char tclass; 1300 1301 if (t1 == t2) 1302 return 0; 1303 if (t1 == NULL) 1304 return -1; 1305 if (t2 == NULL) 1306 return 1; 1307 1308 STRIP_USELESS_TYPE_CONVERSION (t1); 1309 STRIP_USELESS_TYPE_CONVERSION (t2); 1310 if (t1 == t2) 1311 return 0; 1312 1313 if (TREE_CODE (t1) != TREE_CODE (t2) 1314 && ! (CONVERT_EXPR_P (t1) && CONVERT_EXPR_P (t2))) 1315 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1; 1316 1317 code = TREE_CODE (t1); 1318 switch (code) 1319 { 1320 case INTEGER_CST: 1321 return tree_int_cst_compare (t1, t2); 1322 1323 case STRING_CST: 1324 if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2)) 1325 return TREE_STRING_LENGTH (t1) < TREE_STRING_LENGTH (t2) ? -1 : 1; 1326 return memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2), 1327 TREE_STRING_LENGTH (t1)); 1328 1329 case SSA_NAME: 1330 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2)) 1331 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1; 1332 break; 1333 1334 default: 1335 if (POLY_INT_CST_P (t1)) 1336 return compare_sizes_for_sort (wi::to_poly_widest (t1), 1337 wi::to_poly_widest (t2)); 1338 1339 tclass = TREE_CODE_CLASS (code); 1340 1341 /* For decls, compare their UIDs. */ 1342 if (tclass == tcc_declaration) 1343 { 1344 if (DECL_UID (t1) != DECL_UID (t2)) 1345 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1; 1346 break; 1347 } 1348 /* For expressions, compare their operands recursively. */ 1349 else if (IS_EXPR_CODE_CLASS (tclass)) 1350 { 1351 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i) 1352 { 1353 cmp = data_ref_compare_tree (TREE_OPERAND (t1, i), 1354 TREE_OPERAND (t2, i)); 1355 if (cmp != 0) 1356 return cmp; 1357 } 1358 } 1359 else 1360 gcc_unreachable (); 1361 } 1362 1363 return 0; 1364 } 1365 1366 /* Return TRUE it's possible to resolve data dependence DDR by runtime alias 1367 check. */ 1368 1369 opt_result 1370 runtime_alias_check_p (ddr_p ddr, struct loop *loop, bool speed_p) 1371 { 1372 if (dump_enabled_p ()) 1373 dump_printf (MSG_NOTE, 1374 "consider run-time aliasing test between %T and %T\n", 1375 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr))); 1376 1377 if (!speed_p) 1378 return opt_result::failure_at (DR_STMT (DDR_A (ddr)), 1379 "runtime alias check not supported when" 1380 " optimizing for size.\n"); 1381 1382 /* FORNOW: We don't support versioning with outer-loop in either 1383 vectorization or loop distribution. */ 1384 if (loop != NULL && loop->inner != NULL) 1385 return opt_result::failure_at (DR_STMT (DDR_A (ddr)), 1386 "runtime alias check not supported for" 1387 " outer loop.\n"); 1388 1389 return opt_result::success (); 1390 } 1391 1392 /* Operator == between two dr_with_seg_len objects. 1393 1394 This equality operator is used to make sure two data refs 1395 are the same one so that we will consider to combine the 1396 aliasing checks of those two pairs of data dependent data 1397 refs. */ 1398 1399 static bool 1400 operator == (const dr_with_seg_len& d1, 1401 const dr_with_seg_len& d2) 1402 { 1403 return (operand_equal_p (DR_BASE_ADDRESS (d1.dr), 1404 DR_BASE_ADDRESS (d2.dr), 0) 1405 && data_ref_compare_tree (DR_OFFSET (d1.dr), DR_OFFSET (d2.dr)) == 0 1406 && data_ref_compare_tree (DR_INIT (d1.dr), DR_INIT (d2.dr)) == 0 1407 && data_ref_compare_tree (d1.seg_len, d2.seg_len) == 0 1408 && known_eq (d1.access_size, d2.access_size) 1409 && d1.align == d2.align); 1410 } 1411 1412 /* Comparison function for sorting objects of dr_with_seg_len_pair_t 1413 so that we can combine aliasing checks in one scan. */ 1414 1415 static int 1416 comp_dr_with_seg_len_pair (const void *pa_, const void *pb_) 1417 { 1418 const dr_with_seg_len_pair_t* pa = (const dr_with_seg_len_pair_t *) pa_; 1419 const dr_with_seg_len_pair_t* pb = (const dr_with_seg_len_pair_t *) pb_; 1420 const dr_with_seg_len &a1 = pa->first, &a2 = pa->second; 1421 const dr_with_seg_len &b1 = pb->first, &b2 = pb->second; 1422 1423 /* For DR pairs (a, b) and (c, d), we only consider to merge the alias checks 1424 if a and c have the same basic address snd step, and b and d have the same 1425 address and step. Therefore, if any a&c or b&d don't have the same address 1426 and step, we don't care the order of those two pairs after sorting. */ 1427 int comp_res; 1428 1429 if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a1.dr), 1430 DR_BASE_ADDRESS (b1.dr))) != 0) 1431 return comp_res; 1432 if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a2.dr), 1433 DR_BASE_ADDRESS (b2.dr))) != 0) 1434 return comp_res; 1435 if ((comp_res = data_ref_compare_tree (DR_STEP (a1.dr), 1436 DR_STEP (b1.dr))) != 0) 1437 return comp_res; 1438 if ((comp_res = data_ref_compare_tree (DR_STEP (a2.dr), 1439 DR_STEP (b2.dr))) != 0) 1440 return comp_res; 1441 if ((comp_res = data_ref_compare_tree (DR_OFFSET (a1.dr), 1442 DR_OFFSET (b1.dr))) != 0) 1443 return comp_res; 1444 if ((comp_res = data_ref_compare_tree (DR_INIT (a1.dr), 1445 DR_INIT (b1.dr))) != 0) 1446 return comp_res; 1447 if ((comp_res = data_ref_compare_tree (DR_OFFSET (a2.dr), 1448 DR_OFFSET (b2.dr))) != 0) 1449 return comp_res; 1450 if ((comp_res = data_ref_compare_tree (DR_INIT (a2.dr), 1451 DR_INIT (b2.dr))) != 0) 1452 return comp_res; 1453 1454 return 0; 1455 } 1456 1457 /* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones. 1458 FACTOR is number of iterations that each data reference is accessed. 1459 1460 Basically, for each pair of dependent data refs store_ptr_0 & load_ptr_0, 1461 we create an expression: 1462 1463 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0) 1464 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0)) 1465 1466 for aliasing checks. However, in some cases we can decrease the number 1467 of checks by combining two checks into one. For example, suppose we have 1468 another pair of data refs store_ptr_0 & load_ptr_1, and if the following 1469 condition is satisfied: 1470 1471 load_ptr_0 < load_ptr_1 && 1472 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0 1473 1474 (this condition means, in each iteration of vectorized loop, the accessed 1475 memory of store_ptr_0 cannot be between the memory of load_ptr_0 and 1476 load_ptr_1.) 1477 1478 we then can use only the following expression to finish the alising checks 1479 between store_ptr_0 & load_ptr_0 and store_ptr_0 & load_ptr_1: 1480 1481 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0) 1482 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0)) 1483 1484 Note that we only consider that load_ptr_0 and load_ptr_1 have the same 1485 basic address. */ 1486 1487 void 1488 prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, 1489 poly_uint64) 1490 { 1491 /* Sort the collected data ref pairs so that we can scan them once to 1492 combine all possible aliasing checks. */ 1493 alias_pairs->qsort (comp_dr_with_seg_len_pair); 1494 1495 /* Scan the sorted dr pairs and check if we can combine alias checks 1496 of two neighboring dr pairs. */ 1497 for (size_t i = 1; i < alias_pairs->length (); ++i) 1498 { 1499 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */ 1500 dr_with_seg_len *dr_a1 = &(*alias_pairs)[i-1].first, 1501 *dr_b1 = &(*alias_pairs)[i-1].second, 1502 *dr_a2 = &(*alias_pairs)[i].first, 1503 *dr_b2 = &(*alias_pairs)[i].second; 1504 1505 /* Remove duplicate data ref pairs. */ 1506 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2) 1507 { 1508 if (dump_enabled_p ()) 1509 dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n", 1510 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), 1511 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); 1512 alias_pairs->ordered_remove (i--); 1513 continue; 1514 } 1515 1516 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2) 1517 { 1518 /* We consider the case that DR_B1 and DR_B2 are same memrefs, 1519 and DR_A1 and DR_A2 are two consecutive memrefs. */ 1520 if (*dr_a1 == *dr_a2) 1521 { 1522 std::swap (dr_a1, dr_b1); 1523 std::swap (dr_a2, dr_b2); 1524 } 1525 1526 poly_int64 init_a1, init_a2; 1527 /* Only consider cases in which the distance between the initial 1528 DR_A1 and the initial DR_A2 is known at compile time. */ 1529 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr), 1530 DR_BASE_ADDRESS (dr_a2->dr), 0) 1531 || !operand_equal_p (DR_OFFSET (dr_a1->dr), 1532 DR_OFFSET (dr_a2->dr), 0) 1533 || !poly_int_tree_p (DR_INIT (dr_a1->dr), &init_a1) 1534 || !poly_int_tree_p (DR_INIT (dr_a2->dr), &init_a2)) 1535 continue; 1536 1537 /* Don't combine if we can't tell which one comes first. */ 1538 if (!ordered_p (init_a1, init_a2)) 1539 continue; 1540 1541 /* Make sure dr_a1 starts left of dr_a2. */ 1542 if (maybe_gt (init_a1, init_a2)) 1543 { 1544 std::swap (*dr_a1, *dr_a2); 1545 std::swap (init_a1, init_a2); 1546 } 1547 1548 /* Work out what the segment length would be if we did combine 1549 DR_A1 and DR_A2: 1550 1551 - If DR_A1 and DR_A2 have equal lengths, that length is 1552 also the combined length. 1553 1554 - If DR_A1 and DR_A2 both have negative "lengths", the combined 1555 length is the lower bound on those lengths. 1556 1557 - If DR_A1 and DR_A2 both have positive lengths, the combined 1558 length is the upper bound on those lengths. 1559 1560 Other cases are unlikely to give a useful combination. 1561 1562 The lengths both have sizetype, so the sign is taken from 1563 the step instead. */ 1564 if (!operand_equal_p (dr_a1->seg_len, dr_a2->seg_len, 0)) 1565 { 1566 poly_uint64 seg_len_a1, seg_len_a2; 1567 if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1) 1568 || !poly_int_tree_p (dr_a2->seg_len, &seg_len_a2)) 1569 continue; 1570 1571 tree indicator_a = dr_direction_indicator (dr_a1->dr); 1572 if (TREE_CODE (indicator_a) != INTEGER_CST) 1573 continue; 1574 1575 tree indicator_b = dr_direction_indicator (dr_a2->dr); 1576 if (TREE_CODE (indicator_b) != INTEGER_CST) 1577 continue; 1578 1579 int sign_a = tree_int_cst_sgn (indicator_a); 1580 int sign_b = tree_int_cst_sgn (indicator_b); 1581 1582 poly_uint64 new_seg_len; 1583 if (sign_a <= 0 && sign_b <= 0) 1584 new_seg_len = lower_bound (seg_len_a1, seg_len_a2); 1585 else if (sign_a >= 0 && sign_b >= 0) 1586 new_seg_len = upper_bound (seg_len_a1, seg_len_a2); 1587 else 1588 continue; 1589 1590 dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len), 1591 new_seg_len); 1592 dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len)); 1593 } 1594 1595 /* This is always positive due to the swap above. */ 1596 poly_uint64 diff = init_a2 - init_a1; 1597 1598 /* The new check will start at DR_A1. Make sure that its access 1599 size encompasses the initial DR_A2. */ 1600 if (maybe_lt (dr_a1->access_size, diff + dr_a2->access_size)) 1601 { 1602 dr_a1->access_size = upper_bound (dr_a1->access_size, 1603 diff + dr_a2->access_size); 1604 unsigned int new_align = known_alignment (dr_a1->access_size); 1605 dr_a1->align = MIN (dr_a1->align, new_align); 1606 } 1607 if (dump_enabled_p ()) 1608 dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n", 1609 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), 1610 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); 1611 alias_pairs->ordered_remove (i); 1612 i--; 1613 } 1614 } 1615 } 1616 1617 /* Given LOOP's two data references and segment lengths described by DR_A 1618 and DR_B, create expression checking if the two addresses ranges intersect 1619 with each other based on index of the two addresses. This can only be 1620 done if DR_A and DR_B referring to the same (array) object and the index 1621 is the only difference. For example: 1622 1623 DR_A DR_B 1624 data-ref arr[i] arr[j] 1625 base_object arr arr 1626 index {i_0, +, 1}_loop {j_0, +, 1}_loop 1627 1628 The addresses and their index are like: 1629 1630 |<- ADDR_A ->| |<- ADDR_B ->| 1631 -------------------------------------------------------> 1632 | | | | | | | | | | 1633 -------------------------------------------------------> 1634 i_0 ... i_0+4 j_0 ... j_0+4 1635 1636 We can create expression based on index rather than address: 1637 1638 (i_0 + 4 < j_0 || j_0 + 4 < i_0) 1639 1640 Note evolution step of index needs to be considered in comparison. */ 1641 1642 static bool 1643 create_intersect_range_checks_index (struct loop *loop, tree *cond_expr, 1644 const dr_with_seg_len& dr_a, 1645 const dr_with_seg_len& dr_b) 1646 { 1647 if (integer_zerop (DR_STEP (dr_a.dr)) 1648 || integer_zerop (DR_STEP (dr_b.dr)) 1649 || DR_NUM_DIMENSIONS (dr_a.dr) != DR_NUM_DIMENSIONS (dr_b.dr)) 1650 return false; 1651 1652 poly_uint64 seg_len1, seg_len2; 1653 if (!poly_int_tree_p (dr_a.seg_len, &seg_len1) 1654 || !poly_int_tree_p (dr_b.seg_len, &seg_len2)) 1655 return false; 1656 1657 if (!tree_fits_shwi_p (DR_STEP (dr_a.dr))) 1658 return false; 1659 1660 if (!operand_equal_p (DR_BASE_OBJECT (dr_a.dr), DR_BASE_OBJECT (dr_b.dr), 0)) 1661 return false; 1662 1663 if (!operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0)) 1664 return false; 1665 1666 gcc_assert (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST); 1667 1668 bool neg_step = tree_int_cst_compare (DR_STEP (dr_a.dr), size_zero_node) < 0; 1669 unsigned HOST_WIDE_INT abs_step = tree_to_shwi (DR_STEP (dr_a.dr)); 1670 if (neg_step) 1671 { 1672 abs_step = -abs_step; 1673 seg_len1 = -seg_len1; 1674 seg_len2 = -seg_len2; 1675 } 1676 else 1677 { 1678 /* Include the access size in the length, so that we only have one 1679 tree addition below. */ 1680 seg_len1 += dr_a.access_size; 1681 seg_len2 += dr_b.access_size; 1682 } 1683 1684 /* Infer the number of iterations with which the memory segment is accessed 1685 by DR. In other words, alias is checked if memory segment accessed by 1686 DR_A in some iterations intersect with memory segment accessed by DR_B 1687 in the same amount iterations. 1688 Note segnment length is a linear function of number of iterations with 1689 DR_STEP as the coefficient. */ 1690 poly_uint64 niter_len1, niter_len2; 1691 if (!can_div_trunc_p (seg_len1 + abs_step - 1, abs_step, &niter_len1) 1692 || !can_div_trunc_p (seg_len2 + abs_step - 1, abs_step, &niter_len2)) 1693 return false; 1694 1695 poly_uint64 niter_access1 = 0, niter_access2 = 0; 1696 if (neg_step) 1697 { 1698 /* Divide each access size by the byte step, rounding up. */ 1699 if (!can_div_trunc_p (dr_a.access_size - abs_step - 1, 1700 abs_step, &niter_access1) 1701 || !can_div_trunc_p (dr_b.access_size + abs_step - 1, 1702 abs_step, &niter_access2)) 1703 return false; 1704 } 1705 1706 unsigned int i; 1707 for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++) 1708 { 1709 tree access1 = DR_ACCESS_FN (dr_a.dr, i); 1710 tree access2 = DR_ACCESS_FN (dr_b.dr, i); 1711 /* Two indices must be the same if they are not scev, or not scev wrto 1712 current loop being vecorized. */ 1713 if (TREE_CODE (access1) != POLYNOMIAL_CHREC 1714 || TREE_CODE (access2) != POLYNOMIAL_CHREC 1715 || CHREC_VARIABLE (access1) != (unsigned)loop->num 1716 || CHREC_VARIABLE (access2) != (unsigned)loop->num) 1717 { 1718 if (operand_equal_p (access1, access2, 0)) 1719 continue; 1720 1721 return false; 1722 } 1723 /* The two indices must have the same step. */ 1724 if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0)) 1725 return false; 1726 1727 tree idx_step = CHREC_RIGHT (access1); 1728 /* Index must have const step, otherwise DR_STEP won't be constant. */ 1729 gcc_assert (TREE_CODE (idx_step) == INTEGER_CST); 1730 /* Index must evaluate in the same direction as DR. */ 1731 gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1); 1732 1733 tree min1 = CHREC_LEFT (access1); 1734 tree min2 = CHREC_LEFT (access2); 1735 if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2))) 1736 return false; 1737 1738 /* Ideally, alias can be checked against loop's control IV, but we 1739 need to prove linear mapping between control IV and reference 1740 index. Although that should be true, we check against (array) 1741 index of data reference. Like segment length, index length is 1742 linear function of the number of iterations with index_step as 1743 the coefficient, i.e, niter_len * idx_step. */ 1744 tree idx_len1 = fold_build2 (MULT_EXPR, TREE_TYPE (min1), idx_step, 1745 build_int_cst (TREE_TYPE (min1), 1746 niter_len1)); 1747 tree idx_len2 = fold_build2 (MULT_EXPR, TREE_TYPE (min2), idx_step, 1748 build_int_cst (TREE_TYPE (min2), 1749 niter_len2)); 1750 tree max1 = fold_build2 (PLUS_EXPR, TREE_TYPE (min1), min1, idx_len1); 1751 tree max2 = fold_build2 (PLUS_EXPR, TREE_TYPE (min2), min2, idx_len2); 1752 /* Adjust ranges for negative step. */ 1753 if (neg_step) 1754 { 1755 /* IDX_LEN1 and IDX_LEN2 are negative in this case. */ 1756 std::swap (min1, max1); 1757 std::swap (min2, max2); 1758 1759 /* As with the lengths just calculated, we've measured the access 1760 sizes in iterations, so multiply them by the index step. */ 1761 tree idx_access1 1762 = fold_build2 (MULT_EXPR, TREE_TYPE (min1), idx_step, 1763 build_int_cst (TREE_TYPE (min1), niter_access1)); 1764 tree idx_access2 1765 = fold_build2 (MULT_EXPR, TREE_TYPE (min2), idx_step, 1766 build_int_cst (TREE_TYPE (min2), niter_access2)); 1767 1768 /* MINUS_EXPR because the above values are negative. */ 1769 max1 = fold_build2 (MINUS_EXPR, TREE_TYPE (max1), max1, idx_access1); 1770 max2 = fold_build2 (MINUS_EXPR, TREE_TYPE (max2), max2, idx_access2); 1771 } 1772 tree part_cond_expr 1773 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, 1774 fold_build2 (LE_EXPR, boolean_type_node, max1, min2), 1775 fold_build2 (LE_EXPR, boolean_type_node, max2, min1)); 1776 if (*cond_expr) 1777 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, 1778 *cond_expr, part_cond_expr); 1779 else 1780 *cond_expr = part_cond_expr; 1781 } 1782 return true; 1783 } 1784 1785 /* If ALIGN is nonzero, set up *SEQ_MIN_OUT and *SEQ_MAX_OUT so that for 1786 every address ADDR accessed by D: 1787 1788 *SEQ_MIN_OUT <= ADDR (== ADDR & -ALIGN) <= *SEQ_MAX_OUT 1789 1790 In this case, every element accessed by D is aligned to at least 1791 ALIGN bytes. 1792 1793 If ALIGN is zero then instead set *SEG_MAX_OUT so that: 1794 1795 *SEQ_MIN_OUT <= ADDR < *SEQ_MAX_OUT. */ 1796 1797 static void 1798 get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out, 1799 tree *seg_max_out, HOST_WIDE_INT align) 1800 { 1801 /* Each access has the following pattern: 1802 1803 <- |seg_len| -> 1804 <--- A: -ve step ---> 1805 +-----+-------+-----+-------+-----+ 1806 | n-1 | ,.... | 0 | ..... | n-1 | 1807 +-----+-------+-----+-------+-----+ 1808 <--- B: +ve step ---> 1809 <- |seg_len| -> 1810 | 1811 base address 1812 1813 where "n" is the number of scalar iterations covered by the segment. 1814 (This should be VF for a particular pair if we know that both steps 1815 are the same, otherwise it will be the full number of scalar loop 1816 iterations.) 1817 1818 A is the range of bytes accessed when the step is negative, 1819 B is the range when the step is positive. 1820 1821 If the access size is "access_size" bytes, the lowest addressed byte is: 1822 1823 base + (step < 0 ? seg_len : 0) [LB] 1824 1825 and the highest addressed byte is always below: 1826 1827 base + (step < 0 ? 0 : seg_len) + access_size [UB] 1828 1829 Thus: 1830 1831 LB <= ADDR < UB 1832 1833 If ALIGN is nonzero, all three values are aligned to at least ALIGN 1834 bytes, so: 1835 1836 LB <= ADDR <= UB - ALIGN 1837 1838 where "- ALIGN" folds naturally with the "+ access_size" and often 1839 cancels it out. 1840 1841 We don't try to simplify LB and UB beyond this (e.g. by using 1842 MIN and MAX based on whether seg_len rather than the stride is 1843 negative) because it is possible for the absolute size of the 1844 segment to overflow the range of a ssize_t. 1845 1846 Keeping the pointer_plus outside of the cond_expr should allow 1847 the cond_exprs to be shared with other alias checks. */ 1848 tree indicator = dr_direction_indicator (d.dr); 1849 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node, 1850 fold_convert (ssizetype, indicator), 1851 ssize_int (0)); 1852 tree addr_base = fold_build_pointer_plus (DR_BASE_ADDRESS (d.dr), 1853 DR_OFFSET (d.dr)); 1854 addr_base = fold_build_pointer_plus (addr_base, DR_INIT (d.dr)); 1855 tree seg_len 1856 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (d.seg_len)); 1857 1858 tree min_reach = fold_build3 (COND_EXPR, sizetype, neg_step, 1859 seg_len, size_zero_node); 1860 tree max_reach = fold_build3 (COND_EXPR, sizetype, neg_step, 1861 size_zero_node, seg_len); 1862 max_reach = fold_build2 (PLUS_EXPR, sizetype, max_reach, 1863 size_int (d.access_size - align)); 1864 1865 *seg_min_out = fold_build_pointer_plus (addr_base, min_reach); 1866 *seg_max_out = fold_build_pointer_plus (addr_base, max_reach); 1867 } 1868 1869 /* Given two data references and segment lengths described by DR_A and DR_B, 1870 create expression checking if the two addresses ranges intersect with 1871 each other: 1872 1873 ((DR_A_addr_0 + DR_A_segment_length_0) <= DR_B_addr_0) 1874 || (DR_B_addr_0 + DER_B_segment_length_0) <= DR_A_addr_0)) */ 1875 1876 static void 1877 create_intersect_range_checks (struct loop *loop, tree *cond_expr, 1878 const dr_with_seg_len& dr_a, 1879 const dr_with_seg_len& dr_b) 1880 { 1881 *cond_expr = NULL_TREE; 1882 if (create_intersect_range_checks_index (loop, cond_expr, dr_a, dr_b)) 1883 return; 1884 1885 unsigned HOST_WIDE_INT min_align; 1886 tree_code cmp_code; 1887 if (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST 1888 && TREE_CODE (DR_STEP (dr_b.dr)) == INTEGER_CST) 1889 { 1890 /* In this case adding access_size to seg_len is likely to give 1891 a simple X * step, where X is either the number of scalar 1892 iterations or the vectorization factor. We're better off 1893 keeping that, rather than subtracting an alignment from it. 1894 1895 In this case the maximum values are exclusive and so there is 1896 no alias if the maximum of one segment equals the minimum 1897 of another. */ 1898 min_align = 0; 1899 cmp_code = LE_EXPR; 1900 } 1901 else 1902 { 1903 /* Calculate the minimum alignment shared by all four pointers, 1904 then arrange for this alignment to be subtracted from the 1905 exclusive maximum values to get inclusive maximum values. 1906 This "- min_align" is cumulative with a "+ access_size" 1907 in the calculation of the maximum values. In the best 1908 (and common) case, the two cancel each other out, leaving 1909 us with an inclusive bound based only on seg_len. In the 1910 worst case we're simply adding a smaller number than before. 1911 1912 Because the maximum values are inclusive, there is an alias 1913 if the maximum value of one segment is equal to the minimum 1914 value of the other. */ 1915 min_align = MIN (dr_a.align, dr_b.align); 1916 cmp_code = LT_EXPR; 1917 } 1918 1919 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max; 1920 get_segment_min_max (dr_a, &seg_a_min, &seg_a_max, min_align); 1921 get_segment_min_max (dr_b, &seg_b_min, &seg_b_max, min_align); 1922 1923 *cond_expr 1924 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, 1925 fold_build2 (cmp_code, boolean_type_node, seg_a_max, seg_b_min), 1926 fold_build2 (cmp_code, boolean_type_node, seg_b_max, seg_a_min)); 1927 } 1928 1929 /* Create a conditional expression that represents the run-time checks for 1930 overlapping of address ranges represented by a list of data references 1931 pairs passed in ALIAS_PAIRS. Data references are in LOOP. The returned 1932 COND_EXPR is the conditional expression to be used in the if statement 1933 that controls which version of the loop gets executed at runtime. */ 1934 1935 void 1936 create_runtime_alias_checks (struct loop *loop, 1937 vec<dr_with_seg_len_pair_t> *alias_pairs, 1938 tree * cond_expr) 1939 { 1940 tree part_cond_expr; 1941 1942 fold_defer_overflow_warnings (); 1943 for (size_t i = 0, s = alias_pairs->length (); i < s; ++i) 1944 { 1945 const dr_with_seg_len& dr_a = (*alias_pairs)[i].first; 1946 const dr_with_seg_len& dr_b = (*alias_pairs)[i].second; 1947 1948 if (dump_enabled_p ()) 1949 dump_printf (MSG_NOTE, 1950 "create runtime check for data references %T and %T\n", 1951 DR_REF (dr_a.dr), DR_REF (dr_b.dr)); 1952 1953 /* Create condition expression for each pair data references. */ 1954 create_intersect_range_checks (loop, &part_cond_expr, dr_a, dr_b); 1955 if (*cond_expr) 1956 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, 1957 *cond_expr, part_cond_expr); 1958 else 1959 *cond_expr = part_cond_expr; 1960 } 1961 fold_undefer_and_ignore_overflow_warnings (); 1962 } 1963 1964 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical 1965 expressions. */ 1966 static bool 1967 dr_equal_offsets_p1 (tree offset1, tree offset2) 1968 { 1969 bool res; 1970 1971 STRIP_NOPS (offset1); 1972 STRIP_NOPS (offset2); 1973 1974 if (offset1 == offset2) 1975 return true; 1976 1977 if (TREE_CODE (offset1) != TREE_CODE (offset2) 1978 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1))) 1979 return false; 1980 1981 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0), 1982 TREE_OPERAND (offset2, 0)); 1983 1984 if (!res || !BINARY_CLASS_P (offset1)) 1985 return res; 1986 1987 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1), 1988 TREE_OPERAND (offset2, 1)); 1989 1990 return res; 1991 } 1992 1993 /* Check if DRA and DRB have equal offsets. */ 1994 bool 1995 dr_equal_offsets_p (struct data_reference *dra, 1996 struct data_reference *drb) 1997 { 1998 tree offset1, offset2; 1999 2000 offset1 = DR_OFFSET (dra); 2001 offset2 = DR_OFFSET (drb); 2002 2003 return dr_equal_offsets_p1 (offset1, offset2); 2004 } 2005 2006 /* Returns true if FNA == FNB. */ 2007 2008 static bool 2009 affine_function_equal_p (affine_fn fna, affine_fn fnb) 2010 { 2011 unsigned i, n = fna.length (); 2012 2013 if (n != fnb.length ()) 2014 return false; 2015 2016 for (i = 0; i < n; i++) 2017 if (!operand_equal_p (fna[i], fnb[i], 0)) 2018 return false; 2019 2020 return true; 2021 } 2022 2023 /* If all the functions in CF are the same, returns one of them, 2024 otherwise returns NULL. */ 2025 2026 static affine_fn 2027 common_affine_function (conflict_function *cf) 2028 { 2029 unsigned i; 2030 affine_fn comm; 2031 2032 if (!CF_NONTRIVIAL_P (cf)) 2033 return affine_fn (); 2034 2035 comm = cf->fns[0]; 2036 2037 for (i = 1; i < cf->n; i++) 2038 if (!affine_function_equal_p (comm, cf->fns[i])) 2039 return affine_fn (); 2040 2041 return comm; 2042 } 2043 2044 /* Returns the base of the affine function FN. */ 2045 2046 static tree 2047 affine_function_base (affine_fn fn) 2048 { 2049 return fn[0]; 2050 } 2051 2052 /* Returns true if FN is a constant. */ 2053 2054 static bool 2055 affine_function_constant_p (affine_fn fn) 2056 { 2057 unsigned i; 2058 tree coef; 2059 2060 for (i = 1; fn.iterate (i, &coef); i++) 2061 if (!integer_zerop (coef)) 2062 return false; 2063 2064 return true; 2065 } 2066 2067 /* Returns true if FN is the zero constant function. */ 2068 2069 static bool 2070 affine_function_zero_p (affine_fn fn) 2071 { 2072 return (integer_zerop (affine_function_base (fn)) 2073 && affine_function_constant_p (fn)); 2074 } 2075 2076 /* Returns a signed integer type with the largest precision from TA 2077 and TB. */ 2078 2079 static tree 2080 signed_type_for_types (tree ta, tree tb) 2081 { 2082 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb)) 2083 return signed_type_for (ta); 2084 else 2085 return signed_type_for (tb); 2086 } 2087 2088 /* Applies operation OP on affine functions FNA and FNB, and returns the 2089 result. */ 2090 2091 static affine_fn 2092 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb) 2093 { 2094 unsigned i, n, m; 2095 affine_fn ret; 2096 tree coef; 2097 2098 if (fnb.length () > fna.length ()) 2099 { 2100 n = fna.length (); 2101 m = fnb.length (); 2102 } 2103 else 2104 { 2105 n = fnb.length (); 2106 m = fna.length (); 2107 } 2108 2109 ret.create (m); 2110 for (i = 0; i < n; i++) 2111 { 2112 tree type = signed_type_for_types (TREE_TYPE (fna[i]), 2113 TREE_TYPE (fnb[i])); 2114 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i])); 2115 } 2116 2117 for (; fna.iterate (i, &coef); i++) 2118 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)), 2119 coef, integer_zero_node)); 2120 for (; fnb.iterate (i, &coef); i++) 2121 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)), 2122 integer_zero_node, coef)); 2123 2124 return ret; 2125 } 2126 2127 /* Returns the sum of affine functions FNA and FNB. */ 2128 2129 static affine_fn 2130 affine_fn_plus (affine_fn fna, affine_fn fnb) 2131 { 2132 return affine_fn_op (PLUS_EXPR, fna, fnb); 2133 } 2134 2135 /* Returns the difference of affine functions FNA and FNB. */ 2136 2137 static affine_fn 2138 affine_fn_minus (affine_fn fna, affine_fn fnb) 2139 { 2140 return affine_fn_op (MINUS_EXPR, fna, fnb); 2141 } 2142 2143 /* Frees affine function FN. */ 2144 2145 static void 2146 affine_fn_free (affine_fn fn) 2147 { 2148 fn.release (); 2149 } 2150 2151 /* Determine for each subscript in the data dependence relation DDR 2152 the distance. */ 2153 2154 static void 2155 compute_subscript_distance (struct data_dependence_relation *ddr) 2156 { 2157 conflict_function *cf_a, *cf_b; 2158 affine_fn fn_a, fn_b, diff; 2159 2160 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 2161 { 2162 unsigned int i; 2163 2164 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 2165 { 2166 struct subscript *subscript; 2167 2168 subscript = DDR_SUBSCRIPT (ddr, i); 2169 cf_a = SUB_CONFLICTS_IN_A (subscript); 2170 cf_b = SUB_CONFLICTS_IN_B (subscript); 2171 2172 fn_a = common_affine_function (cf_a); 2173 fn_b = common_affine_function (cf_b); 2174 if (!fn_a.exists () || !fn_b.exists ()) 2175 { 2176 SUB_DISTANCE (subscript) = chrec_dont_know; 2177 return; 2178 } 2179 diff = affine_fn_minus (fn_a, fn_b); 2180 2181 if (affine_function_constant_p (diff)) 2182 SUB_DISTANCE (subscript) = affine_function_base (diff); 2183 else 2184 SUB_DISTANCE (subscript) = chrec_dont_know; 2185 2186 affine_fn_free (diff); 2187 } 2188 } 2189 } 2190 2191 /* Returns the conflict function for "unknown". */ 2192 2193 static conflict_function * 2194 conflict_fn_not_known (void) 2195 { 2196 conflict_function *fn = XCNEW (conflict_function); 2197 fn->n = NOT_KNOWN; 2198 2199 return fn; 2200 } 2201 2202 /* Returns the conflict function for "independent". */ 2203 2204 static conflict_function * 2205 conflict_fn_no_dependence (void) 2206 { 2207 conflict_function *fn = XCNEW (conflict_function); 2208 fn->n = NO_DEPENDENCE; 2209 2210 return fn; 2211 } 2212 2213 /* Returns true if the address of OBJ is invariant in LOOP. */ 2214 2215 static bool 2216 object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj) 2217 { 2218 while (handled_component_p (obj)) 2219 { 2220 if (TREE_CODE (obj) == ARRAY_REF) 2221 { 2222 for (int i = 1; i < 4; ++i) 2223 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, i), 2224 loop->num)) 2225 return false; 2226 } 2227 else if (TREE_CODE (obj) == COMPONENT_REF) 2228 { 2229 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2), 2230 loop->num)) 2231 return false; 2232 } 2233 obj = TREE_OPERAND (obj, 0); 2234 } 2235 2236 if (!INDIRECT_REF_P (obj) 2237 && TREE_CODE (obj) != MEM_REF) 2238 return true; 2239 2240 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0), 2241 loop->num); 2242 } 2243 2244 /* Returns false if we can prove that data references A and B do not alias, 2245 true otherwise. If LOOP_NEST is false no cross-iteration aliases are 2246 considered. */ 2247 2248 bool 2249 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b, 2250 struct loop *loop_nest) 2251 { 2252 tree addr_a = DR_BASE_OBJECT (a); 2253 tree addr_b = DR_BASE_OBJECT (b); 2254 2255 /* If we are not processing a loop nest but scalar code we 2256 do not need to care about possible cross-iteration dependences 2257 and thus can process the full original reference. Do so, 2258 similar to how loop invariant motion applies extra offset-based 2259 disambiguation. */ 2260 if (!loop_nest) 2261 { 2262 aff_tree off1, off2; 2263 poly_widest_int size1, size2; 2264 get_inner_reference_aff (DR_REF (a), &off1, &size1); 2265 get_inner_reference_aff (DR_REF (b), &off2, &size2); 2266 aff_combination_scale (&off1, -1); 2267 aff_combination_add (&off2, &off1); 2268 if (aff_comb_cannot_overlap_p (&off2, size1, size2)) 2269 return false; 2270 } 2271 2272 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF) 2273 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF) 2274 /* For cross-iteration dependences the cliques must be valid for the 2275 whole loop, not just individual iterations. */ 2276 && (!loop_nest 2277 || MR_DEPENDENCE_CLIQUE (addr_a) == 1 2278 || MR_DEPENDENCE_CLIQUE (addr_a) == loop_nest->owned_clique) 2279 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b) 2280 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b)) 2281 return false; 2282 2283 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we 2284 do not know the size of the base-object. So we cannot do any 2285 offset/overlap based analysis but have to rely on points-to 2286 information only. */ 2287 if (TREE_CODE (addr_a) == MEM_REF 2288 && (DR_UNCONSTRAINED_BASE (a) 2289 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME)) 2290 { 2291 /* For true dependences we can apply TBAA. */ 2292 if (flag_strict_aliasing 2293 && DR_IS_WRITE (a) && DR_IS_READ (b) 2294 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)), 2295 get_alias_set (DR_REF (b)))) 2296 return false; 2297 if (TREE_CODE (addr_b) == MEM_REF) 2298 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 2299 TREE_OPERAND (addr_b, 0)); 2300 else 2301 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 2302 build_fold_addr_expr (addr_b)); 2303 } 2304 else if (TREE_CODE (addr_b) == MEM_REF 2305 && (DR_UNCONSTRAINED_BASE (b) 2306 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME)) 2307 { 2308 /* For true dependences we can apply TBAA. */ 2309 if (flag_strict_aliasing 2310 && DR_IS_WRITE (a) && DR_IS_READ (b) 2311 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)), 2312 get_alias_set (DR_REF (b)))) 2313 return false; 2314 if (TREE_CODE (addr_a) == MEM_REF) 2315 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 2316 TREE_OPERAND (addr_b, 0)); 2317 else 2318 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a), 2319 TREE_OPERAND (addr_b, 0)); 2320 } 2321 2322 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object 2323 that is being subsetted in the loop nest. */ 2324 if (DR_IS_WRITE (a) && DR_IS_WRITE (b)) 2325 return refs_output_dependent_p (addr_a, addr_b); 2326 else if (DR_IS_READ (a) && DR_IS_WRITE (b)) 2327 return refs_anti_dependent_p (addr_a, addr_b); 2328 return refs_may_alias_p (addr_a, addr_b); 2329 } 2330 2331 /* REF_A and REF_B both satisfy access_fn_component_p. Return true 2332 if it is meaningful to compare their associated access functions 2333 when checking for dependencies. */ 2334 2335 static bool 2336 access_fn_components_comparable_p (tree ref_a, tree ref_b) 2337 { 2338 /* Allow pairs of component refs from the following sets: 2339 2340 { REALPART_EXPR, IMAGPART_EXPR } 2341 { COMPONENT_REF } 2342 { ARRAY_REF }. */ 2343 tree_code code_a = TREE_CODE (ref_a); 2344 tree_code code_b = TREE_CODE (ref_b); 2345 if (code_a == IMAGPART_EXPR) 2346 code_a = REALPART_EXPR; 2347 if (code_b == IMAGPART_EXPR) 2348 code_b = REALPART_EXPR; 2349 if (code_a != code_b) 2350 return false; 2351 2352 if (TREE_CODE (ref_a) == COMPONENT_REF) 2353 /* ??? We cannot simply use the type of operand #0 of the refs here as 2354 the Fortran compiler smuggles type punning into COMPONENT_REFs. 2355 Use the DECL_CONTEXT of the FIELD_DECLs instead. */ 2356 return (DECL_CONTEXT (TREE_OPERAND (ref_a, 1)) 2357 == DECL_CONTEXT (TREE_OPERAND (ref_b, 1))); 2358 2359 return types_compatible_p (TREE_TYPE (TREE_OPERAND (ref_a, 0)), 2360 TREE_TYPE (TREE_OPERAND (ref_b, 0))); 2361 } 2362 2363 /* Initialize a data dependence relation between data accesses A and 2364 B. NB_LOOPS is the number of loops surrounding the references: the 2365 size of the classic distance/direction vectors. */ 2366 2367 struct data_dependence_relation * 2368 initialize_data_dependence_relation (struct data_reference *a, 2369 struct data_reference *b, 2370 vec<loop_p> loop_nest) 2371 { 2372 struct data_dependence_relation *res; 2373 unsigned int i; 2374 2375 res = XCNEW (struct data_dependence_relation); 2376 DDR_A (res) = a; 2377 DDR_B (res) = b; 2378 DDR_LOOP_NEST (res).create (0); 2379 DDR_SUBSCRIPTS (res).create (0); 2380 DDR_DIR_VECTS (res).create (0); 2381 DDR_DIST_VECTS (res).create (0); 2382 2383 if (a == NULL || b == NULL) 2384 { 2385 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 2386 return res; 2387 } 2388 2389 /* If the data references do not alias, then they are independent. */ 2390 if (!dr_may_alias_p (a, b, loop_nest.exists () ? loop_nest[0] : NULL)) 2391 { 2392 DDR_ARE_DEPENDENT (res) = chrec_known; 2393 return res; 2394 } 2395 2396 unsigned int num_dimensions_a = DR_NUM_DIMENSIONS (a); 2397 unsigned int num_dimensions_b = DR_NUM_DIMENSIONS (b); 2398 if (num_dimensions_a == 0 || num_dimensions_b == 0) 2399 { 2400 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 2401 return res; 2402 } 2403 2404 /* For unconstrained bases, the root (highest-indexed) subscript 2405 describes a variation in the base of the original DR_REF rather 2406 than a component access. We have no type that accurately describes 2407 the new DR_BASE_OBJECT (whose TREE_TYPE describes the type *after* 2408 applying this subscript) so limit the search to the last real 2409 component access. 2410 2411 E.g. for: 2412 2413 void 2414 f (int a[][8], int b[][8]) 2415 { 2416 for (int i = 0; i < 8; ++i) 2417 a[i * 2][0] = b[i][0]; 2418 } 2419 2420 the a and b accesses have a single ARRAY_REF component reference [0] 2421 but have two subscripts. */ 2422 if (DR_UNCONSTRAINED_BASE (a)) 2423 num_dimensions_a -= 1; 2424 if (DR_UNCONSTRAINED_BASE (b)) 2425 num_dimensions_b -= 1; 2426 2427 /* These structures describe sequences of component references in 2428 DR_REF (A) and DR_REF (B). Each component reference is tied to a 2429 specific access function. */ 2430 struct { 2431 /* The sequence starts at DR_ACCESS_FN (A, START_A) of A and 2432 DR_ACCESS_FN (B, START_B) of B (inclusive) and extends to higher 2433 indices. In C notation, these are the indices of the rightmost 2434 component references; e.g. for a sequence .b.c.d, the start 2435 index is for .d. */ 2436 unsigned int start_a; 2437 unsigned int start_b; 2438 2439 /* The sequence contains LENGTH consecutive access functions from 2440 each DR. */ 2441 unsigned int length; 2442 2443 /* The enclosing objects for the A and B sequences respectively, 2444 i.e. the objects to which DR_ACCESS_FN (A, START_A + LENGTH - 1) 2445 and DR_ACCESS_FN (B, START_B + LENGTH - 1) are applied. */ 2446 tree object_a; 2447 tree object_b; 2448 } full_seq = {}, struct_seq = {}; 2449 2450 /* Before each iteration of the loop: 2451 2452 - REF_A is what you get after applying DR_ACCESS_FN (A, INDEX_A) and 2453 - REF_B is what you get after applying DR_ACCESS_FN (B, INDEX_B). */ 2454 unsigned int index_a = 0; 2455 unsigned int index_b = 0; 2456 tree ref_a = DR_REF (a); 2457 tree ref_b = DR_REF (b); 2458 2459 /* Now walk the component references from the final DR_REFs back up to 2460 the enclosing base objects. Each component reference corresponds 2461 to one access function in the DR, with access function 0 being for 2462 the final DR_REF and the highest-indexed access function being the 2463 one that is applied to the base of the DR. 2464 2465 Look for a sequence of component references whose access functions 2466 are comparable (see access_fn_components_comparable_p). If more 2467 than one such sequence exists, pick the one nearest the base 2468 (which is the leftmost sequence in C notation). Store this sequence 2469 in FULL_SEQ. 2470 2471 For example, if we have: 2472 2473 struct foo { struct bar s; ... } (*a)[10], (*b)[10]; 2474 2475 A: a[0][i].s.c.d 2476 B: __real b[0][i].s.e[i].f 2477 2478 (where d is the same type as the real component of f) then the access 2479 functions would be: 2480 2481 0 1 2 3 2482 A: .d .c .s [i] 2483 2484 0 1 2 3 4 5 2485 B: __real .f [i] .e .s [i] 2486 2487 The A0/B2 column isn't comparable, since .d is a COMPONENT_REF 2488 and [i] is an ARRAY_REF. However, the A1/B3 column contains two 2489 COMPONENT_REF accesses for struct bar, so is comparable. Likewise 2490 the A2/B4 column contains two COMPONENT_REF accesses for struct foo, 2491 so is comparable. The A3/B5 column contains two ARRAY_REFs that 2492 index foo[10] arrays, so is again comparable. The sequence is 2493 therefore: 2494 2495 A: [1, 3] (i.e. [i].s.c) 2496 B: [3, 5] (i.e. [i].s.e) 2497 2498 Also look for sequences of component references whose access 2499 functions are comparable and whose enclosing objects have the same 2500 RECORD_TYPE. Store this sequence in STRUCT_SEQ. In the above 2501 example, STRUCT_SEQ would be: 2502 2503 A: [1, 2] (i.e. s.c) 2504 B: [3, 4] (i.e. s.e) */ 2505 while (index_a < num_dimensions_a && index_b < num_dimensions_b) 2506 { 2507 /* REF_A and REF_B must be one of the component access types 2508 allowed by dr_analyze_indices. */ 2509 gcc_checking_assert (access_fn_component_p (ref_a)); 2510 gcc_checking_assert (access_fn_component_p (ref_b)); 2511 2512 /* Get the immediately-enclosing objects for REF_A and REF_B, 2513 i.e. the references *before* applying DR_ACCESS_FN (A, INDEX_A) 2514 and DR_ACCESS_FN (B, INDEX_B). */ 2515 tree object_a = TREE_OPERAND (ref_a, 0); 2516 tree object_b = TREE_OPERAND (ref_b, 0); 2517 2518 tree type_a = TREE_TYPE (object_a); 2519 tree type_b = TREE_TYPE (object_b); 2520 if (access_fn_components_comparable_p (ref_a, ref_b)) 2521 { 2522 /* This pair of component accesses is comparable for dependence 2523 analysis, so we can include DR_ACCESS_FN (A, INDEX_A) and 2524 DR_ACCESS_FN (B, INDEX_B) in the sequence. */ 2525 if (full_seq.start_a + full_seq.length != index_a 2526 || full_seq.start_b + full_seq.length != index_b) 2527 { 2528 /* The accesses don't extend the current sequence, 2529 so start a new one here. */ 2530 full_seq.start_a = index_a; 2531 full_seq.start_b = index_b; 2532 full_seq.length = 0; 2533 } 2534 2535 /* Add this pair of references to the sequence. */ 2536 full_seq.length += 1; 2537 full_seq.object_a = object_a; 2538 full_seq.object_b = object_b; 2539 2540 /* If the enclosing objects are structures (and thus have the 2541 same RECORD_TYPE), record the new sequence in STRUCT_SEQ. */ 2542 if (TREE_CODE (type_a) == RECORD_TYPE) 2543 struct_seq = full_seq; 2544 2545 /* Move to the next containing reference for both A and B. */ 2546 ref_a = object_a; 2547 ref_b = object_b; 2548 index_a += 1; 2549 index_b += 1; 2550 continue; 2551 } 2552 2553 /* Try to approach equal type sizes. */ 2554 if (!COMPLETE_TYPE_P (type_a) 2555 || !COMPLETE_TYPE_P (type_b) 2556 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_a)) 2557 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_b))) 2558 break; 2559 2560 unsigned HOST_WIDE_INT size_a = tree_to_uhwi (TYPE_SIZE_UNIT (type_a)); 2561 unsigned HOST_WIDE_INT size_b = tree_to_uhwi (TYPE_SIZE_UNIT (type_b)); 2562 if (size_a <= size_b) 2563 { 2564 index_a += 1; 2565 ref_a = object_a; 2566 } 2567 if (size_b <= size_a) 2568 { 2569 index_b += 1; 2570 ref_b = object_b; 2571 } 2572 } 2573 2574 /* See whether FULL_SEQ ends at the base and whether the two bases 2575 are equal. We do not care about TBAA or alignment info so we can 2576 use OEP_ADDRESS_OF to avoid false negatives. */ 2577 tree base_a = DR_BASE_OBJECT (a); 2578 tree base_b = DR_BASE_OBJECT (b); 2579 bool same_base_p = (full_seq.start_a + full_seq.length == num_dimensions_a 2580 && full_seq.start_b + full_seq.length == num_dimensions_b 2581 && DR_UNCONSTRAINED_BASE (a) == DR_UNCONSTRAINED_BASE (b) 2582 && operand_equal_p (base_a, base_b, OEP_ADDRESS_OF) 2583 && types_compatible_p (TREE_TYPE (base_a), 2584 TREE_TYPE (base_b)) 2585 && (!loop_nest.exists () 2586 || (object_address_invariant_in_loop_p 2587 (loop_nest[0], base_a)))); 2588 2589 /* If the bases are the same, we can include the base variation too. 2590 E.g. the b accesses in: 2591 2592 for (int i = 0; i < n; ++i) 2593 b[i + 4][0] = b[i][0]; 2594 2595 have a definite dependence distance of 4, while for: 2596 2597 for (int i = 0; i < n; ++i) 2598 a[i + 4][0] = b[i][0]; 2599 2600 the dependence distance depends on the gap between a and b. 2601 2602 If the bases are different then we can only rely on the sequence 2603 rooted at a structure access, since arrays are allowed to overlap 2604 arbitrarily and change shape arbitrarily. E.g. we treat this as 2605 valid code: 2606 2607 int a[256]; 2608 ... 2609 ((int (*)[4][3]) &a[1])[i][0] += ((int (*)[4][3]) &a[2])[i][0]; 2610 2611 where two lvalues with the same int[4][3] type overlap, and where 2612 both lvalues are distinct from the object's declared type. */ 2613 if (same_base_p) 2614 { 2615 if (DR_UNCONSTRAINED_BASE (a)) 2616 full_seq.length += 1; 2617 } 2618 else 2619 full_seq = struct_seq; 2620 2621 /* Punt if we didn't find a suitable sequence. */ 2622 if (full_seq.length == 0) 2623 { 2624 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 2625 return res; 2626 } 2627 2628 if (!same_base_p) 2629 { 2630 /* Partial overlap is possible for different bases when strict aliasing 2631 is not in effect. It's also possible if either base involves a union 2632 access; e.g. for: 2633 2634 struct s1 { int a[2]; }; 2635 struct s2 { struct s1 b; int c; }; 2636 struct s3 { int d; struct s1 e; }; 2637 union u { struct s2 f; struct s3 g; } *p, *q; 2638 2639 the s1 at "p->f.b" (base "p->f") partially overlaps the s1 at 2640 "p->g.e" (base "p->g") and might partially overlap the s1 at 2641 "q->g.e" (base "q->g"). */ 2642 if (!flag_strict_aliasing 2643 || ref_contains_union_access_p (full_seq.object_a) 2644 || ref_contains_union_access_p (full_seq.object_b)) 2645 { 2646 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 2647 return res; 2648 } 2649 2650 DDR_COULD_BE_INDEPENDENT_P (res) = true; 2651 if (!loop_nest.exists () 2652 || (object_address_invariant_in_loop_p (loop_nest[0], 2653 full_seq.object_a) 2654 && object_address_invariant_in_loop_p (loop_nest[0], 2655 full_seq.object_b))) 2656 { 2657 DDR_OBJECT_A (res) = full_seq.object_a; 2658 DDR_OBJECT_B (res) = full_seq.object_b; 2659 } 2660 } 2661 2662 DDR_AFFINE_P (res) = true; 2663 DDR_ARE_DEPENDENT (res) = NULL_TREE; 2664 DDR_SUBSCRIPTS (res).create (full_seq.length); 2665 DDR_LOOP_NEST (res) = loop_nest; 2666 DDR_INNER_LOOP (res) = 0; 2667 DDR_SELF_REFERENCE (res) = false; 2668 2669 for (i = 0; i < full_seq.length; ++i) 2670 { 2671 struct subscript *subscript; 2672 2673 subscript = XNEW (struct subscript); 2674 SUB_ACCESS_FN (subscript, 0) = DR_ACCESS_FN (a, full_seq.start_a + i); 2675 SUB_ACCESS_FN (subscript, 1) = DR_ACCESS_FN (b, full_seq.start_b + i); 2676 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known (); 2677 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known (); 2678 SUB_LAST_CONFLICT (subscript) = chrec_dont_know; 2679 SUB_DISTANCE (subscript) = chrec_dont_know; 2680 DDR_SUBSCRIPTS (res).safe_push (subscript); 2681 } 2682 2683 return res; 2684 } 2685 2686 /* Frees memory used by the conflict function F. */ 2687 2688 static void 2689 free_conflict_function (conflict_function *f) 2690 { 2691 unsigned i; 2692 2693 if (CF_NONTRIVIAL_P (f)) 2694 { 2695 for (i = 0; i < f->n; i++) 2696 affine_fn_free (f->fns[i]); 2697 } 2698 free (f); 2699 } 2700 2701 /* Frees memory used by SUBSCRIPTS. */ 2702 2703 static void 2704 free_subscripts (vec<subscript_p> subscripts) 2705 { 2706 unsigned i; 2707 subscript_p s; 2708 2709 FOR_EACH_VEC_ELT (subscripts, i, s) 2710 { 2711 free_conflict_function (s->conflicting_iterations_in_a); 2712 free_conflict_function (s->conflicting_iterations_in_b); 2713 free (s); 2714 } 2715 subscripts.release (); 2716 } 2717 2718 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap 2719 description. */ 2720 2721 static inline void 2722 finalize_ddr_dependent (struct data_dependence_relation *ddr, 2723 tree chrec) 2724 { 2725 DDR_ARE_DEPENDENT (ddr) = chrec; 2726 free_subscripts (DDR_SUBSCRIPTS (ddr)); 2727 DDR_SUBSCRIPTS (ddr).create (0); 2728 } 2729 2730 /* The dependence relation DDR cannot be represented by a distance 2731 vector. */ 2732 2733 static inline void 2734 non_affine_dependence_relation (struct data_dependence_relation *ddr) 2735 { 2736 if (dump_file && (dump_flags & TDF_DETAILS)) 2737 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n"); 2738 2739 DDR_AFFINE_P (ddr) = false; 2740 } 2741 2742 2743 2744 /* This section contains the classic Banerjee tests. */ 2745 2746 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index 2747 variables, i.e., if the ZIV (Zero Index Variable) test is true. */ 2748 2749 static inline bool 2750 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b) 2751 { 2752 return (evolution_function_is_constant_p (chrec_a) 2753 && evolution_function_is_constant_p (chrec_b)); 2754 } 2755 2756 /* Returns true iff CHREC_A and CHREC_B are dependent on an index 2757 variable, i.e., if the SIV (Single Index Variable) test is true. */ 2758 2759 static bool 2760 siv_subscript_p (const_tree chrec_a, const_tree chrec_b) 2761 { 2762 if ((evolution_function_is_constant_p (chrec_a) 2763 && evolution_function_is_univariate_p (chrec_b)) 2764 || (evolution_function_is_constant_p (chrec_b) 2765 && evolution_function_is_univariate_p (chrec_a))) 2766 return true; 2767 2768 if (evolution_function_is_univariate_p (chrec_a) 2769 && evolution_function_is_univariate_p (chrec_b)) 2770 { 2771 switch (TREE_CODE (chrec_a)) 2772 { 2773 case POLYNOMIAL_CHREC: 2774 switch (TREE_CODE (chrec_b)) 2775 { 2776 case POLYNOMIAL_CHREC: 2777 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b)) 2778 return false; 2779 /* FALLTHRU */ 2780 2781 default: 2782 return true; 2783 } 2784 2785 default: 2786 return true; 2787 } 2788 } 2789 2790 return false; 2791 } 2792 2793 /* Creates a conflict function with N dimensions. The affine functions 2794 in each dimension follow. */ 2795 2796 static conflict_function * 2797 conflict_fn (unsigned n, ...) 2798 { 2799 unsigned i; 2800 conflict_function *ret = XCNEW (conflict_function); 2801 va_list ap; 2802 2803 gcc_assert (n > 0 && n <= MAX_DIM); 2804 va_start (ap, n); 2805 2806 ret->n = n; 2807 for (i = 0; i < n; i++) 2808 ret->fns[i] = va_arg (ap, affine_fn); 2809 va_end (ap); 2810 2811 return ret; 2812 } 2813 2814 /* Returns constant affine function with value CST. */ 2815 2816 static affine_fn 2817 affine_fn_cst (tree cst) 2818 { 2819 affine_fn fn; 2820 fn.create (1); 2821 fn.quick_push (cst); 2822 return fn; 2823 } 2824 2825 /* Returns affine function with single variable, CST + COEF * x_DIM. */ 2826 2827 static affine_fn 2828 affine_fn_univar (tree cst, unsigned dim, tree coef) 2829 { 2830 affine_fn fn; 2831 fn.create (dim + 1); 2832 unsigned i; 2833 2834 gcc_assert (dim > 0); 2835 fn.quick_push (cst); 2836 for (i = 1; i < dim; i++) 2837 fn.quick_push (integer_zero_node); 2838 fn.quick_push (coef); 2839 return fn; 2840 } 2841 2842 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and 2843 *OVERLAPS_B are initialized to the functions that describe the 2844 relation between the elements accessed twice by CHREC_A and 2845 CHREC_B. For k >= 0, the following property is verified: 2846 2847 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 2848 2849 static void 2850 analyze_ziv_subscript (tree chrec_a, 2851 tree chrec_b, 2852 conflict_function **overlaps_a, 2853 conflict_function **overlaps_b, 2854 tree *last_conflicts) 2855 { 2856 tree type, difference; 2857 dependence_stats.num_ziv++; 2858 2859 if (dump_file && (dump_flags & TDF_DETAILS)) 2860 fprintf (dump_file, "(analyze_ziv_subscript \n"); 2861 2862 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 2863 chrec_a = chrec_convert (type, chrec_a, NULL); 2864 chrec_b = chrec_convert (type, chrec_b, NULL); 2865 difference = chrec_fold_minus (type, chrec_a, chrec_b); 2866 2867 switch (TREE_CODE (difference)) 2868 { 2869 case INTEGER_CST: 2870 if (integer_zerop (difference)) 2871 { 2872 /* The difference is equal to zero: the accessed index 2873 overlaps for each iteration in the loop. */ 2874 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2875 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2876 *last_conflicts = chrec_dont_know; 2877 dependence_stats.num_ziv_dependent++; 2878 } 2879 else 2880 { 2881 /* The accesses do not overlap. */ 2882 *overlaps_a = conflict_fn_no_dependence (); 2883 *overlaps_b = conflict_fn_no_dependence (); 2884 *last_conflicts = integer_zero_node; 2885 dependence_stats.num_ziv_independent++; 2886 } 2887 break; 2888 2889 default: 2890 /* We're not sure whether the indexes overlap. For the moment, 2891 conservatively answer "don't know". */ 2892 if (dump_file && (dump_flags & TDF_DETAILS)) 2893 fprintf (dump_file, "ziv test failed: difference is non-integer.\n"); 2894 2895 *overlaps_a = conflict_fn_not_known (); 2896 *overlaps_b = conflict_fn_not_known (); 2897 *last_conflicts = chrec_dont_know; 2898 dependence_stats.num_ziv_unimplemented++; 2899 break; 2900 } 2901 2902 if (dump_file && (dump_flags & TDF_DETAILS)) 2903 fprintf (dump_file, ")\n"); 2904 } 2905 2906 /* Similar to max_stmt_executions_int, but returns the bound as a tree, 2907 and only if it fits to the int type. If this is not the case, or the 2908 bound on the number of iterations of LOOP could not be derived, returns 2909 chrec_dont_know. */ 2910 2911 static tree 2912 max_stmt_executions_tree (struct loop *loop) 2913 { 2914 widest_int nit; 2915 2916 if (!max_stmt_executions (loop, &nit)) 2917 return chrec_dont_know; 2918 2919 if (!wi::fits_to_tree_p (nit, unsigned_type_node)) 2920 return chrec_dont_know; 2921 2922 return wide_int_to_tree (unsigned_type_node, nit); 2923 } 2924 2925 /* Determine whether the CHREC is always positive/negative. If the expression 2926 cannot be statically analyzed, return false, otherwise set the answer into 2927 VALUE. */ 2928 2929 static bool 2930 chrec_is_positive (tree chrec, bool *value) 2931 { 2932 bool value0, value1, value2; 2933 tree end_value, nb_iter; 2934 2935 switch (TREE_CODE (chrec)) 2936 { 2937 case POLYNOMIAL_CHREC: 2938 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0) 2939 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1)) 2940 return false; 2941 2942 /* FIXME -- overflows. */ 2943 if (value0 == value1) 2944 { 2945 *value = value0; 2946 return true; 2947 } 2948 2949 /* Otherwise the chrec is under the form: "{-197, +, 2}_1", 2950 and the proof consists in showing that the sign never 2951 changes during the execution of the loop, from 0 to 2952 loop->nb_iterations. */ 2953 if (!evolution_function_is_affine_p (chrec)) 2954 return false; 2955 2956 nb_iter = number_of_latch_executions (get_chrec_loop (chrec)); 2957 if (chrec_contains_undetermined (nb_iter)) 2958 return false; 2959 2960 #if 0 2961 /* TODO -- If the test is after the exit, we may decrease the number of 2962 iterations by one. */ 2963 if (after_exit) 2964 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1)); 2965 #endif 2966 2967 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter); 2968 2969 if (!chrec_is_positive (end_value, &value2)) 2970 return false; 2971 2972 *value = value0; 2973 return value0 == value1; 2974 2975 case INTEGER_CST: 2976 switch (tree_int_cst_sgn (chrec)) 2977 { 2978 case -1: 2979 *value = false; 2980 break; 2981 case 1: 2982 *value = true; 2983 break; 2984 default: 2985 return false; 2986 } 2987 return true; 2988 2989 default: 2990 return false; 2991 } 2992 } 2993 2994 2995 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a 2996 constant, and CHREC_B is an affine function. *OVERLAPS_A and 2997 *OVERLAPS_B are initialized to the functions that describe the 2998 relation between the elements accessed twice by CHREC_A and 2999 CHREC_B. For k >= 0, the following property is verified: 3000 3001 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 3002 3003 static void 3004 analyze_siv_subscript_cst_affine (tree chrec_a, 3005 tree chrec_b, 3006 conflict_function **overlaps_a, 3007 conflict_function **overlaps_b, 3008 tree *last_conflicts) 3009 { 3010 bool value0, value1, value2; 3011 tree type, difference, tmp; 3012 3013 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 3014 chrec_a = chrec_convert (type, chrec_a, NULL); 3015 chrec_b = chrec_convert (type, chrec_b, NULL); 3016 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a); 3017 3018 /* Special case overlap in the first iteration. */ 3019 if (integer_zerop (difference)) 3020 { 3021 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3022 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3023 *last_conflicts = integer_one_node; 3024 return; 3025 } 3026 3027 if (!chrec_is_positive (initial_condition (difference), &value0)) 3028 { 3029 if (dump_file && (dump_flags & TDF_DETAILS)) 3030 fprintf (dump_file, "siv test failed: chrec is not positive.\n"); 3031 3032 dependence_stats.num_siv_unimplemented++; 3033 *overlaps_a = conflict_fn_not_known (); 3034 *overlaps_b = conflict_fn_not_known (); 3035 *last_conflicts = chrec_dont_know; 3036 return; 3037 } 3038 else 3039 { 3040 if (value0 == false) 3041 { 3042 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC 3043 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value1)) 3044 { 3045 if (dump_file && (dump_flags & TDF_DETAILS)) 3046 fprintf (dump_file, "siv test failed: chrec not positive.\n"); 3047 3048 *overlaps_a = conflict_fn_not_known (); 3049 *overlaps_b = conflict_fn_not_known (); 3050 *last_conflicts = chrec_dont_know; 3051 dependence_stats.num_siv_unimplemented++; 3052 return; 3053 } 3054 else 3055 { 3056 if (value1 == true) 3057 { 3058 /* Example: 3059 chrec_a = 12 3060 chrec_b = {10, +, 1} 3061 */ 3062 3063 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference)) 3064 { 3065 HOST_WIDE_INT numiter; 3066 struct loop *loop = get_chrec_loop (chrec_b); 3067 3068 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3069 tmp = fold_build2 (EXACT_DIV_EXPR, type, 3070 fold_build1 (ABS_EXPR, type, difference), 3071 CHREC_RIGHT (chrec_b)); 3072 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp)); 3073 *last_conflicts = integer_one_node; 3074 3075 3076 /* Perform weak-zero siv test to see if overlap is 3077 outside the loop bounds. */ 3078 numiter = max_stmt_executions_int (loop); 3079 3080 if (numiter >= 0 3081 && compare_tree_int (tmp, numiter) > 0) 3082 { 3083 free_conflict_function (*overlaps_a); 3084 free_conflict_function (*overlaps_b); 3085 *overlaps_a = conflict_fn_no_dependence (); 3086 *overlaps_b = conflict_fn_no_dependence (); 3087 *last_conflicts = integer_zero_node; 3088 dependence_stats.num_siv_independent++; 3089 return; 3090 } 3091 dependence_stats.num_siv_dependent++; 3092 return; 3093 } 3094 3095 /* When the step does not divide the difference, there are 3096 no overlaps. */ 3097 else 3098 { 3099 *overlaps_a = conflict_fn_no_dependence (); 3100 *overlaps_b = conflict_fn_no_dependence (); 3101 *last_conflicts = integer_zero_node; 3102 dependence_stats.num_siv_independent++; 3103 return; 3104 } 3105 } 3106 3107 else 3108 { 3109 /* Example: 3110 chrec_a = 12 3111 chrec_b = {10, +, -1} 3112 3113 In this case, chrec_a will not overlap with chrec_b. */ 3114 *overlaps_a = conflict_fn_no_dependence (); 3115 *overlaps_b = conflict_fn_no_dependence (); 3116 *last_conflicts = integer_zero_node; 3117 dependence_stats.num_siv_independent++; 3118 return; 3119 } 3120 } 3121 } 3122 else 3123 { 3124 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC 3125 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value2)) 3126 { 3127 if (dump_file && (dump_flags & TDF_DETAILS)) 3128 fprintf (dump_file, "siv test failed: chrec not positive.\n"); 3129 3130 *overlaps_a = conflict_fn_not_known (); 3131 *overlaps_b = conflict_fn_not_known (); 3132 *last_conflicts = chrec_dont_know; 3133 dependence_stats.num_siv_unimplemented++; 3134 return; 3135 } 3136 else 3137 { 3138 if (value2 == false) 3139 { 3140 /* Example: 3141 chrec_a = 3 3142 chrec_b = {10, +, -1} 3143 */ 3144 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference)) 3145 { 3146 HOST_WIDE_INT numiter; 3147 struct loop *loop = get_chrec_loop (chrec_b); 3148 3149 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3150 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference, 3151 CHREC_RIGHT (chrec_b)); 3152 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp)); 3153 *last_conflicts = integer_one_node; 3154 3155 /* Perform weak-zero siv test to see if overlap is 3156 outside the loop bounds. */ 3157 numiter = max_stmt_executions_int (loop); 3158 3159 if (numiter >= 0 3160 && compare_tree_int (tmp, numiter) > 0) 3161 { 3162 free_conflict_function (*overlaps_a); 3163 free_conflict_function (*overlaps_b); 3164 *overlaps_a = conflict_fn_no_dependence (); 3165 *overlaps_b = conflict_fn_no_dependence (); 3166 *last_conflicts = integer_zero_node; 3167 dependence_stats.num_siv_independent++; 3168 return; 3169 } 3170 dependence_stats.num_siv_dependent++; 3171 return; 3172 } 3173 3174 /* When the step does not divide the difference, there 3175 are no overlaps. */ 3176 else 3177 { 3178 *overlaps_a = conflict_fn_no_dependence (); 3179 *overlaps_b = conflict_fn_no_dependence (); 3180 *last_conflicts = integer_zero_node; 3181 dependence_stats.num_siv_independent++; 3182 return; 3183 } 3184 } 3185 else 3186 { 3187 /* Example: 3188 chrec_a = 3 3189 chrec_b = {4, +, 1} 3190 3191 In this case, chrec_a will not overlap with chrec_b. */ 3192 *overlaps_a = conflict_fn_no_dependence (); 3193 *overlaps_b = conflict_fn_no_dependence (); 3194 *last_conflicts = integer_zero_node; 3195 dependence_stats.num_siv_independent++; 3196 return; 3197 } 3198 } 3199 } 3200 } 3201 } 3202 3203 /* Helper recursive function for initializing the matrix A. Returns 3204 the initial value of CHREC. */ 3205 3206 static tree 3207 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult) 3208 { 3209 gcc_assert (chrec); 3210 3211 switch (TREE_CODE (chrec)) 3212 { 3213 case POLYNOMIAL_CHREC: 3214 if (!cst_and_fits_in_hwi (CHREC_RIGHT (chrec))) 3215 return chrec_dont_know; 3216 A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec)); 3217 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult); 3218 3219 case PLUS_EXPR: 3220 case MULT_EXPR: 3221 case MINUS_EXPR: 3222 { 3223 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 3224 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult); 3225 3226 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1); 3227 } 3228 3229 CASE_CONVERT: 3230 { 3231 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 3232 return chrec_convert (chrec_type (chrec), op, NULL); 3233 } 3234 3235 case BIT_NOT_EXPR: 3236 { 3237 /* Handle ~X as -1 - X. */ 3238 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 3239 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec), 3240 build_int_cst (TREE_TYPE (chrec), -1), op); 3241 } 3242 3243 case INTEGER_CST: 3244 return chrec; 3245 3246 default: 3247 gcc_unreachable (); 3248 return NULL_TREE; 3249 } 3250 } 3251 3252 #define FLOOR_DIV(x,y) ((x) / (y)) 3253 3254 /* Solves the special case of the Diophantine equation: 3255 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B) 3256 3257 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the 3258 number of iterations that loops X and Y run. The overlaps will be 3259 constructed as evolutions in dimension DIM. */ 3260 3261 static void 3262 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter, 3263 HOST_WIDE_INT step_a, 3264 HOST_WIDE_INT step_b, 3265 affine_fn *overlaps_a, 3266 affine_fn *overlaps_b, 3267 tree *last_conflicts, int dim) 3268 { 3269 if (((step_a > 0 && step_b > 0) 3270 || (step_a < 0 && step_b < 0))) 3271 { 3272 HOST_WIDE_INT step_overlaps_a, step_overlaps_b; 3273 HOST_WIDE_INT gcd_steps_a_b, last_conflict, tau2; 3274 3275 gcd_steps_a_b = gcd (step_a, step_b); 3276 step_overlaps_a = step_b / gcd_steps_a_b; 3277 step_overlaps_b = step_a / gcd_steps_a_b; 3278 3279 if (niter > 0) 3280 { 3281 tau2 = FLOOR_DIV (niter, step_overlaps_a); 3282 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b)); 3283 last_conflict = tau2; 3284 *last_conflicts = build_int_cst (NULL_TREE, last_conflict); 3285 } 3286 else 3287 *last_conflicts = chrec_dont_know; 3288 3289 *overlaps_a = affine_fn_univar (integer_zero_node, dim, 3290 build_int_cst (NULL_TREE, 3291 step_overlaps_a)); 3292 *overlaps_b = affine_fn_univar (integer_zero_node, dim, 3293 build_int_cst (NULL_TREE, 3294 step_overlaps_b)); 3295 } 3296 3297 else 3298 { 3299 *overlaps_a = affine_fn_cst (integer_zero_node); 3300 *overlaps_b = affine_fn_cst (integer_zero_node); 3301 *last_conflicts = integer_zero_node; 3302 } 3303 } 3304 3305 /* Solves the special case of a Diophantine equation where CHREC_A is 3306 an affine bivariate function, and CHREC_B is an affine univariate 3307 function. For example, 3308 3309 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z 3310 3311 has the following overlapping functions: 3312 3313 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v 3314 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v 3315 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v 3316 3317 FORNOW: This is a specialized implementation for a case occurring in 3318 a common benchmark. Implement the general algorithm. */ 3319 3320 static void 3321 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b, 3322 conflict_function **overlaps_a, 3323 conflict_function **overlaps_b, 3324 tree *last_conflicts) 3325 { 3326 bool xz_p, yz_p, xyz_p; 3327 HOST_WIDE_INT step_x, step_y, step_z; 3328 HOST_WIDE_INT niter_x, niter_y, niter_z, niter; 3329 affine_fn overlaps_a_xz, overlaps_b_xz; 3330 affine_fn overlaps_a_yz, overlaps_b_yz; 3331 affine_fn overlaps_a_xyz, overlaps_b_xyz; 3332 affine_fn ova1, ova2, ovb; 3333 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz; 3334 3335 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a))); 3336 step_y = int_cst_value (CHREC_RIGHT (chrec_a)); 3337 step_z = int_cst_value (CHREC_RIGHT (chrec_b)); 3338 3339 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a))); 3340 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a)); 3341 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b)); 3342 3343 if (niter_x < 0 || niter_y < 0 || niter_z < 0) 3344 { 3345 if (dump_file && (dump_flags & TDF_DETAILS)) 3346 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n"); 3347 3348 *overlaps_a = conflict_fn_not_known (); 3349 *overlaps_b = conflict_fn_not_known (); 3350 *last_conflicts = chrec_dont_know; 3351 return; 3352 } 3353 3354 niter = MIN (niter_x, niter_z); 3355 compute_overlap_steps_for_affine_univar (niter, step_x, step_z, 3356 &overlaps_a_xz, 3357 &overlaps_b_xz, 3358 &last_conflicts_xz, 1); 3359 niter = MIN (niter_y, niter_z); 3360 compute_overlap_steps_for_affine_univar (niter, step_y, step_z, 3361 &overlaps_a_yz, 3362 &overlaps_b_yz, 3363 &last_conflicts_yz, 2); 3364 niter = MIN (niter_x, niter_z); 3365 niter = MIN (niter_y, niter); 3366 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z, 3367 &overlaps_a_xyz, 3368 &overlaps_b_xyz, 3369 &last_conflicts_xyz, 3); 3370 3371 xz_p = !integer_zerop (last_conflicts_xz); 3372 yz_p = !integer_zerop (last_conflicts_yz); 3373 xyz_p = !integer_zerop (last_conflicts_xyz); 3374 3375 if (xz_p || yz_p || xyz_p) 3376 { 3377 ova1 = affine_fn_cst (integer_zero_node); 3378 ova2 = affine_fn_cst (integer_zero_node); 3379 ovb = affine_fn_cst (integer_zero_node); 3380 if (xz_p) 3381 { 3382 affine_fn t0 = ova1; 3383 affine_fn t2 = ovb; 3384 3385 ova1 = affine_fn_plus (ova1, overlaps_a_xz); 3386 ovb = affine_fn_plus (ovb, overlaps_b_xz); 3387 affine_fn_free (t0); 3388 affine_fn_free (t2); 3389 *last_conflicts = last_conflicts_xz; 3390 } 3391 if (yz_p) 3392 { 3393 affine_fn t0 = ova2; 3394 affine_fn t2 = ovb; 3395 3396 ova2 = affine_fn_plus (ova2, overlaps_a_yz); 3397 ovb = affine_fn_plus (ovb, overlaps_b_yz); 3398 affine_fn_free (t0); 3399 affine_fn_free (t2); 3400 *last_conflicts = last_conflicts_yz; 3401 } 3402 if (xyz_p) 3403 { 3404 affine_fn t0 = ova1; 3405 affine_fn t2 = ova2; 3406 affine_fn t4 = ovb; 3407 3408 ova1 = affine_fn_plus (ova1, overlaps_a_xyz); 3409 ova2 = affine_fn_plus (ova2, overlaps_a_xyz); 3410 ovb = affine_fn_plus (ovb, overlaps_b_xyz); 3411 affine_fn_free (t0); 3412 affine_fn_free (t2); 3413 affine_fn_free (t4); 3414 *last_conflicts = last_conflicts_xyz; 3415 } 3416 *overlaps_a = conflict_fn (2, ova1, ova2); 3417 *overlaps_b = conflict_fn (1, ovb); 3418 } 3419 else 3420 { 3421 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3422 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3423 *last_conflicts = integer_zero_node; 3424 } 3425 3426 affine_fn_free (overlaps_a_xz); 3427 affine_fn_free (overlaps_b_xz); 3428 affine_fn_free (overlaps_a_yz); 3429 affine_fn_free (overlaps_b_yz); 3430 affine_fn_free (overlaps_a_xyz); 3431 affine_fn_free (overlaps_b_xyz); 3432 } 3433 3434 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */ 3435 3436 static void 3437 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2, 3438 int size) 3439 { 3440 memcpy (vec2, vec1, size * sizeof (*vec1)); 3441 } 3442 3443 /* Copy the elements of M x N matrix MAT1 to MAT2. */ 3444 3445 static void 3446 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2, 3447 int m, int n) 3448 { 3449 int i; 3450 3451 for (i = 0; i < m; i++) 3452 lambda_vector_copy (mat1[i], mat2[i], n); 3453 } 3454 3455 /* Store the N x N identity matrix in MAT. */ 3456 3457 static void 3458 lambda_matrix_id (lambda_matrix mat, int size) 3459 { 3460 int i, j; 3461 3462 for (i = 0; i < size; i++) 3463 for (j = 0; j < size; j++) 3464 mat[i][j] = (i == j) ? 1 : 0; 3465 } 3466 3467 /* Return the index of the first nonzero element of vector VEC1 between 3468 START and N. We must have START <= N. 3469 Returns N if VEC1 is the zero vector. */ 3470 3471 static int 3472 lambda_vector_first_nz (lambda_vector vec1, int n, int start) 3473 { 3474 int j = start; 3475 while (j < n && vec1[j] == 0) 3476 j++; 3477 return j; 3478 } 3479 3480 /* Add a multiple of row R1 of matrix MAT with N columns to row R2: 3481 R2 = R2 + CONST1 * R1. */ 3482 3483 static void 3484 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2, 3485 lambda_int const1) 3486 { 3487 int i; 3488 3489 if (const1 == 0) 3490 return; 3491 3492 for (i = 0; i < n; i++) 3493 mat[r2][i] += const1 * mat[r1][i]; 3494 } 3495 3496 /* Multiply vector VEC1 of length SIZE by a constant CONST1, 3497 and store the result in VEC2. */ 3498 3499 static void 3500 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2, 3501 int size, lambda_int const1) 3502 { 3503 int i; 3504 3505 if (const1 == 0) 3506 lambda_vector_clear (vec2, size); 3507 else 3508 for (i = 0; i < size; i++) 3509 vec2[i] = const1 * vec1[i]; 3510 } 3511 3512 /* Negate vector VEC1 with length SIZE and store it in VEC2. */ 3513 3514 static void 3515 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2, 3516 int size) 3517 { 3518 lambda_vector_mult_const (vec1, vec2, size, -1); 3519 } 3520 3521 /* Negate row R1 of matrix MAT which has N columns. */ 3522 3523 static void 3524 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1) 3525 { 3526 lambda_vector_negate (mat[r1], mat[r1], n); 3527 } 3528 3529 /* Return true if two vectors are equal. */ 3530 3531 static bool 3532 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size) 3533 { 3534 int i; 3535 for (i = 0; i < size; i++) 3536 if (vec1[i] != vec2[i]) 3537 return false; 3538 return true; 3539 } 3540 3541 /* Given an M x N integer matrix A, this function determines an M x 3542 M unimodular matrix U, and an M x N echelon matrix S such that 3543 "U.A = S". This decomposition is also known as "right Hermite". 3544 3545 Ref: Algorithm 2.1 page 33 in "Loop Transformations for 3546 Restructuring Compilers" Utpal Banerjee. */ 3547 3548 static void 3549 lambda_matrix_right_hermite (lambda_matrix A, int m, int n, 3550 lambda_matrix S, lambda_matrix U) 3551 { 3552 int i, j, i0 = 0; 3553 3554 lambda_matrix_copy (A, S, m, n); 3555 lambda_matrix_id (U, m); 3556 3557 for (j = 0; j < n; j++) 3558 { 3559 if (lambda_vector_first_nz (S[j], m, i0) < m) 3560 { 3561 ++i0; 3562 for (i = m - 1; i >= i0; i--) 3563 { 3564 while (S[i][j] != 0) 3565 { 3566 lambda_int sigma, factor, a, b; 3567 3568 a = S[i-1][j]; 3569 b = S[i][j]; 3570 sigma = (a * b < 0) ? -1: 1; 3571 a = abs_hwi (a); 3572 b = abs_hwi (b); 3573 factor = sigma * (a / b); 3574 3575 lambda_matrix_row_add (S, n, i, i-1, -factor); 3576 std::swap (S[i], S[i-1]); 3577 3578 lambda_matrix_row_add (U, m, i, i-1, -factor); 3579 std::swap (U[i], U[i-1]); 3580 } 3581 } 3582 } 3583 } 3584 } 3585 3586 /* Determines the overlapping elements due to accesses CHREC_A and 3587 CHREC_B, that are affine functions. This function cannot handle 3588 symbolic evolution functions, ie. when initial conditions are 3589 parameters, because it uses lambda matrices of integers. */ 3590 3591 static void 3592 analyze_subscript_affine_affine (tree chrec_a, 3593 tree chrec_b, 3594 conflict_function **overlaps_a, 3595 conflict_function **overlaps_b, 3596 tree *last_conflicts) 3597 { 3598 unsigned nb_vars_a, nb_vars_b, dim; 3599 HOST_WIDE_INT gamma, gcd_alpha_beta; 3600 lambda_matrix A, U, S; 3601 struct obstack scratch_obstack; 3602 3603 if (eq_evolutions_p (chrec_a, chrec_b)) 3604 { 3605 /* The accessed index overlaps for each iteration in the 3606 loop. */ 3607 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3608 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3609 *last_conflicts = chrec_dont_know; 3610 return; 3611 } 3612 if (dump_file && (dump_flags & TDF_DETAILS)) 3613 fprintf (dump_file, "(analyze_subscript_affine_affine \n"); 3614 3615 /* For determining the initial intersection, we have to solve a 3616 Diophantine equation. This is the most time consuming part. 3617 3618 For answering to the question: "Is there a dependence?" we have 3619 to prove that there exists a solution to the Diophantine 3620 equation, and that the solution is in the iteration domain, 3621 i.e. the solution is positive or zero, and that the solution 3622 happens before the upper bound loop.nb_iterations. Otherwise 3623 there is no dependence. This function outputs a description of 3624 the iterations that hold the intersections. */ 3625 3626 nb_vars_a = nb_vars_in_chrec (chrec_a); 3627 nb_vars_b = nb_vars_in_chrec (chrec_b); 3628 3629 gcc_obstack_init (&scratch_obstack); 3630 3631 dim = nb_vars_a + nb_vars_b; 3632 U = lambda_matrix_new (dim, dim, &scratch_obstack); 3633 A = lambda_matrix_new (dim, 1, &scratch_obstack); 3634 S = lambda_matrix_new (dim, 1, &scratch_obstack); 3635 3636 tree init_a = initialize_matrix_A (A, chrec_a, 0, 1); 3637 tree init_b = initialize_matrix_A (A, chrec_b, nb_vars_a, -1); 3638 if (init_a == chrec_dont_know 3639 || init_b == chrec_dont_know) 3640 { 3641 if (dump_file && (dump_flags & TDF_DETAILS)) 3642 fprintf (dump_file, "affine-affine test failed: " 3643 "representation issue.\n"); 3644 *overlaps_a = conflict_fn_not_known (); 3645 *overlaps_b = conflict_fn_not_known (); 3646 *last_conflicts = chrec_dont_know; 3647 goto end_analyze_subs_aa; 3648 } 3649 gamma = int_cst_value (init_b) - int_cst_value (init_a); 3650 3651 /* Don't do all the hard work of solving the Diophantine equation 3652 when we already know the solution: for example, 3653 | {3, +, 1}_1 3654 | {3, +, 4}_2 3655 | gamma = 3 - 3 = 0. 3656 Then the first overlap occurs during the first iterations: 3657 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x) 3658 */ 3659 if (gamma == 0) 3660 { 3661 if (nb_vars_a == 1 && nb_vars_b == 1) 3662 { 3663 HOST_WIDE_INT step_a, step_b; 3664 HOST_WIDE_INT niter, niter_a, niter_b; 3665 affine_fn ova, ovb; 3666 3667 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a)); 3668 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b)); 3669 niter = MIN (niter_a, niter_b); 3670 step_a = int_cst_value (CHREC_RIGHT (chrec_a)); 3671 step_b = int_cst_value (CHREC_RIGHT (chrec_b)); 3672 3673 compute_overlap_steps_for_affine_univar (niter, step_a, step_b, 3674 &ova, &ovb, 3675 last_conflicts, 1); 3676 *overlaps_a = conflict_fn (1, ova); 3677 *overlaps_b = conflict_fn (1, ovb); 3678 } 3679 3680 else if (nb_vars_a == 2 && nb_vars_b == 1) 3681 compute_overlap_steps_for_affine_1_2 3682 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts); 3683 3684 else if (nb_vars_a == 1 && nb_vars_b == 2) 3685 compute_overlap_steps_for_affine_1_2 3686 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts); 3687 3688 else 3689 { 3690 if (dump_file && (dump_flags & TDF_DETAILS)) 3691 fprintf (dump_file, "affine-affine test failed: too many variables.\n"); 3692 *overlaps_a = conflict_fn_not_known (); 3693 *overlaps_b = conflict_fn_not_known (); 3694 *last_conflicts = chrec_dont_know; 3695 } 3696 goto end_analyze_subs_aa; 3697 } 3698 3699 /* U.A = S */ 3700 lambda_matrix_right_hermite (A, dim, 1, S, U); 3701 3702 if (S[0][0] < 0) 3703 { 3704 S[0][0] *= -1; 3705 lambda_matrix_row_negate (U, dim, 0); 3706 } 3707 gcd_alpha_beta = S[0][0]; 3708 3709 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5, 3710 but that is a quite strange case. Instead of ICEing, answer 3711 don't know. */ 3712 if (gcd_alpha_beta == 0) 3713 { 3714 *overlaps_a = conflict_fn_not_known (); 3715 *overlaps_b = conflict_fn_not_known (); 3716 *last_conflicts = chrec_dont_know; 3717 goto end_analyze_subs_aa; 3718 } 3719 3720 /* The classic "gcd-test". */ 3721 if (!int_divides_p (gcd_alpha_beta, gamma)) 3722 { 3723 /* The "gcd-test" has determined that there is no integer 3724 solution, i.e. there is no dependence. */ 3725 *overlaps_a = conflict_fn_no_dependence (); 3726 *overlaps_b = conflict_fn_no_dependence (); 3727 *last_conflicts = integer_zero_node; 3728 } 3729 3730 /* Both access functions are univariate. This includes SIV and MIV cases. */ 3731 else if (nb_vars_a == 1 && nb_vars_b == 1) 3732 { 3733 /* Both functions should have the same evolution sign. */ 3734 if (((A[0][0] > 0 && -A[1][0] > 0) 3735 || (A[0][0] < 0 && -A[1][0] < 0))) 3736 { 3737 /* The solutions are given by: 3738 | 3739 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0] 3740 | [u21 u22] [y0] 3741 3742 For a given integer t. Using the following variables, 3743 3744 | i0 = u11 * gamma / gcd_alpha_beta 3745 | j0 = u12 * gamma / gcd_alpha_beta 3746 | i1 = u21 3747 | j1 = u22 3748 3749 the solutions are: 3750 3751 | x0 = i0 + i1 * t, 3752 | y0 = j0 + j1 * t. */ 3753 HOST_WIDE_INT i0, j0, i1, j1; 3754 3755 i0 = U[0][0] * gamma / gcd_alpha_beta; 3756 j0 = U[0][1] * gamma / gcd_alpha_beta; 3757 i1 = U[1][0]; 3758 j1 = U[1][1]; 3759 3760 if ((i1 == 0 && i0 < 0) 3761 || (j1 == 0 && j0 < 0)) 3762 { 3763 /* There is no solution. 3764 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations" 3765 falls in here, but for the moment we don't look at the 3766 upper bound of the iteration domain. */ 3767 *overlaps_a = conflict_fn_no_dependence (); 3768 *overlaps_b = conflict_fn_no_dependence (); 3769 *last_conflicts = integer_zero_node; 3770 goto end_analyze_subs_aa; 3771 } 3772 3773 if (i1 > 0 && j1 > 0) 3774 { 3775 HOST_WIDE_INT niter_a 3776 = max_stmt_executions_int (get_chrec_loop (chrec_a)); 3777 HOST_WIDE_INT niter_b 3778 = max_stmt_executions_int (get_chrec_loop (chrec_b)); 3779 HOST_WIDE_INT niter = MIN (niter_a, niter_b); 3780 3781 /* (X0, Y0) is a solution of the Diophantine equation: 3782 "chrec_a (X0) = chrec_b (Y0)". */ 3783 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1), 3784 CEIL (-j0, j1)); 3785 HOST_WIDE_INT x0 = i1 * tau1 + i0; 3786 HOST_WIDE_INT y0 = j1 * tau1 + j0; 3787 3788 /* (X1, Y1) is the smallest positive solution of the eq 3789 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the 3790 first conflict occurs. */ 3791 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1); 3792 HOST_WIDE_INT x1 = x0 - i1 * min_multiple; 3793 HOST_WIDE_INT y1 = y0 - j1 * min_multiple; 3794 3795 if (niter > 0) 3796 { 3797 /* If the overlap occurs outside of the bounds of the 3798 loop, there is no dependence. */ 3799 if (x1 >= niter_a || y1 >= niter_b) 3800 { 3801 *overlaps_a = conflict_fn_no_dependence (); 3802 *overlaps_b = conflict_fn_no_dependence (); 3803 *last_conflicts = integer_zero_node; 3804 goto end_analyze_subs_aa; 3805 } 3806 3807 /* max stmt executions can get quite large, avoid 3808 overflows by using wide ints here. */ 3809 widest_int tau2 3810 = wi::smin (wi::sdiv_floor (wi::sub (niter_a, i0), i1), 3811 wi::sdiv_floor (wi::sub (niter_b, j0), j1)); 3812 widest_int last_conflict = wi::sub (tau2, (x1 - i0)/i1); 3813 if (wi::min_precision (last_conflict, SIGNED) 3814 <= TYPE_PRECISION (integer_type_node)) 3815 *last_conflicts 3816 = build_int_cst (integer_type_node, 3817 last_conflict.to_shwi ()); 3818 else 3819 *last_conflicts = chrec_dont_know; 3820 } 3821 else 3822 *last_conflicts = chrec_dont_know; 3823 3824 *overlaps_a 3825 = conflict_fn (1, 3826 affine_fn_univar (build_int_cst (NULL_TREE, x1), 3827 1, 3828 build_int_cst (NULL_TREE, i1))); 3829 *overlaps_b 3830 = conflict_fn (1, 3831 affine_fn_univar (build_int_cst (NULL_TREE, y1), 3832 1, 3833 build_int_cst (NULL_TREE, j1))); 3834 } 3835 else 3836 { 3837 /* FIXME: For the moment, the upper bound of the 3838 iteration domain for i and j is not checked. */ 3839 if (dump_file && (dump_flags & TDF_DETAILS)) 3840 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 3841 *overlaps_a = conflict_fn_not_known (); 3842 *overlaps_b = conflict_fn_not_known (); 3843 *last_conflicts = chrec_dont_know; 3844 } 3845 } 3846 else 3847 { 3848 if (dump_file && (dump_flags & TDF_DETAILS)) 3849 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 3850 *overlaps_a = conflict_fn_not_known (); 3851 *overlaps_b = conflict_fn_not_known (); 3852 *last_conflicts = chrec_dont_know; 3853 } 3854 } 3855 else 3856 { 3857 if (dump_file && (dump_flags & TDF_DETAILS)) 3858 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 3859 *overlaps_a = conflict_fn_not_known (); 3860 *overlaps_b = conflict_fn_not_known (); 3861 *last_conflicts = chrec_dont_know; 3862 } 3863 3864 end_analyze_subs_aa: 3865 obstack_free (&scratch_obstack, NULL); 3866 if (dump_file && (dump_flags & TDF_DETAILS)) 3867 { 3868 fprintf (dump_file, " (overlaps_a = "); 3869 dump_conflict_function (dump_file, *overlaps_a); 3870 fprintf (dump_file, ")\n (overlaps_b = "); 3871 dump_conflict_function (dump_file, *overlaps_b); 3872 fprintf (dump_file, "))\n"); 3873 } 3874 } 3875 3876 /* Returns true when analyze_subscript_affine_affine can be used for 3877 determining the dependence relation between chrec_a and chrec_b, 3878 that contain symbols. This function modifies chrec_a and chrec_b 3879 such that the analysis result is the same, and such that they don't 3880 contain symbols, and then can safely be passed to the analyzer. 3881 3882 Example: The analysis of the following tuples of evolutions produce 3883 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1 3884 vs. {0, +, 1}_1 3885 3886 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1) 3887 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1) 3888 */ 3889 3890 static bool 3891 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b) 3892 { 3893 tree diff, type, left_a, left_b, right_b; 3894 3895 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a)) 3896 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b))) 3897 /* FIXME: For the moment not handled. Might be refined later. */ 3898 return false; 3899 3900 type = chrec_type (*chrec_a); 3901 left_a = CHREC_LEFT (*chrec_a); 3902 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL); 3903 diff = chrec_fold_minus (type, left_a, left_b); 3904 3905 if (!evolution_function_is_constant_p (diff)) 3906 return false; 3907 3908 if (dump_file && (dump_flags & TDF_DETAILS)) 3909 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n"); 3910 3911 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a), 3912 diff, CHREC_RIGHT (*chrec_a)); 3913 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL); 3914 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b), 3915 build_int_cst (type, 0), 3916 right_b); 3917 return true; 3918 } 3919 3920 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and 3921 *OVERLAPS_B are initialized to the functions that describe the 3922 relation between the elements accessed twice by CHREC_A and 3923 CHREC_B. For k >= 0, the following property is verified: 3924 3925 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 3926 3927 static void 3928 analyze_siv_subscript (tree chrec_a, 3929 tree chrec_b, 3930 conflict_function **overlaps_a, 3931 conflict_function **overlaps_b, 3932 tree *last_conflicts, 3933 int loop_nest_num) 3934 { 3935 dependence_stats.num_siv++; 3936 3937 if (dump_file && (dump_flags & TDF_DETAILS)) 3938 fprintf (dump_file, "(analyze_siv_subscript \n"); 3939 3940 if (evolution_function_is_constant_p (chrec_a) 3941 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num)) 3942 analyze_siv_subscript_cst_affine (chrec_a, chrec_b, 3943 overlaps_a, overlaps_b, last_conflicts); 3944 3945 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num) 3946 && evolution_function_is_constant_p (chrec_b)) 3947 analyze_siv_subscript_cst_affine (chrec_b, chrec_a, 3948 overlaps_b, overlaps_a, last_conflicts); 3949 3950 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num) 3951 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num)) 3952 { 3953 if (!chrec_contains_symbols (chrec_a) 3954 && !chrec_contains_symbols (chrec_b)) 3955 { 3956 analyze_subscript_affine_affine (chrec_a, chrec_b, 3957 overlaps_a, overlaps_b, 3958 last_conflicts); 3959 3960 if (CF_NOT_KNOWN_P (*overlaps_a) 3961 || CF_NOT_KNOWN_P (*overlaps_b)) 3962 dependence_stats.num_siv_unimplemented++; 3963 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 3964 || CF_NO_DEPENDENCE_P (*overlaps_b)) 3965 dependence_stats.num_siv_independent++; 3966 else 3967 dependence_stats.num_siv_dependent++; 3968 } 3969 else if (can_use_analyze_subscript_affine_affine (&chrec_a, 3970 &chrec_b)) 3971 { 3972 analyze_subscript_affine_affine (chrec_a, chrec_b, 3973 overlaps_a, overlaps_b, 3974 last_conflicts); 3975 3976 if (CF_NOT_KNOWN_P (*overlaps_a) 3977 || CF_NOT_KNOWN_P (*overlaps_b)) 3978 dependence_stats.num_siv_unimplemented++; 3979 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 3980 || CF_NO_DEPENDENCE_P (*overlaps_b)) 3981 dependence_stats.num_siv_independent++; 3982 else 3983 dependence_stats.num_siv_dependent++; 3984 } 3985 else 3986 goto siv_subscript_dontknow; 3987 } 3988 3989 else 3990 { 3991 siv_subscript_dontknow:; 3992 if (dump_file && (dump_flags & TDF_DETAILS)) 3993 fprintf (dump_file, " siv test failed: unimplemented"); 3994 *overlaps_a = conflict_fn_not_known (); 3995 *overlaps_b = conflict_fn_not_known (); 3996 *last_conflicts = chrec_dont_know; 3997 dependence_stats.num_siv_unimplemented++; 3998 } 3999 4000 if (dump_file && (dump_flags & TDF_DETAILS)) 4001 fprintf (dump_file, ")\n"); 4002 } 4003 4004 /* Returns false if we can prove that the greatest common divisor of the steps 4005 of CHREC does not divide CST, false otherwise. */ 4006 4007 static bool 4008 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst) 4009 { 4010 HOST_WIDE_INT cd = 0, val; 4011 tree step; 4012 4013 if (!tree_fits_shwi_p (cst)) 4014 return true; 4015 val = tree_to_shwi (cst); 4016 4017 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC) 4018 { 4019 step = CHREC_RIGHT (chrec); 4020 if (!tree_fits_shwi_p (step)) 4021 return true; 4022 cd = gcd (cd, tree_to_shwi (step)); 4023 chrec = CHREC_LEFT (chrec); 4024 } 4025 4026 return val % cd == 0; 4027 } 4028 4029 /* Analyze a MIV (Multiple Index Variable) subscript with respect to 4030 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the 4031 functions that describe the relation between the elements accessed 4032 twice by CHREC_A and CHREC_B. For k >= 0, the following property 4033 is verified: 4034 4035 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 4036 4037 static void 4038 analyze_miv_subscript (tree chrec_a, 4039 tree chrec_b, 4040 conflict_function **overlaps_a, 4041 conflict_function **overlaps_b, 4042 tree *last_conflicts, 4043 struct loop *loop_nest) 4044 { 4045 tree type, difference; 4046 4047 dependence_stats.num_miv++; 4048 if (dump_file && (dump_flags & TDF_DETAILS)) 4049 fprintf (dump_file, "(analyze_miv_subscript \n"); 4050 4051 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 4052 chrec_a = chrec_convert (type, chrec_a, NULL); 4053 chrec_b = chrec_convert (type, chrec_b, NULL); 4054 difference = chrec_fold_minus (type, chrec_a, chrec_b); 4055 4056 if (eq_evolutions_p (chrec_a, chrec_b)) 4057 { 4058 /* Access functions are the same: all the elements are accessed 4059 in the same order. */ 4060 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 4061 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 4062 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a)); 4063 dependence_stats.num_miv_dependent++; 4064 } 4065 4066 else if (evolution_function_is_constant_p (difference) 4067 && evolution_function_is_affine_multivariate_p (chrec_a, 4068 loop_nest->num) 4069 && !gcd_of_steps_may_divide_p (chrec_a, difference)) 4070 { 4071 /* testsuite/.../ssa-chrec-33.c 4072 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2 4073 4074 The difference is 1, and all the evolution steps are multiples 4075 of 2, consequently there are no overlapping elements. */ 4076 *overlaps_a = conflict_fn_no_dependence (); 4077 *overlaps_b = conflict_fn_no_dependence (); 4078 *last_conflicts = integer_zero_node; 4079 dependence_stats.num_miv_independent++; 4080 } 4081 4082 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest->num) 4083 && !chrec_contains_symbols (chrec_a, loop_nest) 4084 && evolution_function_is_affine_in_loop (chrec_b, loop_nest->num) 4085 && !chrec_contains_symbols (chrec_b, loop_nest)) 4086 { 4087 /* testsuite/.../ssa-chrec-35.c 4088 {0, +, 1}_2 vs. {0, +, 1}_3 4089 the overlapping elements are respectively located at iterations: 4090 {0, +, 1}_x and {0, +, 1}_x, 4091 in other words, we have the equality: 4092 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x) 4093 4094 Other examples: 4095 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) = 4096 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y) 4097 4098 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) = 4099 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) 4100 */ 4101 analyze_subscript_affine_affine (chrec_a, chrec_b, 4102 overlaps_a, overlaps_b, last_conflicts); 4103 4104 if (CF_NOT_KNOWN_P (*overlaps_a) 4105 || CF_NOT_KNOWN_P (*overlaps_b)) 4106 dependence_stats.num_miv_unimplemented++; 4107 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 4108 || CF_NO_DEPENDENCE_P (*overlaps_b)) 4109 dependence_stats.num_miv_independent++; 4110 else 4111 dependence_stats.num_miv_dependent++; 4112 } 4113 4114 else 4115 { 4116 /* When the analysis is too difficult, answer "don't know". */ 4117 if (dump_file && (dump_flags & TDF_DETAILS)) 4118 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n"); 4119 4120 *overlaps_a = conflict_fn_not_known (); 4121 *overlaps_b = conflict_fn_not_known (); 4122 *last_conflicts = chrec_dont_know; 4123 dependence_stats.num_miv_unimplemented++; 4124 } 4125 4126 if (dump_file && (dump_flags & TDF_DETAILS)) 4127 fprintf (dump_file, ")\n"); 4128 } 4129 4130 /* Determines the iterations for which CHREC_A is equal to CHREC_B in 4131 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and 4132 OVERLAP_ITERATIONS_B are initialized with two functions that 4133 describe the iterations that contain conflicting elements. 4134 4135 Remark: For an integer k >= 0, the following equality is true: 4136 4137 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)). 4138 */ 4139 4140 static void 4141 analyze_overlapping_iterations (tree chrec_a, 4142 tree chrec_b, 4143 conflict_function **overlap_iterations_a, 4144 conflict_function **overlap_iterations_b, 4145 tree *last_conflicts, struct loop *loop_nest) 4146 { 4147 unsigned int lnn = loop_nest->num; 4148 4149 dependence_stats.num_subscript_tests++; 4150 4151 if (dump_file && (dump_flags & TDF_DETAILS)) 4152 { 4153 fprintf (dump_file, "(analyze_overlapping_iterations \n"); 4154 fprintf (dump_file, " (chrec_a = "); 4155 print_generic_expr (dump_file, chrec_a); 4156 fprintf (dump_file, ")\n (chrec_b = "); 4157 print_generic_expr (dump_file, chrec_b); 4158 fprintf (dump_file, ")\n"); 4159 } 4160 4161 if (chrec_a == NULL_TREE 4162 || chrec_b == NULL_TREE 4163 || chrec_contains_undetermined (chrec_a) 4164 || chrec_contains_undetermined (chrec_b)) 4165 { 4166 dependence_stats.num_subscript_undetermined++; 4167 4168 *overlap_iterations_a = conflict_fn_not_known (); 4169 *overlap_iterations_b = conflict_fn_not_known (); 4170 } 4171 4172 /* If they are the same chrec, and are affine, they overlap 4173 on every iteration. */ 4174 else if (eq_evolutions_p (chrec_a, chrec_b) 4175 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn) 4176 || operand_equal_p (chrec_a, chrec_b, 0))) 4177 { 4178 dependence_stats.num_same_subscript_function++; 4179 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 4180 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 4181 *last_conflicts = chrec_dont_know; 4182 } 4183 4184 /* If they aren't the same, and aren't affine, we can't do anything 4185 yet. */ 4186 else if ((chrec_contains_symbols (chrec_a) 4187 || chrec_contains_symbols (chrec_b)) 4188 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn) 4189 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn))) 4190 { 4191 dependence_stats.num_subscript_undetermined++; 4192 *overlap_iterations_a = conflict_fn_not_known (); 4193 *overlap_iterations_b = conflict_fn_not_known (); 4194 } 4195 4196 else if (ziv_subscript_p (chrec_a, chrec_b)) 4197 analyze_ziv_subscript (chrec_a, chrec_b, 4198 overlap_iterations_a, overlap_iterations_b, 4199 last_conflicts); 4200 4201 else if (siv_subscript_p (chrec_a, chrec_b)) 4202 analyze_siv_subscript (chrec_a, chrec_b, 4203 overlap_iterations_a, overlap_iterations_b, 4204 last_conflicts, lnn); 4205 4206 else 4207 analyze_miv_subscript (chrec_a, chrec_b, 4208 overlap_iterations_a, overlap_iterations_b, 4209 last_conflicts, loop_nest); 4210 4211 if (dump_file && (dump_flags & TDF_DETAILS)) 4212 { 4213 fprintf (dump_file, " (overlap_iterations_a = "); 4214 dump_conflict_function (dump_file, *overlap_iterations_a); 4215 fprintf (dump_file, ")\n (overlap_iterations_b = "); 4216 dump_conflict_function (dump_file, *overlap_iterations_b); 4217 fprintf (dump_file, "))\n"); 4218 } 4219 } 4220 4221 /* Helper function for uniquely inserting distance vectors. */ 4222 4223 static void 4224 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v) 4225 { 4226 unsigned i; 4227 lambda_vector v; 4228 4229 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v) 4230 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr))) 4231 return; 4232 4233 DDR_DIST_VECTS (ddr).safe_push (dist_v); 4234 } 4235 4236 /* Helper function for uniquely inserting direction vectors. */ 4237 4238 static void 4239 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v) 4240 { 4241 unsigned i; 4242 lambda_vector v; 4243 4244 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v) 4245 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr))) 4246 return; 4247 4248 DDR_DIR_VECTS (ddr).safe_push (dir_v); 4249 } 4250 4251 /* Add a distance of 1 on all the loops outer than INDEX. If we 4252 haven't yet determined a distance for this outer loop, push a new 4253 distance vector composed of the previous distance, and a distance 4254 of 1 for this outer loop. Example: 4255 4256 | loop_1 4257 | loop_2 4258 | A[10] 4259 | endloop_2 4260 | endloop_1 4261 4262 Saved vectors are of the form (dist_in_1, dist_in_2). First, we 4263 save (0, 1), then we have to save (1, 0). */ 4264 4265 static void 4266 add_outer_distances (struct data_dependence_relation *ddr, 4267 lambda_vector dist_v, int index) 4268 { 4269 /* For each outer loop where init_v is not set, the accesses are 4270 in dependence of distance 1 in the loop. */ 4271 while (--index >= 0) 4272 { 4273 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4274 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr)); 4275 save_v[index] = 1; 4276 save_dist_v (ddr, save_v); 4277 } 4278 } 4279 4280 /* Return false when fail to represent the data dependence as a 4281 distance vector. A_INDEX is the index of the first reference 4282 (0 for DDR_A, 1 for DDR_B) and B_INDEX is the index of the 4283 second reference. INIT_B is set to true when a component has been 4284 added to the distance vector DIST_V. INDEX_CARRY is then set to 4285 the index in DIST_V that carries the dependence. */ 4286 4287 static bool 4288 build_classic_dist_vector_1 (struct data_dependence_relation *ddr, 4289 unsigned int a_index, unsigned int b_index, 4290 lambda_vector dist_v, bool *init_b, 4291 int *index_carry) 4292 { 4293 unsigned i; 4294 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4295 struct loop *loop = DDR_LOOP_NEST (ddr)[0]; 4296 4297 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 4298 { 4299 tree access_fn_a, access_fn_b; 4300 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i); 4301 4302 if (chrec_contains_undetermined (SUB_DISTANCE (subscript))) 4303 { 4304 non_affine_dependence_relation (ddr); 4305 return false; 4306 } 4307 4308 access_fn_a = SUB_ACCESS_FN (subscript, a_index); 4309 access_fn_b = SUB_ACCESS_FN (subscript, b_index); 4310 4311 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC 4312 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC) 4313 { 4314 HOST_WIDE_INT dist; 4315 int index; 4316 int var_a = CHREC_VARIABLE (access_fn_a); 4317 int var_b = CHREC_VARIABLE (access_fn_b); 4318 4319 if (var_a != var_b 4320 || chrec_contains_undetermined (SUB_DISTANCE (subscript))) 4321 { 4322 non_affine_dependence_relation (ddr); 4323 return false; 4324 } 4325 4326 /* When data references are collected in a loop while data 4327 dependences are analyzed in loop nest nested in the loop, we 4328 would have more number of access functions than number of 4329 loops. Skip access functions of loops not in the loop nest. 4330 4331 See PR89725 for more information. */ 4332 if (flow_loop_nested_p (get_loop (cfun, var_a), loop)) 4333 continue; 4334 4335 dist = int_cst_value (SUB_DISTANCE (subscript)); 4336 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr)); 4337 *index_carry = MIN (index, *index_carry); 4338 4339 /* This is the subscript coupling test. If we have already 4340 recorded a distance for this loop (a distance coming from 4341 another subscript), it should be the same. For example, 4342 in the following code, there is no dependence: 4343 4344 | loop i = 0, N, 1 4345 | T[i+1][i] = ... 4346 | ... = T[i][i] 4347 | endloop 4348 */ 4349 if (init_v[index] != 0 && dist_v[index] != dist) 4350 { 4351 finalize_ddr_dependent (ddr, chrec_known); 4352 return false; 4353 } 4354 4355 dist_v[index] = dist; 4356 init_v[index] = 1; 4357 *init_b = true; 4358 } 4359 else if (!operand_equal_p (access_fn_a, access_fn_b, 0)) 4360 { 4361 /* This can be for example an affine vs. constant dependence 4362 (T[i] vs. T[3]) that is not an affine dependence and is 4363 not representable as a distance vector. */ 4364 non_affine_dependence_relation (ddr); 4365 return false; 4366 } 4367 } 4368 4369 return true; 4370 } 4371 4372 /* Return true when the DDR contains only constant access functions. */ 4373 4374 static bool 4375 constant_access_functions (const struct data_dependence_relation *ddr) 4376 { 4377 unsigned i; 4378 subscript *sub; 4379 4380 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub) 4381 if (!evolution_function_is_constant_p (SUB_ACCESS_FN (sub, 0)) 4382 || !evolution_function_is_constant_p (SUB_ACCESS_FN (sub, 1))) 4383 return false; 4384 4385 return true; 4386 } 4387 4388 /* Helper function for the case where DDR_A and DDR_B are the same 4389 multivariate access function with a constant step. For an example 4390 see pr34635-1.c. */ 4391 4392 static void 4393 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2) 4394 { 4395 int x_1, x_2; 4396 tree c_1 = CHREC_LEFT (c_2); 4397 tree c_0 = CHREC_LEFT (c_1); 4398 lambda_vector dist_v; 4399 HOST_WIDE_INT v1, v2, cd; 4400 4401 /* Polynomials with more than 2 variables are not handled yet. When 4402 the evolution steps are parameters, it is not possible to 4403 represent the dependence using classical distance vectors. */ 4404 if (TREE_CODE (c_0) != INTEGER_CST 4405 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST 4406 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST) 4407 { 4408 DDR_AFFINE_P (ddr) = false; 4409 return; 4410 } 4411 4412 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr)); 4413 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr)); 4414 4415 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */ 4416 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4417 v1 = int_cst_value (CHREC_RIGHT (c_1)); 4418 v2 = int_cst_value (CHREC_RIGHT (c_2)); 4419 cd = gcd (v1, v2); 4420 v1 /= cd; 4421 v2 /= cd; 4422 4423 if (v2 < 0) 4424 { 4425 v2 = -v2; 4426 v1 = -v1; 4427 } 4428 4429 dist_v[x_1] = v2; 4430 dist_v[x_2] = -v1; 4431 save_dist_v (ddr, dist_v); 4432 4433 add_outer_distances (ddr, dist_v, x_1); 4434 } 4435 4436 /* Helper function for the case where DDR_A and DDR_B are the same 4437 access functions. */ 4438 4439 static void 4440 add_other_self_distances (struct data_dependence_relation *ddr) 4441 { 4442 lambda_vector dist_v; 4443 unsigned i; 4444 int index_carry = DDR_NB_LOOPS (ddr); 4445 subscript *sub; 4446 struct loop *loop = DDR_LOOP_NEST (ddr)[0]; 4447 4448 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub) 4449 { 4450 tree access_fun = SUB_ACCESS_FN (sub, 0); 4451 4452 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC) 4453 { 4454 if (!evolution_function_is_univariate_p (access_fun, loop->num)) 4455 { 4456 if (DDR_NUM_SUBSCRIPTS (ddr) != 1) 4457 { 4458 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know; 4459 return; 4460 } 4461 4462 access_fun = SUB_ACCESS_FN (DDR_SUBSCRIPT (ddr, 0), 0); 4463 4464 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC) 4465 add_multivariate_self_dist (ddr, access_fun); 4466 else 4467 /* The evolution step is not constant: it varies in 4468 the outer loop, so this cannot be represented by a 4469 distance vector. For example in pr34635.c the 4470 evolution is {0, +, {0, +, 4}_1}_2. */ 4471 DDR_AFFINE_P (ddr) = false; 4472 4473 return; 4474 } 4475 4476 /* When data references are collected in a loop while data 4477 dependences are analyzed in loop nest nested in the loop, we 4478 would have more number of access functions than number of 4479 loops. Skip access functions of loops not in the loop nest. 4480 4481 See PR89725 for more information. */ 4482 if (flow_loop_nested_p (get_loop (cfun, CHREC_VARIABLE (access_fun)), 4483 loop)) 4484 continue; 4485 4486 index_carry = MIN (index_carry, 4487 index_in_loop_nest (CHREC_VARIABLE (access_fun), 4488 DDR_LOOP_NEST (ddr))); 4489 } 4490 } 4491 4492 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4493 add_outer_distances (ddr, dist_v, index_carry); 4494 } 4495 4496 static void 4497 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr) 4498 { 4499 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4500 4501 dist_v[DDR_INNER_LOOP (ddr)] = 1; 4502 save_dist_v (ddr, dist_v); 4503 } 4504 4505 /* Adds a unit distance vector to DDR when there is a 0 overlap. This 4506 is the case for example when access functions are the same and 4507 equal to a constant, as in: 4508 4509 | loop_1 4510 | A[3] = ... 4511 | ... = A[3] 4512 | endloop_1 4513 4514 in which case the distance vectors are (0) and (1). */ 4515 4516 static void 4517 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr) 4518 { 4519 unsigned i, j; 4520 4521 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 4522 { 4523 subscript_p sub = DDR_SUBSCRIPT (ddr, i); 4524 conflict_function *ca = SUB_CONFLICTS_IN_A (sub); 4525 conflict_function *cb = SUB_CONFLICTS_IN_B (sub); 4526 4527 for (j = 0; j < ca->n; j++) 4528 if (affine_function_zero_p (ca->fns[j])) 4529 { 4530 insert_innermost_unit_dist_vector (ddr); 4531 return; 4532 } 4533 4534 for (j = 0; j < cb->n; j++) 4535 if (affine_function_zero_p (cb->fns[j])) 4536 { 4537 insert_innermost_unit_dist_vector (ddr); 4538 return; 4539 } 4540 } 4541 } 4542 4543 /* Return true when the DDR contains two data references that have the 4544 same access functions. */ 4545 4546 static inline bool 4547 same_access_functions (const struct data_dependence_relation *ddr) 4548 { 4549 unsigned i; 4550 subscript *sub; 4551 4552 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub) 4553 if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0), 4554 SUB_ACCESS_FN (sub, 1))) 4555 return false; 4556 4557 return true; 4558 } 4559 4560 /* Compute the classic per loop distance vector. DDR is the data 4561 dependence relation to build a vector from. Return false when fail 4562 to represent the data dependence as a distance vector. */ 4563 4564 static bool 4565 build_classic_dist_vector (struct data_dependence_relation *ddr, 4566 struct loop *loop_nest) 4567 { 4568 bool init_b = false; 4569 int index_carry = DDR_NB_LOOPS (ddr); 4570 lambda_vector dist_v; 4571 4572 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE) 4573 return false; 4574 4575 if (same_access_functions (ddr)) 4576 { 4577 /* Save the 0 vector. */ 4578 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4579 save_dist_v (ddr, dist_v); 4580 4581 if (constant_access_functions (ddr)) 4582 add_distance_for_zero_overlaps (ddr); 4583 4584 if (DDR_NB_LOOPS (ddr) > 1) 4585 add_other_self_distances (ddr); 4586 4587 return true; 4588 } 4589 4590 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4591 if (!build_classic_dist_vector_1 (ddr, 0, 1, dist_v, &init_b, &index_carry)) 4592 return false; 4593 4594 /* Save the distance vector if we initialized one. */ 4595 if (init_b) 4596 { 4597 /* Verify a basic constraint: classic distance vectors should 4598 always be lexicographically positive. 4599 4600 Data references are collected in the order of execution of 4601 the program, thus for the following loop 4602 4603 | for (i = 1; i < 100; i++) 4604 | for (j = 1; j < 100; j++) 4605 | { 4606 | t = T[j+1][i-1]; // A 4607 | T[j][i] = t + 2; // B 4608 | } 4609 4610 references are collected following the direction of the wind: 4611 A then B. The data dependence tests are performed also 4612 following this order, such that we're looking at the distance 4613 separating the elements accessed by A from the elements later 4614 accessed by B. But in this example, the distance returned by 4615 test_dep (A, B) is lexicographically negative (-1, 1), that 4616 means that the access A occurs later than B with respect to 4617 the outer loop, ie. we're actually looking upwind. In this 4618 case we solve test_dep (B, A) looking downwind to the 4619 lexicographically positive solution, that returns the 4620 distance vector (1, -1). */ 4621 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr))) 4622 { 4623 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4624 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest)) 4625 return false; 4626 compute_subscript_distance (ddr); 4627 if (!build_classic_dist_vector_1 (ddr, 1, 0, save_v, &init_b, 4628 &index_carry)) 4629 return false; 4630 save_dist_v (ddr, save_v); 4631 DDR_REVERSED_P (ddr) = true; 4632 4633 /* In this case there is a dependence forward for all the 4634 outer loops: 4635 4636 | for (k = 1; k < 100; k++) 4637 | for (i = 1; i < 100; i++) 4638 | for (j = 1; j < 100; j++) 4639 | { 4640 | t = T[j+1][i-1]; // A 4641 | T[j][i] = t + 2; // B 4642 | } 4643 4644 the vectors are: 4645 (0, 1, -1) 4646 (1, 1, -1) 4647 (1, -1, 1) 4648 */ 4649 if (DDR_NB_LOOPS (ddr) > 1) 4650 { 4651 add_outer_distances (ddr, save_v, index_carry); 4652 add_outer_distances (ddr, dist_v, index_carry); 4653 } 4654 } 4655 else 4656 { 4657 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4658 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr)); 4659 4660 if (DDR_NB_LOOPS (ddr) > 1) 4661 { 4662 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4663 4664 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest)) 4665 return false; 4666 compute_subscript_distance (ddr); 4667 if (!build_classic_dist_vector_1 (ddr, 1, 0, opposite_v, &init_b, 4668 &index_carry)) 4669 return false; 4670 4671 save_dist_v (ddr, save_v); 4672 add_outer_distances (ddr, dist_v, index_carry); 4673 add_outer_distances (ddr, opposite_v, index_carry); 4674 } 4675 else 4676 save_dist_v (ddr, save_v); 4677 } 4678 } 4679 else 4680 { 4681 /* There is a distance of 1 on all the outer loops: Example: 4682 there is a dependence of distance 1 on loop_1 for the array A. 4683 4684 | loop_1 4685 | A[5] = ... 4686 | endloop 4687 */ 4688 add_outer_distances (ddr, dist_v, 4689 lambda_vector_first_nz (dist_v, 4690 DDR_NB_LOOPS (ddr), 0)); 4691 } 4692 4693 if (dump_file && (dump_flags & TDF_DETAILS)) 4694 { 4695 unsigned i; 4696 4697 fprintf (dump_file, "(build_classic_dist_vector\n"); 4698 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++) 4699 { 4700 fprintf (dump_file, " dist_vector = ("); 4701 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i), 4702 DDR_NB_LOOPS (ddr)); 4703 fprintf (dump_file, " )\n"); 4704 } 4705 fprintf (dump_file, ")\n"); 4706 } 4707 4708 return true; 4709 } 4710 4711 /* Return the direction for a given distance. 4712 FIXME: Computing dir this way is suboptimal, since dir can catch 4713 cases that dist is unable to represent. */ 4714 4715 static inline enum data_dependence_direction 4716 dir_from_dist (int dist) 4717 { 4718 if (dist > 0) 4719 return dir_positive; 4720 else if (dist < 0) 4721 return dir_negative; 4722 else 4723 return dir_equal; 4724 } 4725 4726 /* Compute the classic per loop direction vector. DDR is the data 4727 dependence relation to build a vector from. */ 4728 4729 static void 4730 build_classic_dir_vector (struct data_dependence_relation *ddr) 4731 { 4732 unsigned i, j; 4733 lambda_vector dist_v; 4734 4735 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) 4736 { 4737 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 4738 4739 for (j = 0; j < DDR_NB_LOOPS (ddr); j++) 4740 dir_v[j] = dir_from_dist (dist_v[j]); 4741 4742 save_dir_v (ddr, dir_v); 4743 } 4744 } 4745 4746 /* Helper function. Returns true when there is a dependence between the 4747 data references. A_INDEX is the index of the first reference (0 for 4748 DDR_A, 1 for DDR_B) and B_INDEX is the index of the second reference. */ 4749 4750 static bool 4751 subscript_dependence_tester_1 (struct data_dependence_relation *ddr, 4752 unsigned int a_index, unsigned int b_index, 4753 struct loop *loop_nest) 4754 { 4755 unsigned int i; 4756 tree last_conflicts; 4757 struct subscript *subscript; 4758 tree res = NULL_TREE; 4759 4760 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++) 4761 { 4762 conflict_function *overlaps_a, *overlaps_b; 4763 4764 analyze_overlapping_iterations (SUB_ACCESS_FN (subscript, a_index), 4765 SUB_ACCESS_FN (subscript, b_index), 4766 &overlaps_a, &overlaps_b, 4767 &last_conflicts, loop_nest); 4768 4769 if (SUB_CONFLICTS_IN_A (subscript)) 4770 free_conflict_function (SUB_CONFLICTS_IN_A (subscript)); 4771 if (SUB_CONFLICTS_IN_B (subscript)) 4772 free_conflict_function (SUB_CONFLICTS_IN_B (subscript)); 4773 4774 SUB_CONFLICTS_IN_A (subscript) = overlaps_a; 4775 SUB_CONFLICTS_IN_B (subscript) = overlaps_b; 4776 SUB_LAST_CONFLICT (subscript) = last_conflicts; 4777 4778 /* If there is any undetermined conflict function we have to 4779 give a conservative answer in case we cannot prove that 4780 no dependence exists when analyzing another subscript. */ 4781 if (CF_NOT_KNOWN_P (overlaps_a) 4782 || CF_NOT_KNOWN_P (overlaps_b)) 4783 { 4784 res = chrec_dont_know; 4785 continue; 4786 } 4787 4788 /* When there is a subscript with no dependence we can stop. */ 4789 else if (CF_NO_DEPENDENCE_P (overlaps_a) 4790 || CF_NO_DEPENDENCE_P (overlaps_b)) 4791 { 4792 res = chrec_known; 4793 break; 4794 } 4795 } 4796 4797 if (res == NULL_TREE) 4798 return true; 4799 4800 if (res == chrec_known) 4801 dependence_stats.num_dependence_independent++; 4802 else 4803 dependence_stats.num_dependence_undetermined++; 4804 finalize_ddr_dependent (ddr, res); 4805 return false; 4806 } 4807 4808 /* Computes the conflicting iterations in LOOP_NEST, and initialize DDR. */ 4809 4810 static void 4811 subscript_dependence_tester (struct data_dependence_relation *ddr, 4812 struct loop *loop_nest) 4813 { 4814 if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest)) 4815 dependence_stats.num_dependence_dependent++; 4816 4817 compute_subscript_distance (ddr); 4818 if (build_classic_dist_vector (ddr, loop_nest)) 4819 build_classic_dir_vector (ddr); 4820 } 4821 4822 /* Returns true when all the access functions of A are affine or 4823 constant with respect to LOOP_NEST. */ 4824 4825 static bool 4826 access_functions_are_affine_or_constant_p (const struct data_reference *a, 4827 const struct loop *loop_nest) 4828 { 4829 unsigned int i; 4830 vec<tree> fns = DR_ACCESS_FNS (a); 4831 tree t; 4832 4833 FOR_EACH_VEC_ELT (fns, i, t) 4834 if (!evolution_function_is_invariant_p (t, loop_nest->num) 4835 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num)) 4836 return false; 4837 4838 return true; 4839 } 4840 4841 /* This computes the affine dependence relation between A and B with 4842 respect to LOOP_NEST. CHREC_KNOWN is used for representing the 4843 independence between two accesses, while CHREC_DONT_KNOW is used 4844 for representing the unknown relation. 4845 4846 Note that it is possible to stop the computation of the dependence 4847 relation the first time we detect a CHREC_KNOWN element for a given 4848 subscript. */ 4849 4850 void 4851 compute_affine_dependence (struct data_dependence_relation *ddr, 4852 struct loop *loop_nest) 4853 { 4854 struct data_reference *dra = DDR_A (ddr); 4855 struct data_reference *drb = DDR_B (ddr); 4856 4857 if (dump_file && (dump_flags & TDF_DETAILS)) 4858 { 4859 fprintf (dump_file, "(compute_affine_dependence\n"); 4860 fprintf (dump_file, " stmt_a: "); 4861 print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM); 4862 fprintf (dump_file, " stmt_b: "); 4863 print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM); 4864 } 4865 4866 /* Analyze only when the dependence relation is not yet known. */ 4867 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 4868 { 4869 dependence_stats.num_dependence_tests++; 4870 4871 if (access_functions_are_affine_or_constant_p (dra, loop_nest) 4872 && access_functions_are_affine_or_constant_p (drb, loop_nest)) 4873 subscript_dependence_tester (ddr, loop_nest); 4874 4875 /* As a last case, if the dependence cannot be determined, or if 4876 the dependence is considered too difficult to determine, answer 4877 "don't know". */ 4878 else 4879 { 4880 dependence_stats.num_dependence_undetermined++; 4881 4882 if (dump_file && (dump_flags & TDF_DETAILS)) 4883 { 4884 fprintf (dump_file, "Data ref a:\n"); 4885 dump_data_reference (dump_file, dra); 4886 fprintf (dump_file, "Data ref b:\n"); 4887 dump_data_reference (dump_file, drb); 4888 fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n"); 4889 } 4890 finalize_ddr_dependent (ddr, chrec_dont_know); 4891 } 4892 } 4893 4894 if (dump_file && (dump_flags & TDF_DETAILS)) 4895 { 4896 if (DDR_ARE_DEPENDENT (ddr) == chrec_known) 4897 fprintf (dump_file, ") -> no dependence\n"); 4898 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) 4899 fprintf (dump_file, ") -> dependence analysis failed\n"); 4900 else 4901 fprintf (dump_file, ")\n"); 4902 } 4903 } 4904 4905 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all 4906 the data references in DATAREFS, in the LOOP_NEST. When 4907 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self 4908 relations. Return true when successful, i.e. data references number 4909 is small enough to be handled. */ 4910 4911 bool 4912 compute_all_dependences (vec<data_reference_p> datarefs, 4913 vec<ddr_p> *dependence_relations, 4914 vec<loop_p> loop_nest, 4915 bool compute_self_and_rr) 4916 { 4917 struct data_dependence_relation *ddr; 4918 struct data_reference *a, *b; 4919 unsigned int i, j; 4920 4921 if ((int) datarefs.length () 4922 > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS)) 4923 { 4924 struct data_dependence_relation *ddr; 4925 4926 /* Insert a single relation into dependence_relations: 4927 chrec_dont_know. */ 4928 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest); 4929 dependence_relations->safe_push (ddr); 4930 return false; 4931 } 4932 4933 FOR_EACH_VEC_ELT (datarefs, i, a) 4934 for (j = i + 1; datarefs.iterate (j, &b); j++) 4935 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr) 4936 { 4937 ddr = initialize_data_dependence_relation (a, b, loop_nest); 4938 dependence_relations->safe_push (ddr); 4939 if (loop_nest.exists ()) 4940 compute_affine_dependence (ddr, loop_nest[0]); 4941 } 4942 4943 if (compute_self_and_rr) 4944 FOR_EACH_VEC_ELT (datarefs, i, a) 4945 { 4946 ddr = initialize_data_dependence_relation (a, a, loop_nest); 4947 dependence_relations->safe_push (ddr); 4948 if (loop_nest.exists ()) 4949 compute_affine_dependence (ddr, loop_nest[0]); 4950 } 4951 4952 return true; 4953 } 4954 4955 /* Describes a location of a memory reference. */ 4956 4957 struct data_ref_loc 4958 { 4959 /* The memory reference. */ 4960 tree ref; 4961 4962 /* True if the memory reference is read. */ 4963 bool is_read; 4964 4965 /* True if the data reference is conditional within the containing 4966 statement, i.e. if it might not occur even when the statement 4967 is executed and runs to completion. */ 4968 bool is_conditional_in_stmt; 4969 }; 4970 4971 4972 /* Stores the locations of memory references in STMT to REFERENCES. Returns 4973 true if STMT clobbers memory, false otherwise. */ 4974 4975 static bool 4976 get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references) 4977 { 4978 bool clobbers_memory = false; 4979 data_ref_loc ref; 4980 tree op0, op1; 4981 enum gimple_code stmt_code = gimple_code (stmt); 4982 4983 /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects. 4984 As we cannot model data-references to not spelled out 4985 accesses give up if they may occur. */ 4986 if (stmt_code == GIMPLE_CALL 4987 && !(gimple_call_flags (stmt) & ECF_CONST)) 4988 { 4989 /* Allow IFN_GOMP_SIMD_LANE in their own loops. */ 4990 if (gimple_call_internal_p (stmt)) 4991 switch (gimple_call_internal_fn (stmt)) 4992 { 4993 case IFN_GOMP_SIMD_LANE: 4994 { 4995 struct loop *loop = gimple_bb (stmt)->loop_father; 4996 tree uid = gimple_call_arg (stmt, 0); 4997 gcc_assert (TREE_CODE (uid) == SSA_NAME); 4998 if (loop == NULL 4999 || loop->simduid != SSA_NAME_VAR (uid)) 5000 clobbers_memory = true; 5001 break; 5002 } 5003 case IFN_MASK_LOAD: 5004 case IFN_MASK_STORE: 5005 break; 5006 default: 5007 clobbers_memory = true; 5008 break; 5009 } 5010 else 5011 clobbers_memory = true; 5012 } 5013 else if (stmt_code == GIMPLE_ASM 5014 && (gimple_asm_volatile_p (as_a <gasm *> (stmt)) 5015 || gimple_vuse (stmt))) 5016 clobbers_memory = true; 5017 5018 if (!gimple_vuse (stmt)) 5019 return clobbers_memory; 5020 5021 if (stmt_code == GIMPLE_ASSIGN) 5022 { 5023 tree base; 5024 op0 = gimple_assign_lhs (stmt); 5025 op1 = gimple_assign_rhs1 (stmt); 5026 5027 if (DECL_P (op1) 5028 || (REFERENCE_CLASS_P (op1) 5029 && (base = get_base_address (op1)) 5030 && TREE_CODE (base) != SSA_NAME 5031 && !is_gimple_min_invariant (base))) 5032 { 5033 ref.ref = op1; 5034 ref.is_read = true; 5035 ref.is_conditional_in_stmt = false; 5036 references->safe_push (ref); 5037 } 5038 } 5039 else if (stmt_code == GIMPLE_CALL) 5040 { 5041 unsigned i, n; 5042 tree ptr, type; 5043 unsigned int align; 5044 5045 ref.is_read = false; 5046 if (gimple_call_internal_p (stmt)) 5047 switch (gimple_call_internal_fn (stmt)) 5048 { 5049 case IFN_MASK_LOAD: 5050 if (gimple_call_lhs (stmt) == NULL_TREE) 5051 break; 5052 ref.is_read = true; 5053 /* FALLTHRU */ 5054 case IFN_MASK_STORE: 5055 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0); 5056 align = tree_to_shwi (gimple_call_arg (stmt, 1)); 5057 if (ref.is_read) 5058 type = TREE_TYPE (gimple_call_lhs (stmt)); 5059 else 5060 type = TREE_TYPE (gimple_call_arg (stmt, 3)); 5061 if (TYPE_ALIGN (type) != align) 5062 type = build_aligned_type (type, align); 5063 ref.is_conditional_in_stmt = true; 5064 ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), 5065 ptr); 5066 references->safe_push (ref); 5067 return false; 5068 default: 5069 break; 5070 } 5071 5072 op0 = gimple_call_lhs (stmt); 5073 n = gimple_call_num_args (stmt); 5074 for (i = 0; i < n; i++) 5075 { 5076 op1 = gimple_call_arg (stmt, i); 5077 5078 if (DECL_P (op1) 5079 || (REFERENCE_CLASS_P (op1) && get_base_address (op1))) 5080 { 5081 ref.ref = op1; 5082 ref.is_read = true; 5083 ref.is_conditional_in_stmt = false; 5084 references->safe_push (ref); 5085 } 5086 } 5087 } 5088 else 5089 return clobbers_memory; 5090 5091 if (op0 5092 && (DECL_P (op0) 5093 || (REFERENCE_CLASS_P (op0) && get_base_address (op0)))) 5094 { 5095 ref.ref = op0; 5096 ref.is_read = false; 5097 ref.is_conditional_in_stmt = false; 5098 references->safe_push (ref); 5099 } 5100 return clobbers_memory; 5101 } 5102 5103 5104 /* Returns true if the loop-nest has any data reference. */ 5105 5106 bool 5107 loop_nest_has_data_refs (loop_p loop) 5108 { 5109 basic_block *bbs = get_loop_body (loop); 5110 auto_vec<data_ref_loc, 3> references; 5111 5112 for (unsigned i = 0; i < loop->num_nodes; i++) 5113 { 5114 basic_block bb = bbs[i]; 5115 gimple_stmt_iterator bsi; 5116 5117 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) 5118 { 5119 gimple *stmt = gsi_stmt (bsi); 5120 get_references_in_stmt (stmt, &references); 5121 if (references.length ()) 5122 { 5123 free (bbs); 5124 return true; 5125 } 5126 } 5127 } 5128 free (bbs); 5129 return false; 5130 } 5131 5132 /* Stores the data references in STMT to DATAREFS. If there is an unanalyzable 5133 reference, returns false, otherwise returns true. NEST is the outermost 5134 loop of the loop nest in which the references should be analyzed. */ 5135 5136 opt_result 5137 find_data_references_in_stmt (struct loop *nest, gimple *stmt, 5138 vec<data_reference_p> *datarefs) 5139 { 5140 unsigned i; 5141 auto_vec<data_ref_loc, 2> references; 5142 data_ref_loc *ref; 5143 data_reference_p dr; 5144 5145 if (get_references_in_stmt (stmt, &references)) 5146 return opt_result::failure_at (stmt, "statement clobbers memory: %G", 5147 stmt); 5148 5149 FOR_EACH_VEC_ELT (references, i, ref) 5150 { 5151 dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL, 5152 loop_containing_stmt (stmt), ref->ref, 5153 stmt, ref->is_read, ref->is_conditional_in_stmt); 5154 gcc_assert (dr != NULL); 5155 datarefs->safe_push (dr); 5156 } 5157 5158 return opt_result::success (); 5159 } 5160 5161 /* Stores the data references in STMT to DATAREFS. If there is an 5162 unanalyzable reference, returns false, otherwise returns true. 5163 NEST is the outermost loop of the loop nest in which the references 5164 should be instantiated, LOOP is the loop in which the references 5165 should be analyzed. */ 5166 5167 bool 5168 graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt, 5169 vec<data_reference_p> *datarefs) 5170 { 5171 unsigned i; 5172 auto_vec<data_ref_loc, 2> references; 5173 data_ref_loc *ref; 5174 bool ret = true; 5175 data_reference_p dr; 5176 5177 if (get_references_in_stmt (stmt, &references)) 5178 return false; 5179 5180 FOR_EACH_VEC_ELT (references, i, ref) 5181 { 5182 dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read, 5183 ref->is_conditional_in_stmt); 5184 gcc_assert (dr != NULL); 5185 datarefs->safe_push (dr); 5186 } 5187 5188 return ret; 5189 } 5190 5191 /* Search the data references in LOOP, and record the information into 5192 DATAREFS. Returns chrec_dont_know when failing to analyze a 5193 difficult case, returns NULL_TREE otherwise. */ 5194 5195 tree 5196 find_data_references_in_bb (struct loop *loop, basic_block bb, 5197 vec<data_reference_p> *datarefs) 5198 { 5199 gimple_stmt_iterator bsi; 5200 5201 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) 5202 { 5203 gimple *stmt = gsi_stmt (bsi); 5204 5205 if (!find_data_references_in_stmt (loop, stmt, datarefs)) 5206 { 5207 struct data_reference *res; 5208 res = XCNEW (struct data_reference); 5209 datarefs->safe_push (res); 5210 5211 return chrec_dont_know; 5212 } 5213 } 5214 5215 return NULL_TREE; 5216 } 5217 5218 /* Search the data references in LOOP, and record the information into 5219 DATAREFS. Returns chrec_dont_know when failing to analyze a 5220 difficult case, returns NULL_TREE otherwise. 5221 5222 TODO: This function should be made smarter so that it can handle address 5223 arithmetic as if they were array accesses, etc. */ 5224 5225 tree 5226 find_data_references_in_loop (struct loop *loop, 5227 vec<data_reference_p> *datarefs) 5228 { 5229 basic_block bb, *bbs; 5230 unsigned int i; 5231 5232 bbs = get_loop_body_in_dom_order (loop); 5233 5234 for (i = 0; i < loop->num_nodes; i++) 5235 { 5236 bb = bbs[i]; 5237 5238 if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know) 5239 { 5240 free (bbs); 5241 return chrec_dont_know; 5242 } 5243 } 5244 free (bbs); 5245 5246 return NULL_TREE; 5247 } 5248 5249 /* Return the alignment in bytes that DRB is guaranteed to have at all 5250 times. */ 5251 5252 unsigned int 5253 dr_alignment (innermost_loop_behavior *drb) 5254 { 5255 /* Get the alignment of BASE_ADDRESS + INIT. */ 5256 unsigned int alignment = drb->base_alignment; 5257 unsigned int misalignment = (drb->base_misalignment 5258 + TREE_INT_CST_LOW (drb->init)); 5259 if (misalignment != 0) 5260 alignment = MIN (alignment, misalignment & -misalignment); 5261 5262 /* Cap it to the alignment of OFFSET. */ 5263 if (!integer_zerop (drb->offset)) 5264 alignment = MIN (alignment, drb->offset_alignment); 5265 5266 /* Cap it to the alignment of STEP. */ 5267 if (!integer_zerop (drb->step)) 5268 alignment = MIN (alignment, drb->step_alignment); 5269 5270 return alignment; 5271 } 5272 5273 /* If BASE is a pointer-typed SSA name, try to find the object that it 5274 is based on. Return this object X on success and store the alignment 5275 in bytes of BASE - &X in *ALIGNMENT_OUT. */ 5276 5277 static tree 5278 get_base_for_alignment_1 (tree base, unsigned int *alignment_out) 5279 { 5280 if (TREE_CODE (base) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (base))) 5281 return NULL_TREE; 5282 5283 gimple *def = SSA_NAME_DEF_STMT (base); 5284 base = analyze_scalar_evolution (loop_containing_stmt (def), base); 5285 5286 /* Peel chrecs and record the minimum alignment preserved by 5287 all steps. */ 5288 unsigned int alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT; 5289 while (TREE_CODE (base) == POLYNOMIAL_CHREC) 5290 { 5291 unsigned int step_alignment = highest_pow2_factor (CHREC_RIGHT (base)); 5292 alignment = MIN (alignment, step_alignment); 5293 base = CHREC_LEFT (base); 5294 } 5295 5296 /* Punt if the expression is too complicated to handle. */ 5297 if (tree_contains_chrecs (base, NULL) || !POINTER_TYPE_P (TREE_TYPE (base))) 5298 return NULL_TREE; 5299 5300 /* The only useful cases are those for which a dereference folds to something 5301 other than an INDIRECT_REF. */ 5302 tree ref_type = TREE_TYPE (TREE_TYPE (base)); 5303 tree ref = fold_indirect_ref_1 (UNKNOWN_LOCATION, ref_type, base); 5304 if (!ref) 5305 return NULL_TREE; 5306 5307 /* Analyze the base to which the steps we peeled were applied. */ 5308 poly_int64 bitsize, bitpos, bytepos; 5309 machine_mode mode; 5310 int unsignedp, reversep, volatilep; 5311 tree offset; 5312 base = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode, 5313 &unsignedp, &reversep, &volatilep); 5314 if (!base || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)) 5315 return NULL_TREE; 5316 5317 /* Restrict the alignment to that guaranteed by the offsets. */ 5318 unsigned int bytepos_alignment = known_alignment (bytepos); 5319 if (bytepos_alignment != 0) 5320 alignment = MIN (alignment, bytepos_alignment); 5321 if (offset) 5322 { 5323 unsigned int offset_alignment = highest_pow2_factor (offset); 5324 alignment = MIN (alignment, offset_alignment); 5325 } 5326 5327 *alignment_out = alignment; 5328 return base; 5329 } 5330 5331 /* Return the object whose alignment would need to be changed in order 5332 to increase the alignment of ADDR. Store the maximum achievable 5333 alignment in *MAX_ALIGNMENT. */ 5334 5335 tree 5336 get_base_for_alignment (tree addr, unsigned int *max_alignment) 5337 { 5338 tree base = get_base_for_alignment_1 (addr, max_alignment); 5339 if (base) 5340 return base; 5341 5342 if (TREE_CODE (addr) == ADDR_EXPR) 5343 addr = TREE_OPERAND (addr, 0); 5344 *max_alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT; 5345 return addr; 5346 } 5347 5348 /* Recursive helper function. */ 5349 5350 static bool 5351 find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest) 5352 { 5353 /* Inner loops of the nest should not contain siblings. Example: 5354 when there are two consecutive loops, 5355 5356 | loop_0 5357 | loop_1 5358 | A[{0, +, 1}_1] 5359 | endloop_1 5360 | loop_2 5361 | A[{0, +, 1}_2] 5362 | endloop_2 5363 | endloop_0 5364 5365 the dependence relation cannot be captured by the distance 5366 abstraction. */ 5367 if (loop->next) 5368 return false; 5369 5370 loop_nest->safe_push (loop); 5371 if (loop->inner) 5372 return find_loop_nest_1 (loop->inner, loop_nest); 5373 return true; 5374 } 5375 5376 /* Return false when the LOOP is not well nested. Otherwise return 5377 true and insert in LOOP_NEST the loops of the nest. LOOP_NEST will 5378 contain the loops from the outermost to the innermost, as they will 5379 appear in the classic distance vector. */ 5380 5381 bool 5382 find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest) 5383 { 5384 loop_nest->safe_push (loop); 5385 if (loop->inner) 5386 return find_loop_nest_1 (loop->inner, loop_nest); 5387 return true; 5388 } 5389 5390 /* Returns true when the data dependences have been computed, false otherwise. 5391 Given a loop nest LOOP, the following vectors are returned: 5392 DATAREFS is initialized to all the array elements contained in this loop, 5393 DEPENDENCE_RELATIONS contains the relations between the data references. 5394 Compute read-read and self relations if 5395 COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */ 5396 5397 bool 5398 compute_data_dependences_for_loop (struct loop *loop, 5399 bool compute_self_and_read_read_dependences, 5400 vec<loop_p> *loop_nest, 5401 vec<data_reference_p> *datarefs, 5402 vec<ddr_p> *dependence_relations) 5403 { 5404 bool res = true; 5405 5406 memset (&dependence_stats, 0, sizeof (dependence_stats)); 5407 5408 /* If the loop nest is not well formed, or one of the data references 5409 is not computable, give up without spending time to compute other 5410 dependences. */ 5411 if (!loop 5412 || !find_loop_nest (loop, loop_nest) 5413 || find_data_references_in_loop (loop, datarefs) == chrec_dont_know 5414 || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest, 5415 compute_self_and_read_read_dependences)) 5416 res = false; 5417 5418 if (dump_file && (dump_flags & TDF_STATS)) 5419 { 5420 fprintf (dump_file, "Dependence tester statistics:\n"); 5421 5422 fprintf (dump_file, "Number of dependence tests: %d\n", 5423 dependence_stats.num_dependence_tests); 5424 fprintf (dump_file, "Number of dependence tests classified dependent: %d\n", 5425 dependence_stats.num_dependence_dependent); 5426 fprintf (dump_file, "Number of dependence tests classified independent: %d\n", 5427 dependence_stats.num_dependence_independent); 5428 fprintf (dump_file, "Number of undetermined dependence tests: %d\n", 5429 dependence_stats.num_dependence_undetermined); 5430 5431 fprintf (dump_file, "Number of subscript tests: %d\n", 5432 dependence_stats.num_subscript_tests); 5433 fprintf (dump_file, "Number of undetermined subscript tests: %d\n", 5434 dependence_stats.num_subscript_undetermined); 5435 fprintf (dump_file, "Number of same subscript function: %d\n", 5436 dependence_stats.num_same_subscript_function); 5437 5438 fprintf (dump_file, "Number of ziv tests: %d\n", 5439 dependence_stats.num_ziv); 5440 fprintf (dump_file, "Number of ziv tests returning dependent: %d\n", 5441 dependence_stats.num_ziv_dependent); 5442 fprintf (dump_file, "Number of ziv tests returning independent: %d\n", 5443 dependence_stats.num_ziv_independent); 5444 fprintf (dump_file, "Number of ziv tests unimplemented: %d\n", 5445 dependence_stats.num_ziv_unimplemented); 5446 5447 fprintf (dump_file, "Number of siv tests: %d\n", 5448 dependence_stats.num_siv); 5449 fprintf (dump_file, "Number of siv tests returning dependent: %d\n", 5450 dependence_stats.num_siv_dependent); 5451 fprintf (dump_file, "Number of siv tests returning independent: %d\n", 5452 dependence_stats.num_siv_independent); 5453 fprintf (dump_file, "Number of siv tests unimplemented: %d\n", 5454 dependence_stats.num_siv_unimplemented); 5455 5456 fprintf (dump_file, "Number of miv tests: %d\n", 5457 dependence_stats.num_miv); 5458 fprintf (dump_file, "Number of miv tests returning dependent: %d\n", 5459 dependence_stats.num_miv_dependent); 5460 fprintf (dump_file, "Number of miv tests returning independent: %d\n", 5461 dependence_stats.num_miv_independent); 5462 fprintf (dump_file, "Number of miv tests unimplemented: %d\n", 5463 dependence_stats.num_miv_unimplemented); 5464 } 5465 5466 return res; 5467 } 5468 5469 /* Free the memory used by a data dependence relation DDR. */ 5470 5471 void 5472 free_dependence_relation (struct data_dependence_relation *ddr) 5473 { 5474 if (ddr == NULL) 5475 return; 5476 5477 if (DDR_SUBSCRIPTS (ddr).exists ()) 5478 free_subscripts (DDR_SUBSCRIPTS (ddr)); 5479 DDR_DIST_VECTS (ddr).release (); 5480 DDR_DIR_VECTS (ddr).release (); 5481 5482 free (ddr); 5483 } 5484 5485 /* Free the memory used by the data dependence relations from 5486 DEPENDENCE_RELATIONS. */ 5487 5488 void 5489 free_dependence_relations (vec<ddr_p> dependence_relations) 5490 { 5491 unsigned int i; 5492 struct data_dependence_relation *ddr; 5493 5494 FOR_EACH_VEC_ELT (dependence_relations, i, ddr) 5495 if (ddr) 5496 free_dependence_relation (ddr); 5497 5498 dependence_relations.release (); 5499 } 5500 5501 /* Free the memory used by the data references from DATAREFS. */ 5502 5503 void 5504 free_data_refs (vec<data_reference_p> datarefs) 5505 { 5506 unsigned int i; 5507 struct data_reference *dr; 5508 5509 FOR_EACH_VEC_ELT (datarefs, i, dr) 5510 free_data_ref (dr); 5511 datarefs.release (); 5512 } 5513 5514 /* Common routine implementing both dr_direction_indicator and 5515 dr_zero_step_indicator. Return USEFUL_MIN if the indicator is known 5516 to be >= USEFUL_MIN and -1 if the indicator is known to be negative. 5517 Return the step as the indicator otherwise. */ 5518 5519 static tree 5520 dr_step_indicator (struct data_reference *dr, int useful_min) 5521 { 5522 tree step = DR_STEP (dr); 5523 if (!step) 5524 return NULL_TREE; 5525 STRIP_NOPS (step); 5526 /* Look for cases where the step is scaled by a positive constant 5527 integer, which will often be the access size. If the multiplication 5528 doesn't change the sign (due to overflow effects) then we can 5529 test the unscaled value instead. */ 5530 if (TREE_CODE (step) == MULT_EXPR 5531 && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST 5532 && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0) 5533 { 5534 tree factor = TREE_OPERAND (step, 1); 5535 step = TREE_OPERAND (step, 0); 5536 5537 /* Strip widening and truncating conversions as well as nops. */ 5538 if (CONVERT_EXPR_P (step) 5539 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0)))) 5540 step = TREE_OPERAND (step, 0); 5541 tree type = TREE_TYPE (step); 5542 5543 /* Get the range of step values that would not cause overflow. */ 5544 widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype)) 5545 / wi::to_widest (factor)); 5546 widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype)) 5547 / wi::to_widest (factor)); 5548 5549 /* Get the range of values that the unconverted step actually has. */ 5550 wide_int step_min, step_max; 5551 if (TREE_CODE (step) != SSA_NAME 5552 || get_range_info (step, &step_min, &step_max) != VR_RANGE) 5553 { 5554 step_min = wi::to_wide (TYPE_MIN_VALUE (type)); 5555 step_max = wi::to_wide (TYPE_MAX_VALUE (type)); 5556 } 5557 5558 /* Check whether the unconverted step has an acceptable range. */ 5559 signop sgn = TYPE_SIGN (type); 5560 if (wi::les_p (minv, widest_int::from (step_min, sgn)) 5561 && wi::ges_p (maxv, widest_int::from (step_max, sgn))) 5562 { 5563 if (wi::ge_p (step_min, useful_min, sgn)) 5564 return ssize_int (useful_min); 5565 else if (wi::lt_p (step_max, 0, sgn)) 5566 return ssize_int (-1); 5567 else 5568 return fold_convert (ssizetype, step); 5569 } 5570 } 5571 return DR_STEP (dr); 5572 } 5573 5574 /* Return a value that is negative iff DR has a negative step. */ 5575 5576 tree 5577 dr_direction_indicator (struct data_reference *dr) 5578 { 5579 return dr_step_indicator (dr, 0); 5580 } 5581 5582 /* Return a value that is zero iff DR has a zero step. */ 5583 5584 tree 5585 dr_zero_step_indicator (struct data_reference *dr) 5586 { 5587 return dr_step_indicator (dr, 1); 5588 } 5589 5590 /* Return true if DR is known to have a nonnegative (but possibly zero) 5591 step. */ 5592 5593 bool 5594 dr_known_forward_stride_p (struct data_reference *dr) 5595 { 5596 tree indicator = dr_direction_indicator (dr); 5597 tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node, 5598 fold_convert (ssizetype, indicator), 5599 ssize_int (0)); 5600 return neg_step_val && integer_zerop (neg_step_val); 5601 } 5602