/* Data references and dependences detectors.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to the same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |   chrec1 (x) == chrec2 (y).

   The goals of this analysis are:

   - to determine independence: the relation between two independent
     accesses is qualified with chrec_known (this information allows
     loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with the classic dependence representations:

       - distance vectors
       - direction vectors
       - loop-carried level dependence
       - polyhedron dependence

     or with the chains-of-recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses, a subscript is the tuple
     composed of the access functions for a given dimension.  Example:
     given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
     (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
     solutions are integer constants, for example the equation
     |   3*x + 2*y = 1
     has the integer solution x = 1, y = -1.

   References:

   - "Advanced Compilation for High Performance Computing" by Randy
     Allen and Ken Kennedy.
     http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
     by Utpal Banerjee.
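
   As a concrete illustration of the distance/direction representation
   (a reading aid only, not taken from the references above): in the loop

   | for (i = 1; i < N; i++)
   |   A[i] = A[i-1] + 1;

   the write A[i] has access function {1, +, 1}_1 and the read A[i-1]
   has access function {0, +, 1}_1.  The same element is accessed at
   iterations x and y exactly when y == x + 1, so the dependence has
   distance vector (1) and direction vector (+): every iteration reads
   the value written by the previous one, and the loop cannot be run in
   parallel as is.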
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "expr.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "tree-affine.h"
#include "params.h"

static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;

static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					    struct data_reference *,
					    struct data_reference *,
					    struct loop *);

/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (int a, int b)
{
  return ((b % a) == 0);
}



/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified debug dump for a vector of data references; prints to stderr.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}

/* Dump function for a DATA_REFERENCE structure.
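   Illustrative output shape only (the exact statement, ref and access
   function values depend on the input GIMPLE):

   | #(Data Ref:
   | #  bb: 3
   | #  stmt: A[i_1] = 0;
   | #  ref: A[i_1]
   | #  base_object: A
   | #  Access function 0: {0, +, 1}_1
   | #)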
*/ 196 197 void 198 dump_data_reference (FILE *outf, 199 struct data_reference *dr) 200 { 201 unsigned int i; 202 203 fprintf (outf, "#(Data Ref: \n"); 204 fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index); 205 fprintf (outf, "# stmt: "); 206 print_gimple_stmt (outf, DR_STMT (dr), 0, 0); 207 fprintf (outf, "# ref: "); 208 print_generic_stmt (outf, DR_REF (dr), 0); 209 fprintf (outf, "# base_object: "); 210 print_generic_stmt (outf, DR_BASE_OBJECT (dr), 0); 211 212 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++) 213 { 214 fprintf (outf, "# Access function %d: ", i); 215 print_generic_stmt (outf, DR_ACCESS_FN (dr, i), 0); 216 } 217 fprintf (outf, "#)\n"); 218 } 219 220 /* Unified dump function for a DATA_REFERENCE structure. */ 221 222 DEBUG_FUNCTION void 223 debug (data_reference &ref) 224 { 225 dump_data_reference (stderr, &ref); 226 } 227 228 DEBUG_FUNCTION void 229 debug (data_reference *ptr) 230 { 231 if (ptr) 232 debug (*ptr); 233 else 234 fprintf (stderr, "<nil>\n"); 235 } 236 237 238 /* Dumps the affine function described by FN to the file OUTF. */ 239 240 DEBUG_FUNCTION void 241 dump_affine_function (FILE *outf, affine_fn fn) 242 { 243 unsigned i; 244 tree coef; 245 246 print_generic_expr (outf, fn[0], TDF_SLIM); 247 for (i = 1; fn.iterate (i, &coef); i++) 248 { 249 fprintf (outf, " + "); 250 print_generic_expr (outf, coef, TDF_SLIM); 251 fprintf (outf, " * x_%u", i); 252 } 253 } 254 255 /* Dumps the conflict function CF to the file OUTF. */ 256 257 DEBUG_FUNCTION void 258 dump_conflict_function (FILE *outf, conflict_function *cf) 259 { 260 unsigned i; 261 262 if (cf->n == NO_DEPENDENCE) 263 fprintf (outf, "no dependence"); 264 else if (cf->n == NOT_KNOWN) 265 fprintf (outf, "not known"); 266 else 267 { 268 for (i = 0; i < cf->n; i++) 269 { 270 if (i != 0) 271 fprintf (outf, " "); 272 fprintf (outf, "["); 273 dump_affine_function (outf, cf->fns[i]); 274 fprintf (outf, "]"); 275 } 276 } 277 } 278 279 /* Dump function for a SUBSCRIPT structure. */ 280 281 DEBUG_FUNCTION void 282 dump_subscript (FILE *outf, struct subscript *subscript) 283 { 284 conflict_function *cf = SUB_CONFLICTS_IN_A (subscript); 285 286 fprintf (outf, "\n (subscript \n"); 287 fprintf (outf, " iterations_that_access_an_element_twice_in_A: "); 288 dump_conflict_function (outf, cf); 289 if (CF_NONTRIVIAL_P (cf)) 290 { 291 tree last_iteration = SUB_LAST_CONFLICT (subscript); 292 fprintf (outf, "\n last_conflict: "); 293 print_generic_expr (outf, last_iteration, 0); 294 } 295 296 cf = SUB_CONFLICTS_IN_B (subscript); 297 fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: "); 298 dump_conflict_function (outf, cf); 299 if (CF_NONTRIVIAL_P (cf)) 300 { 301 tree last_iteration = SUB_LAST_CONFLICT (subscript); 302 fprintf (outf, "\n last_conflict: "); 303 print_generic_expr (outf, last_iteration, 0); 304 } 305 306 fprintf (outf, "\n (Subscript distance: "); 307 print_generic_expr (outf, SUB_DISTANCE (subscript), 0); 308 fprintf (outf, " ))\n"); 309 } 310 311 /* Print the classic direction vector DIRV to OUTF. 
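   For instance (values are illustrative), a dependence over a two-deep
   loop nest whose distance vector is (1, -2) is printed by this
   function as "+ -": a positive direction for the outer loop and a
   negative direction for the inner loop.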
*/ 312 313 DEBUG_FUNCTION void 314 print_direction_vector (FILE *outf, 315 lambda_vector dirv, 316 int length) 317 { 318 int eq; 319 320 for (eq = 0; eq < length; eq++) 321 { 322 enum data_dependence_direction dir = ((enum data_dependence_direction) 323 dirv[eq]); 324 325 switch (dir) 326 { 327 case dir_positive: 328 fprintf (outf, " +"); 329 break; 330 case dir_negative: 331 fprintf (outf, " -"); 332 break; 333 case dir_equal: 334 fprintf (outf, " ="); 335 break; 336 case dir_positive_or_equal: 337 fprintf (outf, " +="); 338 break; 339 case dir_positive_or_negative: 340 fprintf (outf, " +-"); 341 break; 342 case dir_negative_or_equal: 343 fprintf (outf, " -="); 344 break; 345 case dir_star: 346 fprintf (outf, " *"); 347 break; 348 default: 349 fprintf (outf, "indep"); 350 break; 351 } 352 } 353 fprintf (outf, "\n"); 354 } 355 356 /* Print a vector of direction vectors. */ 357 358 DEBUG_FUNCTION void 359 print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects, 360 int length) 361 { 362 unsigned j; 363 lambda_vector v; 364 365 FOR_EACH_VEC_ELT (dir_vects, j, v) 366 print_direction_vector (outf, v, length); 367 } 368 369 /* Print out a vector VEC of length N to OUTFILE. */ 370 371 DEBUG_FUNCTION void 372 print_lambda_vector (FILE * outfile, lambda_vector vector, int n) 373 { 374 int i; 375 376 for (i = 0; i < n; i++) 377 fprintf (outfile, "%3d ", vector[i]); 378 fprintf (outfile, "\n"); 379 } 380 381 /* Print a vector of distance vectors. */ 382 383 DEBUG_FUNCTION void 384 print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects, 385 int length) 386 { 387 unsigned j; 388 lambda_vector v; 389 390 FOR_EACH_VEC_ELT (dist_vects, j, v) 391 print_lambda_vector (outf, v, length); 392 } 393 394 /* Dump function for a DATA_DEPENDENCE_RELATION structure. 
*/ 395 396 DEBUG_FUNCTION void 397 dump_data_dependence_relation (FILE *outf, 398 struct data_dependence_relation *ddr) 399 { 400 struct data_reference *dra, *drb; 401 402 fprintf (outf, "(Data Dep: \n"); 403 404 if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) 405 { 406 if (ddr) 407 { 408 dra = DDR_A (ddr); 409 drb = DDR_B (ddr); 410 if (dra) 411 dump_data_reference (outf, dra); 412 else 413 fprintf (outf, " (nil)\n"); 414 if (drb) 415 dump_data_reference (outf, drb); 416 else 417 fprintf (outf, " (nil)\n"); 418 } 419 fprintf (outf, " (don't know)\n)\n"); 420 return; 421 } 422 423 dra = DDR_A (ddr); 424 drb = DDR_B (ddr); 425 dump_data_reference (outf, dra); 426 dump_data_reference (outf, drb); 427 428 if (DDR_ARE_DEPENDENT (ddr) == chrec_known) 429 fprintf (outf, " (no dependence)\n"); 430 431 else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 432 { 433 unsigned int i; 434 struct loop *loopi; 435 436 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 437 { 438 fprintf (outf, " access_fn_A: "); 439 print_generic_stmt (outf, DR_ACCESS_FN (dra, i), 0); 440 fprintf (outf, " access_fn_B: "); 441 print_generic_stmt (outf, DR_ACCESS_FN (drb, i), 0); 442 dump_subscript (outf, DDR_SUBSCRIPT (ddr, i)); 443 } 444 445 fprintf (outf, " inner loop index: %d\n", DDR_INNER_LOOP (ddr)); 446 fprintf (outf, " loop nest: ("); 447 FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi) 448 fprintf (outf, "%d ", loopi->num); 449 fprintf (outf, ")\n"); 450 451 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++) 452 { 453 fprintf (outf, " distance_vector: "); 454 print_lambda_vector (outf, DDR_DIST_VECT (ddr, i), 455 DDR_NB_LOOPS (ddr)); 456 } 457 458 for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++) 459 { 460 fprintf (outf, " direction_vector: "); 461 print_direction_vector (outf, DDR_DIR_VECT (ddr, i), 462 DDR_NB_LOOPS (ddr)); 463 } 464 } 465 466 fprintf (outf, ")\n"); 467 } 468 469 /* Debug version. */ 470 471 DEBUG_FUNCTION void 472 debug_data_dependence_relation (struct data_dependence_relation *ddr) 473 { 474 dump_data_dependence_relation (stderr, ddr); 475 } 476 477 /* Dump into FILE all the dependence relations from DDRS. */ 478 479 DEBUG_FUNCTION void 480 dump_data_dependence_relations (FILE *file, 481 vec<ddr_p> ddrs) 482 { 483 unsigned int i; 484 struct data_dependence_relation *ddr; 485 486 FOR_EACH_VEC_ELT (ddrs, i, ddr) 487 dump_data_dependence_relation (file, ddr); 488 } 489 490 DEBUG_FUNCTION void 491 debug (vec<ddr_p> &ref) 492 { 493 dump_data_dependence_relations (stderr, ref); 494 } 495 496 DEBUG_FUNCTION void 497 debug (vec<ddr_p> *ptr) 498 { 499 if (ptr) 500 debug (*ptr); 501 else 502 fprintf (stderr, "<nil>\n"); 503 } 504 505 506 /* Dump to STDERR all the dependence relations from DDRS. */ 507 508 DEBUG_FUNCTION void 509 debug_data_dependence_relations (vec<ddr_p> ddrs) 510 { 511 dump_data_dependence_relations (stderr, ddrs); 512 } 513 514 /* Dumps the distance and direction vectors in FILE. DDRS contains 515 the dependence relations, and VECT_SIZE is the size of the 516 dependence vectors, or in other words the number of loops in the 517 considered nest. 
*/ 518 519 DEBUG_FUNCTION void 520 dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs) 521 { 522 unsigned int i, j; 523 struct data_dependence_relation *ddr; 524 lambda_vector v; 525 526 FOR_EACH_VEC_ELT (ddrs, i, ddr) 527 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr)) 528 { 529 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v) 530 { 531 fprintf (file, "DISTANCE_V ("); 532 print_lambda_vector (file, v, DDR_NB_LOOPS (ddr)); 533 fprintf (file, ")\n"); 534 } 535 536 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v) 537 { 538 fprintf (file, "DIRECTION_V ("); 539 print_direction_vector (file, v, DDR_NB_LOOPS (ddr)); 540 fprintf (file, ")\n"); 541 } 542 } 543 544 fprintf (file, "\n\n"); 545 } 546 547 /* Dumps the data dependence relations DDRS in FILE. */ 548 549 DEBUG_FUNCTION void 550 dump_ddrs (FILE *file, vec<ddr_p> ddrs) 551 { 552 unsigned int i; 553 struct data_dependence_relation *ddr; 554 555 FOR_EACH_VEC_ELT (ddrs, i, ddr) 556 dump_data_dependence_relation (file, ddr); 557 558 fprintf (file, "\n\n"); 559 } 560 561 DEBUG_FUNCTION void 562 debug_ddrs (vec<ddr_p> ddrs) 563 { 564 dump_ddrs (stderr, ddrs); 565 } 566 567 /* Helper function for split_constant_offset. Expresses OP0 CODE OP1 568 (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero 569 constant of type ssizetype, and returns true. If we cannot do this 570 with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false 571 is returned. */ 572 573 static bool 574 split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1, 575 tree *var, tree *off) 576 { 577 tree var0, var1; 578 tree off0, off1; 579 enum tree_code ocode = code; 580 581 *var = NULL_TREE; 582 *off = NULL_TREE; 583 584 switch (code) 585 { 586 case INTEGER_CST: 587 *var = build_int_cst (type, 0); 588 *off = fold_convert (ssizetype, op0); 589 return true; 590 591 case POINTER_PLUS_EXPR: 592 ocode = PLUS_EXPR; 593 /* FALLTHROUGH */ 594 case PLUS_EXPR: 595 case MINUS_EXPR: 596 split_constant_offset (op0, &var0, &off0); 597 split_constant_offset (op1, &var1, &off1); 598 *var = fold_build2 (code, type, var0, var1); 599 *off = size_binop (ocode, off0, off1); 600 return true; 601 602 case MULT_EXPR: 603 if (TREE_CODE (op1) != INTEGER_CST) 604 return false; 605 606 split_constant_offset (op0, &var0, &off0); 607 *var = fold_build2 (MULT_EXPR, type, var0, op1); 608 *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1)); 609 return true; 610 611 case ADDR_EXPR: 612 { 613 tree base, poffset; 614 HOST_WIDE_INT pbitsize, pbitpos; 615 machine_mode pmode; 616 int punsignedp, preversep, pvolatilep; 617 618 op0 = TREE_OPERAND (op0, 0); 619 base 620 = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode, 621 &punsignedp, &preversep, &pvolatilep); 622 623 if (pbitpos % BITS_PER_UNIT != 0) 624 return false; 625 base = build_fold_addr_expr (base); 626 off0 = ssize_int (pbitpos / BITS_PER_UNIT); 627 628 if (poffset) 629 { 630 split_constant_offset (poffset, &poffset, &off1); 631 off0 = size_binop (PLUS_EXPR, off0, off1); 632 if (POINTER_TYPE_P (TREE_TYPE (base))) 633 base = fold_build_pointer_plus (base, poffset); 634 else 635 base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base, 636 fold_convert (TREE_TYPE (base), poffset)); 637 } 638 639 var0 = fold_convert (type, base); 640 641 /* If variable length types are involved, punt, otherwise casts 642 might be converted into ARRAY_REFs in gimplify_conversion. 
643 To compute that ARRAY_REF's element size TYPE_SIZE_UNIT, which 644 possibly no longer appears in current GIMPLE, might resurface. 645 This perhaps could run 646 if (CONVERT_EXPR_P (var0)) 647 { 648 gimplify_conversion (&var0); 649 // Attempt to fill in any within var0 found ARRAY_REF's 650 // element size from corresponding op embedded ARRAY_REF, 651 // if unsuccessful, just punt. 652 } */ 653 while (POINTER_TYPE_P (type)) 654 type = TREE_TYPE (type); 655 if (int_size_in_bytes (type) < 0) 656 return false; 657 658 *var = var0; 659 *off = off0; 660 return true; 661 } 662 663 case SSA_NAME: 664 { 665 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0)) 666 return false; 667 668 gimple *def_stmt = SSA_NAME_DEF_STMT (op0); 669 enum tree_code subcode; 670 671 if (gimple_code (def_stmt) != GIMPLE_ASSIGN) 672 return false; 673 674 var0 = gimple_assign_rhs1 (def_stmt); 675 subcode = gimple_assign_rhs_code (def_stmt); 676 var1 = gimple_assign_rhs2 (def_stmt); 677 678 return split_constant_offset_1 (type, var0, subcode, var1, var, off); 679 } 680 CASE_CONVERT: 681 { 682 /* We must not introduce undefined overflow, and we must not change the value. 683 Hence we're okay if the inner type doesn't overflow to start with 684 (pointer or signed), the outer type also is an integer or pointer 685 and the outer precision is at least as large as the inner. */ 686 tree itype = TREE_TYPE (op0); 687 if ((POINTER_TYPE_P (itype) 688 || (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_UNDEFINED (itype))) 689 && TYPE_PRECISION (type) >= TYPE_PRECISION (itype) 690 && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))) 691 { 692 split_constant_offset (op0, &var0, off); 693 *var = fold_convert (type, var0); 694 return true; 695 } 696 return false; 697 } 698 699 default: 700 return false; 701 } 702 } 703 704 /* Expresses EXP as VAR + OFF, where off is a constant. The type of OFF 705 will be ssizetype. */ 706 707 void 708 split_constant_offset (tree exp, tree *var, tree *off) 709 { 710 tree type = TREE_TYPE (exp), otype, op0, op1, e, o; 711 enum tree_code code; 712 713 *var = exp; 714 *off = ssize_int (0); 715 STRIP_NOPS (exp); 716 717 if (tree_is_chrec (exp) 718 || get_gimple_rhs_class (TREE_CODE (exp)) == GIMPLE_TERNARY_RHS) 719 return; 720 721 otype = TREE_TYPE (exp); 722 code = TREE_CODE (exp); 723 extract_ops_from_tree (exp, &code, &op0, &op1); 724 if (split_constant_offset_1 (otype, op0, code, op1, &e, &o)) 725 { 726 *var = fold_convert (type, e); 727 *off = o; 728 } 729 } 730 731 /* Returns the address ADDR of an object in a canonical shape (without nop 732 casts, and with type of pointer to the object). */ 733 734 static tree 735 canonicalize_base_object_address (tree addr) 736 { 737 tree orig = addr; 738 739 STRIP_NOPS (addr); 740 741 /* The base address may be obtained by casting from integer, in that case 742 keep the cast. */ 743 if (!POINTER_TYPE_P (TREE_TYPE (addr))) 744 return orig; 745 746 if (TREE_CODE (addr) != ADDR_EXPR) 747 return addr; 748 749 return build_fold_addr_expr (TREE_OPERAND (addr, 0)); 750 } 751 752 /* Analyzes the behavior of the memory reference DR in the innermost loop or 753 basic block that contains it. Returns true if analysis succeed or false 754 otherwise. 
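   On success the fields DR_BASE_ADDRESS, DR_OFFSET, DR_INIT, DR_STEP
   and DR_ALIGNED_TO of DR are filled in.  For the reference a[i + 4]
   in

   | int a[100];
   | for (i = 0; i < n; i++)
   |   a[i + 4] = ...;

   one would expect (illustration only, assuming 4-byte ints)
   DR_BASE_ADDRESS = &a, DR_OFFSET = 0, DR_INIT = 16 (the constant byte
   offset of a[4]) and DR_STEP = 4 (the byte increment of the access
   per iteration of the loop).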
*/ 755 756 bool 757 dr_analyze_innermost (struct data_reference *dr, struct loop *nest) 758 { 759 gimple *stmt = DR_STMT (dr); 760 struct loop *loop = loop_containing_stmt (stmt); 761 tree ref = DR_REF (dr); 762 HOST_WIDE_INT pbitsize, pbitpos; 763 tree base, poffset; 764 machine_mode pmode; 765 int punsignedp, preversep, pvolatilep; 766 affine_iv base_iv, offset_iv; 767 tree init, dinit, step; 768 bool in_loop = (loop && loop->num); 769 770 if (dump_file && (dump_flags & TDF_DETAILS)) 771 fprintf (dump_file, "analyze_innermost: "); 772 773 base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode, 774 &punsignedp, &preversep, &pvolatilep); 775 gcc_assert (base != NULL_TREE); 776 777 if (pbitpos % BITS_PER_UNIT != 0) 778 { 779 if (dump_file && (dump_flags & TDF_DETAILS)) 780 fprintf (dump_file, "failed: bit offset alignment.\n"); 781 return false; 782 } 783 784 if (preversep) 785 { 786 if (dump_file && (dump_flags & TDF_DETAILS)) 787 fprintf (dump_file, "failed: reverse storage order.\n"); 788 return false; 789 } 790 791 if (TREE_CODE (base) == MEM_REF) 792 { 793 if (!integer_zerop (TREE_OPERAND (base, 1))) 794 { 795 offset_int moff = mem_ref_offset (base); 796 tree mofft = wide_int_to_tree (sizetype, moff); 797 if (!poffset) 798 poffset = mofft; 799 else 800 poffset = size_binop (PLUS_EXPR, poffset, mofft); 801 } 802 base = TREE_OPERAND (base, 0); 803 } 804 else 805 base = build_fold_addr_expr (base); 806 807 if (in_loop) 808 { 809 if (!simple_iv (loop, loop_containing_stmt (stmt), base, &base_iv, 810 nest ? true : false)) 811 { 812 if (nest) 813 { 814 if (dump_file && (dump_flags & TDF_DETAILS)) 815 fprintf (dump_file, "failed: evolution of base is not" 816 " affine.\n"); 817 return false; 818 } 819 else 820 { 821 base_iv.base = base; 822 base_iv.step = ssize_int (0); 823 base_iv.no_overflow = true; 824 } 825 } 826 } 827 else 828 { 829 base_iv.base = base; 830 base_iv.step = ssize_int (0); 831 base_iv.no_overflow = true; 832 } 833 834 if (!poffset) 835 { 836 offset_iv.base = ssize_int (0); 837 offset_iv.step = ssize_int (0); 838 } 839 else 840 { 841 if (!in_loop) 842 { 843 offset_iv.base = poffset; 844 offset_iv.step = ssize_int (0); 845 } 846 else if (!simple_iv (loop, loop_containing_stmt (stmt), 847 poffset, &offset_iv, 848 nest ? true : false)) 849 { 850 if (nest) 851 { 852 if (dump_file && (dump_flags & TDF_DETAILS)) 853 fprintf (dump_file, "failed: evolution of offset is not" 854 " affine.\n"); 855 return false; 856 } 857 else 858 { 859 offset_iv.base = poffset; 860 offset_iv.step = ssize_int (0); 861 } 862 } 863 } 864 865 init = ssize_int (pbitpos / BITS_PER_UNIT); 866 split_constant_offset (base_iv.base, &base_iv.base, &dinit); 867 init = size_binop (PLUS_EXPR, init, dinit); 868 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit); 869 init = size_binop (PLUS_EXPR, init, dinit); 870 871 step = size_binop (PLUS_EXPR, 872 fold_convert (ssizetype, base_iv.step), 873 fold_convert (ssizetype, offset_iv.step)); 874 875 DR_BASE_ADDRESS (dr) = canonicalize_base_object_address (base_iv.base); 876 877 DR_OFFSET (dr) = fold_convert (ssizetype, offset_iv.base); 878 DR_INIT (dr) = init; 879 DR_STEP (dr) = step; 880 881 DR_ALIGNED_TO (dr) = size_int (highest_pow2_factor (offset_iv.base)); 882 883 if (dump_file && (dump_flags & TDF_DETAILS)) 884 fprintf (dump_file, "success.\n"); 885 886 return true; 887 } 888 889 /* Determines the base object and the list of indices of memory reference 890 DR, analyzed in LOOP and instantiated in loop nest NEST. 
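   Illustrative example (not part of the implementation): for the
   reference a[i][j + 1], where i is the induction variable of the
   outer loop (numbered 1 here) and j the induction variable of the
   inner loop (numbered 2), the base object is the array a and the
   recorded access functions are {0, +, 1}_1 for the i subscript and
   {1, +, 1}_2 for the j + 1 subscript.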
*/ 891 892 static void 893 dr_analyze_indices (struct data_reference *dr, loop_p nest, loop_p loop) 894 { 895 vec<tree> access_fns = vNULL; 896 tree ref, op; 897 tree base, off, access_fn; 898 basic_block before_loop; 899 900 /* If analyzing a basic-block there are no indices to analyze 901 and thus no access functions. */ 902 if (!nest) 903 { 904 DR_BASE_OBJECT (dr) = DR_REF (dr); 905 DR_ACCESS_FNS (dr).create (0); 906 return; 907 } 908 909 ref = DR_REF (dr); 910 before_loop = block_before_loop (nest); 911 912 /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses 913 into a two element array with a constant index. The base is 914 then just the immediate underlying object. */ 915 if (TREE_CODE (ref) == REALPART_EXPR) 916 { 917 ref = TREE_OPERAND (ref, 0); 918 access_fns.safe_push (integer_zero_node); 919 } 920 else if (TREE_CODE (ref) == IMAGPART_EXPR) 921 { 922 ref = TREE_OPERAND (ref, 0); 923 access_fns.safe_push (integer_one_node); 924 } 925 926 /* Analyze access functions of dimensions we know to be independent. */ 927 while (handled_component_p (ref)) 928 { 929 if (TREE_CODE (ref) == ARRAY_REF) 930 { 931 op = TREE_OPERAND (ref, 1); 932 access_fn = analyze_scalar_evolution (loop, op); 933 access_fn = instantiate_scev (before_loop, loop, access_fn); 934 access_fns.safe_push (access_fn); 935 } 936 else if (TREE_CODE (ref) == COMPONENT_REF 937 && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE) 938 { 939 /* For COMPONENT_REFs of records (but not unions!) use the 940 FIELD_DECL offset as constant access function so we can 941 disambiguate a[i].f1 and a[i].f2. */ 942 tree off = component_ref_field_offset (ref); 943 off = size_binop (PLUS_EXPR, 944 size_binop (MULT_EXPR, 945 fold_convert (bitsizetype, off), 946 bitsize_int (BITS_PER_UNIT)), 947 DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1))); 948 access_fns.safe_push (off); 949 } 950 else 951 /* If we have an unhandled component we could not translate 952 to an access function stop analyzing. We have determined 953 our base object in this case. */ 954 break; 955 956 ref = TREE_OPERAND (ref, 0); 957 } 958 959 /* If the address operand of a MEM_REF base has an evolution in the 960 analyzed nest, add it as an additional independent access-function. */ 961 if (TREE_CODE (ref) == MEM_REF) 962 { 963 op = TREE_OPERAND (ref, 0); 964 access_fn = analyze_scalar_evolution (loop, op); 965 access_fn = instantiate_scev (before_loop, loop, access_fn); 966 if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC) 967 { 968 tree orig_type; 969 tree memoff = TREE_OPERAND (ref, 1); 970 base = initial_condition (access_fn); 971 orig_type = TREE_TYPE (base); 972 STRIP_USELESS_TYPE_CONVERSION (base); 973 split_constant_offset (base, &base, &off); 974 STRIP_USELESS_TYPE_CONVERSION (base); 975 /* Fold the MEM_REF offset into the evolutions initial 976 value to make more bases comparable. */ 977 if (!integer_zerop (memoff)) 978 { 979 off = size_binop (PLUS_EXPR, off, 980 fold_convert (ssizetype, memoff)); 981 memoff = build_int_cst (TREE_TYPE (memoff), 0); 982 } 983 /* Adjust the offset so it is a multiple of the access type 984 size and thus we separate bases that can possibly be used 985 to produce partial overlaps (which the access_fn machinery 986 cannot handle). 
*/ 987 wide_int rem; 988 if (TYPE_SIZE_UNIT (TREE_TYPE (ref)) 989 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST 990 && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref)))) 991 rem = wi::mod_trunc (off, TYPE_SIZE_UNIT (TREE_TYPE (ref)), SIGNED); 992 else 993 /* If we can't compute the remainder simply force the initial 994 condition to zero. */ 995 rem = off; 996 off = wide_int_to_tree (ssizetype, wi::sub (off, rem)); 997 memoff = wide_int_to_tree (TREE_TYPE (memoff), rem); 998 /* And finally replace the initial condition. */ 999 access_fn = chrec_replace_initial_condition 1000 (access_fn, fold_convert (orig_type, off)); 1001 /* ??? This is still not a suitable base object for 1002 dr_may_alias_p - the base object needs to be an 1003 access that covers the object as whole. With 1004 an evolution in the pointer this cannot be 1005 guaranteed. 1006 As a band-aid, mark the access so we can special-case 1007 it in dr_may_alias_p. */ 1008 tree old = ref; 1009 ref = fold_build2_loc (EXPR_LOCATION (ref), 1010 MEM_REF, TREE_TYPE (ref), 1011 base, memoff); 1012 MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old); 1013 MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old); 1014 DR_UNCONSTRAINED_BASE (dr) = true; 1015 access_fns.safe_push (access_fn); 1016 } 1017 } 1018 else if (DECL_P (ref)) 1019 { 1020 /* Canonicalize DR_BASE_OBJECT to MEM_REF form. */ 1021 ref = build2 (MEM_REF, TREE_TYPE (ref), 1022 build_fold_addr_expr (ref), 1023 build_int_cst (reference_alias_ptr_type (ref), 0)); 1024 } 1025 1026 DR_BASE_OBJECT (dr) = ref; 1027 DR_ACCESS_FNS (dr) = access_fns; 1028 } 1029 1030 /* Extracts the alias analysis information from the memory reference DR. */ 1031 1032 static void 1033 dr_analyze_alias (struct data_reference *dr) 1034 { 1035 tree ref = DR_REF (dr); 1036 tree base = get_base_address (ref), addr; 1037 1038 if (INDIRECT_REF_P (base) 1039 || TREE_CODE (base) == MEM_REF) 1040 { 1041 addr = TREE_OPERAND (base, 0); 1042 if (TREE_CODE (addr) == SSA_NAME) 1043 DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr); 1044 } 1045 } 1046 1047 /* Frees data reference DR. */ 1048 1049 void 1050 free_data_ref (data_reference_p dr) 1051 { 1052 DR_ACCESS_FNS (dr).release (); 1053 free (dr); 1054 } 1055 1056 /* Analyzes memory reference MEMREF accessed in STMT. The reference 1057 is read if IS_READ is true, write otherwise. Returns the 1058 data_reference description of MEMREF. NEST is the outermost loop 1059 in which the reference should be instantiated, LOOP is the loop in 1060 which the data reference should be analyzed. 
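   For instance, when analyzing a statement of the innermost loop of a
   nest, NEST would be the outermost loop of that nest and LOOP the
   loop directly containing the statement.  Passing a NULL NEST
   analyzes the reference as plain basic-block code: no access
   functions are computed and no cross-iteration information is
   derived.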
*/ 1061 1062 struct data_reference * 1063 create_data_ref (loop_p nest, loop_p loop, tree memref, gimple *stmt, 1064 bool is_read) 1065 { 1066 struct data_reference *dr; 1067 1068 if (dump_file && (dump_flags & TDF_DETAILS)) 1069 { 1070 fprintf (dump_file, "Creating dr for "); 1071 print_generic_expr (dump_file, memref, TDF_SLIM); 1072 fprintf (dump_file, "\n"); 1073 } 1074 1075 dr = XCNEW (struct data_reference); 1076 DR_STMT (dr) = stmt; 1077 DR_REF (dr) = memref; 1078 DR_IS_READ (dr) = is_read; 1079 1080 dr_analyze_innermost (dr, nest); 1081 dr_analyze_indices (dr, nest, loop); 1082 dr_analyze_alias (dr); 1083 1084 if (dump_file && (dump_flags & TDF_DETAILS)) 1085 { 1086 unsigned i; 1087 fprintf (dump_file, "\tbase_address: "); 1088 print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM); 1089 fprintf (dump_file, "\n\toffset from base address: "); 1090 print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM); 1091 fprintf (dump_file, "\n\tconstant offset from base address: "); 1092 print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM); 1093 fprintf (dump_file, "\n\tstep: "); 1094 print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM); 1095 fprintf (dump_file, "\n\taligned to: "); 1096 print_generic_expr (dump_file, DR_ALIGNED_TO (dr), TDF_SLIM); 1097 fprintf (dump_file, "\n\tbase_object: "); 1098 print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM); 1099 fprintf (dump_file, "\n"); 1100 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++) 1101 { 1102 fprintf (dump_file, "\tAccess function %d: ", i); 1103 print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM); 1104 } 1105 } 1106 1107 return dr; 1108 } 1109 1110 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical 1111 expressions. */ 1112 static bool 1113 dr_equal_offsets_p1 (tree offset1, tree offset2) 1114 { 1115 bool res; 1116 1117 STRIP_NOPS (offset1); 1118 STRIP_NOPS (offset2); 1119 1120 if (offset1 == offset2) 1121 return true; 1122 1123 if (TREE_CODE (offset1) != TREE_CODE (offset2) 1124 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1))) 1125 return false; 1126 1127 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0), 1128 TREE_OPERAND (offset2, 0)); 1129 1130 if (!res || !BINARY_CLASS_P (offset1)) 1131 return res; 1132 1133 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1), 1134 TREE_OPERAND (offset2, 1)); 1135 1136 return res; 1137 } 1138 1139 /* Check if DRA and DRB have equal offsets. */ 1140 bool 1141 dr_equal_offsets_p (struct data_reference *dra, 1142 struct data_reference *drb) 1143 { 1144 tree offset1, offset2; 1145 1146 offset1 = DR_OFFSET (dra); 1147 offset2 = DR_OFFSET (drb); 1148 1149 return dr_equal_offsets_p1 (offset1, offset2); 1150 } 1151 1152 /* Returns true if FNA == FNB. */ 1153 1154 static bool 1155 affine_function_equal_p (affine_fn fna, affine_fn fnb) 1156 { 1157 unsigned i, n = fna.length (); 1158 1159 if (n != fnb.length ()) 1160 return false; 1161 1162 for (i = 0; i < n; i++) 1163 if (!operand_equal_p (fna[i], fnb[i], 0)) 1164 return false; 1165 1166 return true; 1167 } 1168 1169 /* If all the functions in CF are the same, returns one of them, 1170 otherwise returns NULL. 
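   Reminder of the representation used here: an affine_fn is the vector
   of its coefficients, fn[0] being the constant term and fn[i] the
   coefficient of x_i, so <c0, c1, c2> stands for c0 + c1 * x_1
   + c2 * x_2 (the convention used by dump_affine_function above).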
*/ 1171 1172 static affine_fn 1173 common_affine_function (conflict_function *cf) 1174 { 1175 unsigned i; 1176 affine_fn comm; 1177 1178 if (!CF_NONTRIVIAL_P (cf)) 1179 return affine_fn (); 1180 1181 comm = cf->fns[0]; 1182 1183 for (i = 1; i < cf->n; i++) 1184 if (!affine_function_equal_p (comm, cf->fns[i])) 1185 return affine_fn (); 1186 1187 return comm; 1188 } 1189 1190 /* Returns the base of the affine function FN. */ 1191 1192 static tree 1193 affine_function_base (affine_fn fn) 1194 { 1195 return fn[0]; 1196 } 1197 1198 /* Returns true if FN is a constant. */ 1199 1200 static bool 1201 affine_function_constant_p (affine_fn fn) 1202 { 1203 unsigned i; 1204 tree coef; 1205 1206 for (i = 1; fn.iterate (i, &coef); i++) 1207 if (!integer_zerop (coef)) 1208 return false; 1209 1210 return true; 1211 } 1212 1213 /* Returns true if FN is the zero constant function. */ 1214 1215 static bool 1216 affine_function_zero_p (affine_fn fn) 1217 { 1218 return (integer_zerop (affine_function_base (fn)) 1219 && affine_function_constant_p (fn)); 1220 } 1221 1222 /* Returns a signed integer type with the largest precision from TA 1223 and TB. */ 1224 1225 static tree 1226 signed_type_for_types (tree ta, tree tb) 1227 { 1228 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb)) 1229 return signed_type_for (ta); 1230 else 1231 return signed_type_for (tb); 1232 } 1233 1234 /* Applies operation OP on affine functions FNA and FNB, and returns the 1235 result. */ 1236 1237 static affine_fn 1238 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb) 1239 { 1240 unsigned i, n, m; 1241 affine_fn ret; 1242 tree coef; 1243 1244 if (fnb.length () > fna.length ()) 1245 { 1246 n = fna.length (); 1247 m = fnb.length (); 1248 } 1249 else 1250 { 1251 n = fnb.length (); 1252 m = fna.length (); 1253 } 1254 1255 ret.create (m); 1256 for (i = 0; i < n; i++) 1257 { 1258 tree type = signed_type_for_types (TREE_TYPE (fna[i]), 1259 TREE_TYPE (fnb[i])); 1260 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i])); 1261 } 1262 1263 for (; fna.iterate (i, &coef); i++) 1264 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)), 1265 coef, integer_zero_node)); 1266 for (; fnb.iterate (i, &coef); i++) 1267 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)), 1268 integer_zero_node, coef)); 1269 1270 return ret; 1271 } 1272 1273 /* Returns the sum of affine functions FNA and FNB. */ 1274 1275 static affine_fn 1276 affine_fn_plus (affine_fn fna, affine_fn fnb) 1277 { 1278 return affine_fn_op (PLUS_EXPR, fna, fnb); 1279 } 1280 1281 /* Returns the difference of affine functions FNA and FNB. */ 1282 1283 static affine_fn 1284 affine_fn_minus (affine_fn fna, affine_fn fnb) 1285 { 1286 return affine_fn_op (MINUS_EXPR, fna, fnb); 1287 } 1288 1289 /* Frees affine function FN. */ 1290 1291 static void 1292 affine_fn_free (affine_fn fn) 1293 { 1294 fn.release (); 1295 } 1296 1297 /* Determine for each subscript in the data dependence relation DDR 1298 the distance. 
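   For example (illustration only): if the conflicting iterations in A
   are described by the affine function x_1 + 3 and those in B by x_1,
   their difference is the constant 3 and SUB_DISTANCE is set to 3;
   when the difference is not a constant, SUB_DISTANCE is set to
   chrec_dont_know.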
*/ 1299 1300 static void 1301 compute_subscript_distance (struct data_dependence_relation *ddr) 1302 { 1303 conflict_function *cf_a, *cf_b; 1304 affine_fn fn_a, fn_b, diff; 1305 1306 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 1307 { 1308 unsigned int i; 1309 1310 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 1311 { 1312 struct subscript *subscript; 1313 1314 subscript = DDR_SUBSCRIPT (ddr, i); 1315 cf_a = SUB_CONFLICTS_IN_A (subscript); 1316 cf_b = SUB_CONFLICTS_IN_B (subscript); 1317 1318 fn_a = common_affine_function (cf_a); 1319 fn_b = common_affine_function (cf_b); 1320 if (!fn_a.exists () || !fn_b.exists ()) 1321 { 1322 SUB_DISTANCE (subscript) = chrec_dont_know; 1323 return; 1324 } 1325 diff = affine_fn_minus (fn_a, fn_b); 1326 1327 if (affine_function_constant_p (diff)) 1328 SUB_DISTANCE (subscript) = affine_function_base (diff); 1329 else 1330 SUB_DISTANCE (subscript) = chrec_dont_know; 1331 1332 affine_fn_free (diff); 1333 } 1334 } 1335 } 1336 1337 /* Returns the conflict function for "unknown". */ 1338 1339 static conflict_function * 1340 conflict_fn_not_known (void) 1341 { 1342 conflict_function *fn = XCNEW (conflict_function); 1343 fn->n = NOT_KNOWN; 1344 1345 return fn; 1346 } 1347 1348 /* Returns the conflict function for "independent". */ 1349 1350 static conflict_function * 1351 conflict_fn_no_dependence (void) 1352 { 1353 conflict_function *fn = XCNEW (conflict_function); 1354 fn->n = NO_DEPENDENCE; 1355 1356 return fn; 1357 } 1358 1359 /* Returns true if the address of OBJ is invariant in LOOP. */ 1360 1361 static bool 1362 object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj) 1363 { 1364 while (handled_component_p (obj)) 1365 { 1366 if (TREE_CODE (obj) == ARRAY_REF) 1367 { 1368 /* Index of the ARRAY_REF was zeroed in analyze_indices, thus we only 1369 need to check the stride and the lower bound of the reference. */ 1370 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2), 1371 loop->num) 1372 || chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 3), 1373 loop->num)) 1374 return false; 1375 } 1376 else if (TREE_CODE (obj) == COMPONENT_REF) 1377 { 1378 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2), 1379 loop->num)) 1380 return false; 1381 } 1382 obj = TREE_OPERAND (obj, 0); 1383 } 1384 1385 if (!INDIRECT_REF_P (obj) 1386 && TREE_CODE (obj) != MEM_REF) 1387 return true; 1388 1389 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0), 1390 loop->num); 1391 } 1392 1393 /* Returns false if we can prove that data references A and B do not alias, 1394 true otherwise. If LOOP_NEST is false no cross-iteration aliases are 1395 considered. */ 1396 1397 bool 1398 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b, 1399 bool loop_nest) 1400 { 1401 tree addr_a = DR_BASE_OBJECT (a); 1402 tree addr_b = DR_BASE_OBJECT (b); 1403 1404 /* If we are not processing a loop nest but scalar code we 1405 do not need to care about possible cross-iteration dependences 1406 and thus can process the full original reference. Do so, 1407 similar to how loop invariant motion applies extra offset-based 1408 disambiguation. 
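   For example (illustrative), scalar accesses p->f1 and p->f2 to two
   distinct fields of the same structure occupy disjoint byte ranges,
   so the offset/extent comparison below can already conclude that they
   cannot overlap, without relying on points-to information.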
*/ 1409 if (!loop_nest) 1410 { 1411 aff_tree off1, off2; 1412 widest_int size1, size2; 1413 get_inner_reference_aff (DR_REF (a), &off1, &size1); 1414 get_inner_reference_aff (DR_REF (b), &off2, &size2); 1415 aff_combination_scale (&off1, -1); 1416 aff_combination_add (&off2, &off1); 1417 if (aff_comb_cannot_overlap_p (&off2, size1, size2)) 1418 return false; 1419 } 1420 1421 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF) 1422 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF) 1423 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b) 1424 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b)) 1425 return false; 1426 1427 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we 1428 do not know the size of the base-object. So we cannot do any 1429 offset/overlap based analysis but have to rely on points-to 1430 information only. */ 1431 if (TREE_CODE (addr_a) == MEM_REF 1432 && (DR_UNCONSTRAINED_BASE (a) 1433 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME)) 1434 { 1435 /* For true dependences we can apply TBAA. */ 1436 if (flag_strict_aliasing 1437 && DR_IS_WRITE (a) && DR_IS_READ (b) 1438 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)), 1439 get_alias_set (DR_REF (b)))) 1440 return false; 1441 if (TREE_CODE (addr_b) == MEM_REF) 1442 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 1443 TREE_OPERAND (addr_b, 0)); 1444 else 1445 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 1446 build_fold_addr_expr (addr_b)); 1447 } 1448 else if (TREE_CODE (addr_b) == MEM_REF 1449 && (DR_UNCONSTRAINED_BASE (b) 1450 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME)) 1451 { 1452 /* For true dependences we can apply TBAA. */ 1453 if (flag_strict_aliasing 1454 && DR_IS_WRITE (a) && DR_IS_READ (b) 1455 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)), 1456 get_alias_set (DR_REF (b)))) 1457 return false; 1458 if (TREE_CODE (addr_a) == MEM_REF) 1459 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0), 1460 TREE_OPERAND (addr_b, 0)); 1461 else 1462 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a), 1463 TREE_OPERAND (addr_b, 0)); 1464 } 1465 1466 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object 1467 that is being subsetted in the loop nest. */ 1468 if (DR_IS_WRITE (a) && DR_IS_WRITE (b)) 1469 return refs_output_dependent_p (addr_a, addr_b); 1470 else if (DR_IS_READ (a) && DR_IS_WRITE (b)) 1471 return refs_anti_dependent_p (addr_a, addr_b); 1472 return refs_may_alias_p (addr_a, addr_b); 1473 } 1474 1475 /* Initialize a data dependence relation between data accesses A and 1476 B. NB_LOOPS is the number of loops surrounding the references: the 1477 size of the classic distance/direction vectors. */ 1478 1479 struct data_dependence_relation * 1480 initialize_data_dependence_relation (struct data_reference *a, 1481 struct data_reference *b, 1482 vec<loop_p> loop_nest) 1483 { 1484 struct data_dependence_relation *res; 1485 unsigned int i; 1486 1487 res = XNEW (struct data_dependence_relation); 1488 DDR_A (res) = a; 1489 DDR_B (res) = b; 1490 DDR_LOOP_NEST (res).create (0); 1491 DDR_REVERSED_P (res) = false; 1492 DDR_SUBSCRIPTS (res).create (0); 1493 DDR_DIR_VECTS (res).create (0); 1494 DDR_DIST_VECTS (res).create (0); 1495 1496 if (a == NULL || b == NULL) 1497 { 1498 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 1499 return res; 1500 } 1501 1502 /* If the data references do not alias, then they are independent. 
*/ 1503 if (!dr_may_alias_p (a, b, loop_nest.exists ())) 1504 { 1505 DDR_ARE_DEPENDENT (res) = chrec_known; 1506 return res; 1507 } 1508 1509 /* The case where the references are exactly the same. */ 1510 if (operand_equal_p (DR_REF (a), DR_REF (b), 0)) 1511 { 1512 if ((loop_nest.exists () 1513 && !object_address_invariant_in_loop_p (loop_nest[0], 1514 DR_BASE_OBJECT (a))) 1515 || DR_NUM_DIMENSIONS (a) == 0) 1516 { 1517 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 1518 return res; 1519 } 1520 DDR_AFFINE_P (res) = true; 1521 DDR_ARE_DEPENDENT (res) = NULL_TREE; 1522 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a)); 1523 DDR_LOOP_NEST (res) = loop_nest; 1524 DDR_INNER_LOOP (res) = 0; 1525 DDR_SELF_REFERENCE (res) = true; 1526 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++) 1527 { 1528 struct subscript *subscript; 1529 1530 subscript = XNEW (struct subscript); 1531 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known (); 1532 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known (); 1533 SUB_LAST_CONFLICT (subscript) = chrec_dont_know; 1534 SUB_DISTANCE (subscript) = chrec_dont_know; 1535 DDR_SUBSCRIPTS (res).safe_push (subscript); 1536 } 1537 return res; 1538 } 1539 1540 /* If the references do not access the same object, we do not know 1541 whether they alias or not. We do not care about TBAA or alignment 1542 info so we can use OEP_ADDRESS_OF to avoid false negatives. 1543 But the accesses have to use compatible types as otherwise the 1544 built indices would not match. */ 1545 if (!operand_equal_p (DR_BASE_OBJECT (a), DR_BASE_OBJECT (b), OEP_ADDRESS_OF) 1546 || !types_compatible_p (TREE_TYPE (DR_BASE_OBJECT (a)), 1547 TREE_TYPE (DR_BASE_OBJECT (b)))) 1548 { 1549 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 1550 return res; 1551 } 1552 1553 /* If the base of the object is not invariant in the loop nest, we cannot 1554 analyze it. TODO -- in fact, it would suffice to record that there may 1555 be arbitrary dependences in the loops where the base object varies. */ 1556 if ((loop_nest.exists () 1557 && !object_address_invariant_in_loop_p (loop_nest[0], DR_BASE_OBJECT (a))) 1558 || DR_NUM_DIMENSIONS (a) == 0) 1559 { 1560 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 1561 return res; 1562 } 1563 1564 /* If the number of dimensions of the access to not agree we can have 1565 a pointer access to a component of the array element type and an 1566 array access while the base-objects are still the same. Punt. */ 1567 if (DR_NUM_DIMENSIONS (a) != DR_NUM_DIMENSIONS (b)) 1568 { 1569 DDR_ARE_DEPENDENT (res) = chrec_dont_know; 1570 return res; 1571 } 1572 1573 DDR_AFFINE_P (res) = true; 1574 DDR_ARE_DEPENDENT (res) = NULL_TREE; 1575 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a)); 1576 DDR_LOOP_NEST (res) = loop_nest; 1577 DDR_INNER_LOOP (res) = 0; 1578 DDR_SELF_REFERENCE (res) = false; 1579 1580 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++) 1581 { 1582 struct subscript *subscript; 1583 1584 subscript = XNEW (struct subscript); 1585 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known (); 1586 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known (); 1587 SUB_LAST_CONFLICT (subscript) = chrec_dont_know; 1588 SUB_DISTANCE (subscript) = chrec_dont_know; 1589 DDR_SUBSCRIPTS (res).safe_push (subscript); 1590 } 1591 1592 return res; 1593 } 1594 1595 /* Frees memory used by the conflict function F. 
*/ 1596 1597 static void 1598 free_conflict_function (conflict_function *f) 1599 { 1600 unsigned i; 1601 1602 if (CF_NONTRIVIAL_P (f)) 1603 { 1604 for (i = 0; i < f->n; i++) 1605 affine_fn_free (f->fns[i]); 1606 } 1607 free (f); 1608 } 1609 1610 /* Frees memory used by SUBSCRIPTS. */ 1611 1612 static void 1613 free_subscripts (vec<subscript_p> subscripts) 1614 { 1615 unsigned i; 1616 subscript_p s; 1617 1618 FOR_EACH_VEC_ELT (subscripts, i, s) 1619 { 1620 free_conflict_function (s->conflicting_iterations_in_a); 1621 free_conflict_function (s->conflicting_iterations_in_b); 1622 free (s); 1623 } 1624 subscripts.release (); 1625 } 1626 1627 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap 1628 description. */ 1629 1630 static inline void 1631 finalize_ddr_dependent (struct data_dependence_relation *ddr, 1632 tree chrec) 1633 { 1634 DDR_ARE_DEPENDENT (ddr) = chrec; 1635 free_subscripts (DDR_SUBSCRIPTS (ddr)); 1636 DDR_SUBSCRIPTS (ddr).create (0); 1637 } 1638 1639 /* The dependence relation DDR cannot be represented by a distance 1640 vector. */ 1641 1642 static inline void 1643 non_affine_dependence_relation (struct data_dependence_relation *ddr) 1644 { 1645 if (dump_file && (dump_flags & TDF_DETAILS)) 1646 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n"); 1647 1648 DDR_AFFINE_P (ddr) = false; 1649 } 1650 1651 1652 1653 /* This section contains the classic Banerjee tests. */ 1654 1655 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index 1656 variables, i.e., if the ZIV (Zero Index Variable) test is true. */ 1657 1658 static inline bool 1659 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b) 1660 { 1661 return (evolution_function_is_constant_p (chrec_a) 1662 && evolution_function_is_constant_p (chrec_b)); 1663 } 1664 1665 /* Returns true iff CHREC_A and CHREC_B are dependent on an index 1666 variable, i.e., if the SIV (Single Index Variable) test is true. */ 1667 1668 static bool 1669 siv_subscript_p (const_tree chrec_a, const_tree chrec_b) 1670 { 1671 if ((evolution_function_is_constant_p (chrec_a) 1672 && evolution_function_is_univariate_p (chrec_b)) 1673 || (evolution_function_is_constant_p (chrec_b) 1674 && evolution_function_is_univariate_p (chrec_a))) 1675 return true; 1676 1677 if (evolution_function_is_univariate_p (chrec_a) 1678 && evolution_function_is_univariate_p (chrec_b)) 1679 { 1680 switch (TREE_CODE (chrec_a)) 1681 { 1682 case POLYNOMIAL_CHREC: 1683 switch (TREE_CODE (chrec_b)) 1684 { 1685 case POLYNOMIAL_CHREC: 1686 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b)) 1687 return false; 1688 /* FALLTHRU */ 1689 1690 default: 1691 return true; 1692 } 1693 1694 default: 1695 return true; 1696 } 1697 } 1698 1699 return false; 1700 } 1701 1702 /* Creates a conflict function with N dimensions. The affine functions 1703 in each dimension follow. */ 1704 1705 static conflict_function * 1706 conflict_fn (unsigned n, ...) 1707 { 1708 unsigned i; 1709 conflict_function *ret = XCNEW (conflict_function); 1710 va_list ap; 1711 1712 gcc_assert (0 < n && n <= MAX_DIM); 1713 va_start (ap, n); 1714 1715 ret->n = n; 1716 for (i = 0; i < n; i++) 1717 ret->fns[i] = va_arg (ap, affine_fn); 1718 va_end (ap); 1719 1720 return ret; 1721 } 1722 1723 /* Returns constant affine function with value CST. 
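   For instance, affine_fn_cst (integer_zero_node) builds the constant
   function 0, which the subscript tests below use to record that the
   conflicting iteration of an access is iteration 0.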
*/ 1724 1725 static affine_fn 1726 affine_fn_cst (tree cst) 1727 { 1728 affine_fn fn; 1729 fn.create (1); 1730 fn.quick_push (cst); 1731 return fn; 1732 } 1733 1734 /* Returns affine function with single variable, CST + COEF * x_DIM. */ 1735 1736 static affine_fn 1737 affine_fn_univar (tree cst, unsigned dim, tree coef) 1738 { 1739 affine_fn fn; 1740 fn.create (dim + 1); 1741 unsigned i; 1742 1743 gcc_assert (dim > 0); 1744 fn.quick_push (cst); 1745 for (i = 1; i < dim; i++) 1746 fn.quick_push (integer_zero_node); 1747 fn.quick_push (coef); 1748 return fn; 1749 } 1750 1751 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and 1752 *OVERLAPS_B are initialized to the functions that describe the 1753 relation between the elements accessed twice by CHREC_A and 1754 CHREC_B. For k >= 0, the following property is verified: 1755 1756 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 1757 1758 static void 1759 analyze_ziv_subscript (tree chrec_a, 1760 tree chrec_b, 1761 conflict_function **overlaps_a, 1762 conflict_function **overlaps_b, 1763 tree *last_conflicts) 1764 { 1765 tree type, difference; 1766 dependence_stats.num_ziv++; 1767 1768 if (dump_file && (dump_flags & TDF_DETAILS)) 1769 fprintf (dump_file, "(analyze_ziv_subscript \n"); 1770 1771 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 1772 chrec_a = chrec_convert (type, chrec_a, NULL); 1773 chrec_b = chrec_convert (type, chrec_b, NULL); 1774 difference = chrec_fold_minus (type, chrec_a, chrec_b); 1775 1776 switch (TREE_CODE (difference)) 1777 { 1778 case INTEGER_CST: 1779 if (integer_zerop (difference)) 1780 { 1781 /* The difference is equal to zero: the accessed index 1782 overlaps for each iteration in the loop. */ 1783 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 1784 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 1785 *last_conflicts = chrec_dont_know; 1786 dependence_stats.num_ziv_dependent++; 1787 } 1788 else 1789 { 1790 /* The accesses do not overlap. */ 1791 *overlaps_a = conflict_fn_no_dependence (); 1792 *overlaps_b = conflict_fn_no_dependence (); 1793 *last_conflicts = integer_zero_node; 1794 dependence_stats.num_ziv_independent++; 1795 } 1796 break; 1797 1798 default: 1799 /* We're not sure whether the indexes overlap. For the moment, 1800 conservatively answer "don't know". */ 1801 if (dump_file && (dump_flags & TDF_DETAILS)) 1802 fprintf (dump_file, "ziv test failed: difference is non-integer.\n"); 1803 1804 *overlaps_a = conflict_fn_not_known (); 1805 *overlaps_b = conflict_fn_not_known (); 1806 *last_conflicts = chrec_dont_know; 1807 dependence_stats.num_ziv_unimplemented++; 1808 break; 1809 } 1810 1811 if (dump_file && (dump_flags & TDF_DETAILS)) 1812 fprintf (dump_file, ")\n"); 1813 } 1814 1815 /* Similar to max_stmt_executions_int, but returns the bound as a tree, 1816 and only if it fits to the int type. If this is not the case, or the 1817 bound on the number of iterations of LOOP could not be derived, returns 1818 chrec_dont_know. */ 1819 1820 static tree 1821 max_stmt_executions_tree (struct loop *loop) 1822 { 1823 widest_int nit; 1824 1825 if (!max_stmt_executions (loop, &nit)) 1826 return chrec_dont_know; 1827 1828 if (!wi::fits_to_tree_p (nit, unsigned_type_node)) 1829 return chrec_dont_know; 1830 1831 return wide_int_to_tree (unsigned_type_node, nit); 1832 } 1833 1834 /* Determine whether the CHREC is always positive/negative. 
If the expression 1835 cannot be statically analyzed, return false, otherwise set the answer into 1836 VALUE. */ 1837 1838 static bool 1839 chrec_is_positive (tree chrec, bool *value) 1840 { 1841 bool value0, value1, value2; 1842 tree end_value, nb_iter; 1843 1844 switch (TREE_CODE (chrec)) 1845 { 1846 case POLYNOMIAL_CHREC: 1847 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0) 1848 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1)) 1849 return false; 1850 1851 /* FIXME -- overflows. */ 1852 if (value0 == value1) 1853 { 1854 *value = value0; 1855 return true; 1856 } 1857 1858 /* Otherwise the chrec is under the form: "{-197, +, 2}_1", 1859 and the proof consists in showing that the sign never 1860 changes during the execution of the loop, from 0 to 1861 loop->nb_iterations. */ 1862 if (!evolution_function_is_affine_p (chrec)) 1863 return false; 1864 1865 nb_iter = number_of_latch_executions (get_chrec_loop (chrec)); 1866 if (chrec_contains_undetermined (nb_iter)) 1867 return false; 1868 1869 #if 0 1870 /* TODO -- If the test is after the exit, we may decrease the number of 1871 iterations by one. */ 1872 if (after_exit) 1873 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1)); 1874 #endif 1875 1876 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter); 1877 1878 if (!chrec_is_positive (end_value, &value2)) 1879 return false; 1880 1881 *value = value0; 1882 return value0 == value1; 1883 1884 case INTEGER_CST: 1885 switch (tree_int_cst_sgn (chrec)) 1886 { 1887 case -1: 1888 *value = false; 1889 break; 1890 case 1: 1891 *value = true; 1892 break; 1893 default: 1894 return false; 1895 } 1896 return true; 1897 1898 default: 1899 return false; 1900 } 1901 } 1902 1903 1904 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a 1905 constant, and CHREC_B is an affine function. *OVERLAPS_A and 1906 *OVERLAPS_B are initialized to the functions that describe the 1907 relation between the elements accessed twice by CHREC_A and 1908 CHREC_B. For k >= 0, the following property is verified: 1909 1910 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */ 1911 1912 static void 1913 analyze_siv_subscript_cst_affine (tree chrec_a, 1914 tree chrec_b, 1915 conflict_function **overlaps_a, 1916 conflict_function **overlaps_b, 1917 tree *last_conflicts) 1918 { 1919 bool value0, value1, value2; 1920 tree type, difference, tmp; 1921 1922 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 1923 chrec_a = chrec_convert (type, chrec_a, NULL); 1924 chrec_b = chrec_convert (type, chrec_b, NULL); 1925 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a); 1926 1927 /* Special case overlap in the first iteration. 
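     E.g. (illustration) chrec_a = 10 and chrec_b = {10, +, 1}: the only
     collision is at iteration 0 of chrec_b, hence the constant overlap
     functions and *LAST_CONFLICTS set to 1 below.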
*/ 1928 if (integer_zerop (difference)) 1929 { 1930 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 1931 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 1932 *last_conflicts = integer_one_node; 1933 return; 1934 } 1935 1936 if (!chrec_is_positive (initial_condition (difference), &value0)) 1937 { 1938 if (dump_file && (dump_flags & TDF_DETAILS)) 1939 fprintf (dump_file, "siv test failed: chrec is not positive.\n"); 1940 1941 dependence_stats.num_siv_unimplemented++; 1942 *overlaps_a = conflict_fn_not_known (); 1943 *overlaps_b = conflict_fn_not_known (); 1944 *last_conflicts = chrec_dont_know; 1945 return; 1946 } 1947 else 1948 { 1949 if (value0 == false) 1950 { 1951 if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value1)) 1952 { 1953 if (dump_file && (dump_flags & TDF_DETAILS)) 1954 fprintf (dump_file, "siv test failed: chrec not positive.\n"); 1955 1956 *overlaps_a = conflict_fn_not_known (); 1957 *overlaps_b = conflict_fn_not_known (); 1958 *last_conflicts = chrec_dont_know; 1959 dependence_stats.num_siv_unimplemented++; 1960 return; 1961 } 1962 else 1963 { 1964 if (value1 == true) 1965 { 1966 /* Example: 1967 chrec_a = 12 1968 chrec_b = {10, +, 1} 1969 */ 1970 1971 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference)) 1972 { 1973 HOST_WIDE_INT numiter; 1974 struct loop *loop = get_chrec_loop (chrec_b); 1975 1976 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 1977 tmp = fold_build2 (EXACT_DIV_EXPR, type, 1978 fold_build1 (ABS_EXPR, type, difference), 1979 CHREC_RIGHT (chrec_b)); 1980 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp)); 1981 *last_conflicts = integer_one_node; 1982 1983 1984 /* Perform weak-zero siv test to see if overlap is 1985 outside the loop bounds. */ 1986 numiter = max_stmt_executions_int (loop); 1987 1988 if (numiter >= 0 1989 && compare_tree_int (tmp, numiter) > 0) 1990 { 1991 free_conflict_function (*overlaps_a); 1992 free_conflict_function (*overlaps_b); 1993 *overlaps_a = conflict_fn_no_dependence (); 1994 *overlaps_b = conflict_fn_no_dependence (); 1995 *last_conflicts = integer_zero_node; 1996 dependence_stats.num_siv_independent++; 1997 return; 1998 } 1999 dependence_stats.num_siv_dependent++; 2000 return; 2001 } 2002 2003 /* When the step does not divide the difference, there are 2004 no overlaps. */ 2005 else 2006 { 2007 *overlaps_a = conflict_fn_no_dependence (); 2008 *overlaps_b = conflict_fn_no_dependence (); 2009 *last_conflicts = integer_zero_node; 2010 dependence_stats.num_siv_independent++; 2011 return; 2012 } 2013 } 2014 2015 else 2016 { 2017 /* Example: 2018 chrec_a = 12 2019 chrec_b = {10, +, -1} 2020 2021 In this case, chrec_a will not overlap with chrec_b. 
*/ 2022 *overlaps_a = conflict_fn_no_dependence (); 2023 *overlaps_b = conflict_fn_no_dependence (); 2024 *last_conflicts = integer_zero_node; 2025 dependence_stats.num_siv_independent++; 2026 return; 2027 } 2028 } 2029 } 2030 else 2031 { 2032 if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value2)) 2033 { 2034 if (dump_file && (dump_flags & TDF_DETAILS)) 2035 fprintf (dump_file, "siv test failed: chrec not positive.\n"); 2036 2037 *overlaps_a = conflict_fn_not_known (); 2038 *overlaps_b = conflict_fn_not_known (); 2039 *last_conflicts = chrec_dont_know; 2040 dependence_stats.num_siv_unimplemented++; 2041 return; 2042 } 2043 else 2044 { 2045 if (value2 == false) 2046 { 2047 /* Example: 2048 chrec_a = 3 2049 chrec_b = {10, +, -1} 2050 */ 2051 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference)) 2052 { 2053 HOST_WIDE_INT numiter; 2054 struct loop *loop = get_chrec_loop (chrec_b); 2055 2056 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2057 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference, 2058 CHREC_RIGHT (chrec_b)); 2059 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp)); 2060 *last_conflicts = integer_one_node; 2061 2062 /* Perform weak-zero siv test to see if overlap is 2063 outside the loop bounds. */ 2064 numiter = max_stmt_executions_int (loop); 2065 2066 if (numiter >= 0 2067 && compare_tree_int (tmp, numiter) > 0) 2068 { 2069 free_conflict_function (*overlaps_a); 2070 free_conflict_function (*overlaps_b); 2071 *overlaps_a = conflict_fn_no_dependence (); 2072 *overlaps_b = conflict_fn_no_dependence (); 2073 *last_conflicts = integer_zero_node; 2074 dependence_stats.num_siv_independent++; 2075 return; 2076 } 2077 dependence_stats.num_siv_dependent++; 2078 return; 2079 } 2080 2081 /* When the step does not divide the difference, there 2082 are no overlaps. */ 2083 else 2084 { 2085 *overlaps_a = conflict_fn_no_dependence (); 2086 *overlaps_b = conflict_fn_no_dependence (); 2087 *last_conflicts = integer_zero_node; 2088 dependence_stats.num_siv_independent++; 2089 return; 2090 } 2091 } 2092 else 2093 { 2094 /* Example: 2095 chrec_a = 3 2096 chrec_b = {4, +, 1} 2097 2098 In this case, chrec_a will not overlap with chrec_b. */ 2099 *overlaps_a = conflict_fn_no_dependence (); 2100 *overlaps_b = conflict_fn_no_dependence (); 2101 *last_conflicts = integer_zero_node; 2102 dependence_stats.num_siv_independent++; 2103 return; 2104 } 2105 } 2106 } 2107 } 2108 } 2109 2110 /* Helper recursive function for initializing the matrix A. Returns 2111 the initial value of CHREC. */ 2112 2113 static tree 2114 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult) 2115 { 2116 gcc_assert (chrec); 2117 2118 switch (TREE_CODE (chrec)) 2119 { 2120 case POLYNOMIAL_CHREC: 2121 A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec)); 2122 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult); 2123 2124 case PLUS_EXPR: 2125 case MULT_EXPR: 2126 case MINUS_EXPR: 2127 { 2128 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 2129 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult); 2130 2131 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1); 2132 } 2133 2134 CASE_CONVERT: 2135 { 2136 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 2137 return chrec_convert (chrec_type (chrec), op, NULL); 2138 } 2139 2140 case BIT_NOT_EXPR: 2141 { 2142 /* Handle ~X as -1 - X. 
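For instance ~{0, +, 1}_1 equals -1 - {0, +, 1}_1 = {-1, +, -1}_1.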
*/ 2143 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult); 2144 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec), 2145 build_int_cst (TREE_TYPE (chrec), -1), op); 2146 } 2147 2148 case INTEGER_CST: 2149 return chrec; 2150 2151 default: 2152 gcc_unreachable (); 2153 return NULL_TREE; 2154 } 2155 } 2156 2157 #define FLOOR_DIV(x,y) ((x) / (y)) 2158 2159 /* Solves the special case of the Diophantine equation: 2160 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B) 2161 2162 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the 2163 number of iterations that loops X and Y run. The overlaps will be 2164 constructed as evolutions in dimension DIM. */ 2165 2166 static void 2167 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter, 2168 HOST_WIDE_INT step_a, 2169 HOST_WIDE_INT step_b, 2170 affine_fn *overlaps_a, 2171 affine_fn *overlaps_b, 2172 tree *last_conflicts, int dim) 2173 { 2174 if (((step_a > 0 && step_b > 0) 2175 || (step_a < 0 && step_b < 0))) 2176 { 2177 HOST_WIDE_INT step_overlaps_a, step_overlaps_b; 2178 HOST_WIDE_INT gcd_steps_a_b, last_conflict, tau2; 2179 2180 gcd_steps_a_b = gcd (step_a, step_b); 2181 step_overlaps_a = step_b / gcd_steps_a_b; 2182 step_overlaps_b = step_a / gcd_steps_a_b; 2183 2184 if (niter > 0) 2185 { 2186 tau2 = FLOOR_DIV (niter, step_overlaps_a); 2187 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b)); 2188 last_conflict = tau2; 2189 *last_conflicts = build_int_cst (NULL_TREE, last_conflict); 2190 } 2191 else 2192 *last_conflicts = chrec_dont_know; 2193 2194 *overlaps_a = affine_fn_univar (integer_zero_node, dim, 2195 build_int_cst (NULL_TREE, 2196 step_overlaps_a)); 2197 *overlaps_b = affine_fn_univar (integer_zero_node, dim, 2198 build_int_cst (NULL_TREE, 2199 step_overlaps_b)); 2200 } 2201 2202 else 2203 { 2204 *overlaps_a = affine_fn_cst (integer_zero_node); 2205 *overlaps_b = affine_fn_cst (integer_zero_node); 2206 *last_conflicts = integer_zero_node; 2207 } 2208 } 2209 2210 /* Solves the special case of a Diophantine equation where CHREC_A is 2211 an affine bivariate function, and CHREC_B is an affine univariate 2212 function. For example, 2213 2214 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z 2215 2216 has the following overlapping functions: 2217 2218 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v 2219 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v 2220 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v 2221 2222 FORNOW: This is a specialized implementation for a case occurring in 2223 a common benchmark. Implement the general algorithm. 
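As a quick check of the example above: at (t, u, v) = (1, 0, 0) the overlapping iterations are x = 1336, y = 0 and z = 1, and both sides of the equation evaluate to 1336.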
*/ 2224 2225 static void 2226 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b, 2227 conflict_function **overlaps_a, 2228 conflict_function **overlaps_b, 2229 tree *last_conflicts) 2230 { 2231 bool xz_p, yz_p, xyz_p; 2232 HOST_WIDE_INT step_x, step_y, step_z; 2233 HOST_WIDE_INT niter_x, niter_y, niter_z, niter; 2234 affine_fn overlaps_a_xz, overlaps_b_xz; 2235 affine_fn overlaps_a_yz, overlaps_b_yz; 2236 affine_fn overlaps_a_xyz, overlaps_b_xyz; 2237 affine_fn ova1, ova2, ovb; 2238 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz; 2239 2240 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a))); 2241 step_y = int_cst_value (CHREC_RIGHT (chrec_a)); 2242 step_z = int_cst_value (CHREC_RIGHT (chrec_b)); 2243 2244 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a))); 2245 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a)); 2246 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b)); 2247 2248 if (niter_x < 0 || niter_y < 0 || niter_z < 0) 2249 { 2250 if (dump_file && (dump_flags & TDF_DETAILS)) 2251 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n"); 2252 2253 *overlaps_a = conflict_fn_not_known (); 2254 *overlaps_b = conflict_fn_not_known (); 2255 *last_conflicts = chrec_dont_know; 2256 return; 2257 } 2258 2259 niter = MIN (niter_x, niter_z); 2260 compute_overlap_steps_for_affine_univar (niter, step_x, step_z, 2261 &overlaps_a_xz, 2262 &overlaps_b_xz, 2263 &last_conflicts_xz, 1); 2264 niter = MIN (niter_y, niter_z); 2265 compute_overlap_steps_for_affine_univar (niter, step_y, step_z, 2266 &overlaps_a_yz, 2267 &overlaps_b_yz, 2268 &last_conflicts_yz, 2); 2269 niter = MIN (niter_x, niter_z); 2270 niter = MIN (niter_y, niter); 2271 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z, 2272 &overlaps_a_xyz, 2273 &overlaps_b_xyz, 2274 &last_conflicts_xyz, 3); 2275 2276 xz_p = !integer_zerop (last_conflicts_xz); 2277 yz_p = !integer_zerop (last_conflicts_yz); 2278 xyz_p = !integer_zerop (last_conflicts_xyz); 2279 2280 if (xz_p || yz_p || xyz_p) 2281 { 2282 ova1 = affine_fn_cst (integer_zero_node); 2283 ova2 = affine_fn_cst (integer_zero_node); 2284 ovb = affine_fn_cst (integer_zero_node); 2285 if (xz_p) 2286 { 2287 affine_fn t0 = ova1; 2288 affine_fn t2 = ovb; 2289 2290 ova1 = affine_fn_plus (ova1, overlaps_a_xz); 2291 ovb = affine_fn_plus (ovb, overlaps_b_xz); 2292 affine_fn_free (t0); 2293 affine_fn_free (t2); 2294 *last_conflicts = last_conflicts_xz; 2295 } 2296 if (yz_p) 2297 { 2298 affine_fn t0 = ova2; 2299 affine_fn t2 = ovb; 2300 2301 ova2 = affine_fn_plus (ova2, overlaps_a_yz); 2302 ovb = affine_fn_plus (ovb, overlaps_b_yz); 2303 affine_fn_free (t0); 2304 affine_fn_free (t2); 2305 *last_conflicts = last_conflicts_yz; 2306 } 2307 if (xyz_p) 2308 { 2309 affine_fn t0 = ova1; 2310 affine_fn t2 = ova2; 2311 affine_fn t4 = ovb; 2312 2313 ova1 = affine_fn_plus (ova1, overlaps_a_xyz); 2314 ova2 = affine_fn_plus (ova2, overlaps_a_xyz); 2315 ovb = affine_fn_plus (ovb, overlaps_b_xyz); 2316 affine_fn_free (t0); 2317 affine_fn_free (t2); 2318 affine_fn_free (t4); 2319 *last_conflicts = last_conflicts_xyz; 2320 } 2321 *overlaps_a = conflict_fn (2, ova1, ova2); 2322 *overlaps_b = conflict_fn (1, ovb); 2323 } 2324 else 2325 { 2326 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2327 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2328 *last_conflicts = integer_zero_node; 2329 } 2330 2331 affine_fn_free (overlaps_a_xz); 2332 affine_fn_free (overlaps_b_xz); 2333 
affine_fn_free (overlaps_a_yz); 2334 affine_fn_free (overlaps_b_yz); 2335 affine_fn_free (overlaps_a_xyz); 2336 affine_fn_free (overlaps_b_xyz); 2337 } 2338 2339 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */ 2340 2341 static void 2342 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2, 2343 int size) 2344 { 2345 memcpy (vec2, vec1, size * sizeof (*vec1)); 2346 } 2347 2348 /* Copy the elements of M x N matrix MAT1 to MAT2. */ 2349 2350 static void 2351 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2, 2352 int m, int n) 2353 { 2354 int i; 2355 2356 for (i = 0; i < m; i++) 2357 lambda_vector_copy (mat1[i], mat2[i], n); 2358 } 2359 2360 /* Store the N x N identity matrix in MAT. */ 2361 2362 static void 2363 lambda_matrix_id (lambda_matrix mat, int size) 2364 { 2365 int i, j; 2366 2367 for (i = 0; i < size; i++) 2368 for (j = 0; j < size; j++) 2369 mat[i][j] = (i == j) ? 1 : 0; 2370 } 2371 2372 /* Return the first nonzero element of vector VEC1 between START and N. 2373 We must have START <= N. Returns N if VEC1 is the zero vector. */ 2374 2375 static int 2376 lambda_vector_first_nz (lambda_vector vec1, int n, int start) 2377 { 2378 int j = start; 2379 while (j < n && vec1[j] == 0) 2380 j++; 2381 return j; 2382 } 2383 2384 /* Add a multiple of row R1 of matrix MAT with N columns to row R2: 2385 R2 = R2 + CONST1 * R1. */ 2386 2387 static void 2388 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2, int const1) 2389 { 2390 int i; 2391 2392 if (const1 == 0) 2393 return; 2394 2395 for (i = 0; i < n; i++) 2396 mat[r2][i] += const1 * mat[r1][i]; 2397 } 2398 2399 /* Multiply vector VEC1 of length SIZE by a constant CONST1, 2400 and store the result in VEC2. */ 2401 2402 static void 2403 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2, 2404 int size, int const1) 2405 { 2406 int i; 2407 2408 if (const1 == 0) 2409 lambda_vector_clear (vec2, size); 2410 else 2411 for (i = 0; i < size; i++) 2412 vec2[i] = const1 * vec1[i]; 2413 } 2414 2415 /* Negate vector VEC1 with length SIZE and store it in VEC2. */ 2416 2417 static void 2418 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2, 2419 int size) 2420 { 2421 lambda_vector_mult_const (vec1, vec2, size, -1); 2422 } 2423 2424 /* Negate row R1 of matrix MAT which has N columns. */ 2425 2426 static void 2427 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1) 2428 { 2429 lambda_vector_negate (mat[r1], mat[r1], n); 2430 } 2431 2432 /* Return true if two vectors are equal. */ 2433 2434 static bool 2435 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size) 2436 { 2437 int i; 2438 for (i = 0; i < size; i++) 2439 if (vec1[i] != vec2[i]) 2440 return false; 2441 return true; 2442 } 2443 2444 /* Given an M x N integer matrix A, this function determines an M x 2445 M unimodular matrix U, and an M x N echelon matrix S such that 2446 "U.A = S". This decomposition is also known as "right Hermite". 2447 2448 Ref: Algorithm 2.1 page 33 in "Loop Transformations for 2449 Restructuring Compilers" Utpal Banerjee. 
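A small example, worked by hand rather than traced through this routine: for the 2 x 1 matrix A = (2 4)^T, the unimodular matrix U = ((1 0) (-2 1)) gives U.A = (2 0)^T = S, which is in echelon form with S[0][0] = gcd (2, 4).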
*/ 2450 2451 static void 2452 lambda_matrix_right_hermite (lambda_matrix A, int m, int n, 2453 lambda_matrix S, lambda_matrix U) 2454 { 2455 int i, j, i0 = 0; 2456 2457 lambda_matrix_copy (A, S, m, n); 2458 lambda_matrix_id (U, m); 2459 2460 for (j = 0; j < n; j++) 2461 { 2462 if (lambda_vector_first_nz (S[j], m, i0) < m) 2463 { 2464 ++i0; 2465 for (i = m - 1; i >= i0; i--) 2466 { 2467 while (S[i][j] != 0) 2468 { 2469 int sigma, factor, a, b; 2470 2471 a = S[i-1][j]; 2472 b = S[i][j]; 2473 sigma = (a * b < 0) ? -1: 1; 2474 a = abs (a); 2475 b = abs (b); 2476 factor = sigma * (a / b); 2477 2478 lambda_matrix_row_add (S, n, i, i-1, -factor); 2479 std::swap (S[i], S[i-1]); 2480 2481 lambda_matrix_row_add (U, m, i, i-1, -factor); 2482 std::swap (U[i], U[i-1]); 2483 } 2484 } 2485 } 2486 } 2487 } 2488 2489 /* Determines the overlapping elements due to accesses CHREC_A and 2490 CHREC_B, that are affine functions. This function cannot handle 2491 symbolic evolution functions, ie. when initial conditions are 2492 parameters, because it uses lambda matrices of integers. */ 2493 2494 static void 2495 analyze_subscript_affine_affine (tree chrec_a, 2496 tree chrec_b, 2497 conflict_function **overlaps_a, 2498 conflict_function **overlaps_b, 2499 tree *last_conflicts) 2500 { 2501 unsigned nb_vars_a, nb_vars_b, dim; 2502 HOST_WIDE_INT init_a, init_b, gamma, gcd_alpha_beta; 2503 lambda_matrix A, U, S; 2504 struct obstack scratch_obstack; 2505 2506 if (eq_evolutions_p (chrec_a, chrec_b)) 2507 { 2508 /* The accessed index overlaps for each iteration in the 2509 loop. */ 2510 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2511 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2512 *last_conflicts = chrec_dont_know; 2513 return; 2514 } 2515 if (dump_file && (dump_flags & TDF_DETAILS)) 2516 fprintf (dump_file, "(analyze_subscript_affine_affine \n"); 2517 2518 /* For determining the initial intersection, we have to solve a 2519 Diophantine equation. This is the most time consuming part. 2520 2521 For answering to the question: "Is there a dependence?" we have 2522 to prove that there exists a solution to the Diophantine 2523 equation, and that the solution is in the iteration domain, 2524 i.e. the solution is positive or zero, and that the solution 2525 happens before the upper bound loop.nb_iterations. Otherwise 2526 there is no dependence. This function outputs a description of 2527 the iterations that hold the intersections. */ 2528 2529 nb_vars_a = nb_vars_in_chrec (chrec_a); 2530 nb_vars_b = nb_vars_in_chrec (chrec_b); 2531 2532 gcc_obstack_init (&scratch_obstack); 2533 2534 dim = nb_vars_a + nb_vars_b; 2535 U = lambda_matrix_new (dim, dim, &scratch_obstack); 2536 A = lambda_matrix_new (dim, 1, &scratch_obstack); 2537 S = lambda_matrix_new (dim, 1, &scratch_obstack); 2538 2539 init_a = int_cst_value (initialize_matrix_A (A, chrec_a, 0, 1)); 2540 init_b = int_cst_value (initialize_matrix_A (A, chrec_b, nb_vars_a, -1)); 2541 gamma = init_b - init_a; 2542 2543 /* Don't do all the hard work of solving the Diophantine equation 2544 when we already know the solution: for example, 2545 | {3, +, 1}_1 2546 | {3, +, 4}_2 2547 | gamma = 3 - 3 = 0. 
2548 Then the first overlap occurs during the first iterations: 2549 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x) 2550 */ 2551 if (gamma == 0) 2552 { 2553 if (nb_vars_a == 1 && nb_vars_b == 1) 2554 { 2555 HOST_WIDE_INT step_a, step_b; 2556 HOST_WIDE_INT niter, niter_a, niter_b; 2557 affine_fn ova, ovb; 2558 2559 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a)); 2560 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b)); 2561 niter = MIN (niter_a, niter_b); 2562 step_a = int_cst_value (CHREC_RIGHT (chrec_a)); 2563 step_b = int_cst_value (CHREC_RIGHT (chrec_b)); 2564 2565 compute_overlap_steps_for_affine_univar (niter, step_a, step_b, 2566 &ova, &ovb, 2567 last_conflicts, 1); 2568 *overlaps_a = conflict_fn (1, ova); 2569 *overlaps_b = conflict_fn (1, ovb); 2570 } 2571 2572 else if (nb_vars_a == 2 && nb_vars_b == 1) 2573 compute_overlap_steps_for_affine_1_2 2574 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts); 2575 2576 else if (nb_vars_a == 1 && nb_vars_b == 2) 2577 compute_overlap_steps_for_affine_1_2 2578 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts); 2579 2580 else 2581 { 2582 if (dump_file && (dump_flags & TDF_DETAILS)) 2583 fprintf (dump_file, "affine-affine test failed: too many variables.\n"); 2584 *overlaps_a = conflict_fn_not_known (); 2585 *overlaps_b = conflict_fn_not_known (); 2586 *last_conflicts = chrec_dont_know; 2587 } 2588 goto end_analyze_subs_aa; 2589 } 2590 2591 /* U.A = S */ 2592 lambda_matrix_right_hermite (A, dim, 1, S, U); 2593 2594 if (S[0][0] < 0) 2595 { 2596 S[0][0] *= -1; 2597 lambda_matrix_row_negate (U, dim, 0); 2598 } 2599 gcd_alpha_beta = S[0][0]; 2600 2601 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5, 2602 but that is a quite strange case. Instead of ICEing, answer 2603 don't know. */ 2604 if (gcd_alpha_beta == 0) 2605 { 2606 *overlaps_a = conflict_fn_not_known (); 2607 *overlaps_b = conflict_fn_not_known (); 2608 *last_conflicts = chrec_dont_know; 2609 goto end_analyze_subs_aa; 2610 } 2611 2612 /* The classic "gcd-test". */ 2613 if (!int_divides_p (gcd_alpha_beta, gamma)) 2614 { 2615 /* The "gcd-test" has determined that there is no integer 2616 solution, i.e. there is no dependence. */ 2617 *overlaps_a = conflict_fn_no_dependence (); 2618 *overlaps_b = conflict_fn_no_dependence (); 2619 *last_conflicts = integer_zero_node; 2620 } 2621 2622 /* Both access functions are univariate. This includes SIV and MIV cases. */ 2623 else if (nb_vars_a == 1 && nb_vars_b == 1) 2624 { 2625 /* Both functions should have the same evolution sign. */ 2626 if (((A[0][0] > 0 && -A[1][0] > 0) 2627 || (A[0][0] < 0 && -A[1][0] < 0))) 2628 { 2629 /* The solutions are given by: 2630 | 2631 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0] 2632 | [u21 u22] [y0] 2633 2634 For a given integer t. Using the following variables, 2635 2636 | i0 = u11 * gamma / gcd_alpha_beta 2637 | j0 = u12 * gamma / gcd_alpha_beta 2638 | i1 = u21 2639 | j1 = u22 2640 2641 the solutions are: 2642 2643 | x0 = i0 + i1 * t, 2644 | y0 = j0 + j1 * t. */ 2645 HOST_WIDE_INT i0, j0, i1, j1; 2646 2647 i0 = U[0][0] * gamma / gcd_alpha_beta; 2648 j0 = U[0][1] * gamma / gcd_alpha_beta; 2649 i1 = U[1][0]; 2650 j1 = U[1][1]; 2651 2652 if ((i1 == 0 && i0 < 0) 2653 || (j1 == 0 && j0 < 0)) 2654 { 2655 /* There is no solution. 2656 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations" 2657 falls in here, but for the moment we don't look at the 2658 upper bound of the iteration domain. 
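For instance i1 == 0 together with i0 < 0 means x0 = i0 + i1 * t = i0 is negative for every t, hence outside the iteration domain.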
*/ 2659 *overlaps_a = conflict_fn_no_dependence (); 2660 *overlaps_b = conflict_fn_no_dependence (); 2661 *last_conflicts = integer_zero_node; 2662 goto end_analyze_subs_aa; 2663 } 2664 2665 if (i1 > 0 && j1 > 0) 2666 { 2667 HOST_WIDE_INT niter_a 2668 = max_stmt_executions_int (get_chrec_loop (chrec_a)); 2669 HOST_WIDE_INT niter_b 2670 = max_stmt_executions_int (get_chrec_loop (chrec_b)); 2671 HOST_WIDE_INT niter = MIN (niter_a, niter_b); 2672 2673 /* (X0, Y0) is a solution of the Diophantine equation: 2674 "chrec_a (X0) = chrec_b (Y0)". */ 2675 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1), 2676 CEIL (-j0, j1)); 2677 HOST_WIDE_INT x0 = i1 * tau1 + i0; 2678 HOST_WIDE_INT y0 = j1 * tau1 + j0; 2679 2680 /* (X1, Y1) is the smallest positive solution of the eq 2681 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the 2682 first conflict occurs. */ 2683 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1); 2684 HOST_WIDE_INT x1 = x0 - i1 * min_multiple; 2685 HOST_WIDE_INT y1 = y0 - j1 * min_multiple; 2686 2687 if (niter > 0) 2688 { 2689 HOST_WIDE_INT tau2 = MIN (FLOOR_DIV (niter_a - i0, i1), 2690 FLOOR_DIV (niter_b - j0, j1)); 2691 HOST_WIDE_INT last_conflict = tau2 - (x1 - i0)/i1; 2692 2693 /* If the overlap occurs outside of the bounds of the 2694 loop, there is no dependence. */ 2695 if (x1 >= niter_a || y1 >= niter_b) 2696 { 2697 *overlaps_a = conflict_fn_no_dependence (); 2698 *overlaps_b = conflict_fn_no_dependence (); 2699 *last_conflicts = integer_zero_node; 2700 goto end_analyze_subs_aa; 2701 } 2702 else 2703 *last_conflicts = build_int_cst (NULL_TREE, last_conflict); 2704 } 2705 else 2706 *last_conflicts = chrec_dont_know; 2707 2708 *overlaps_a 2709 = conflict_fn (1, 2710 affine_fn_univar (build_int_cst (NULL_TREE, x1), 2711 1, 2712 build_int_cst (NULL_TREE, i1))); 2713 *overlaps_b 2714 = conflict_fn (1, 2715 affine_fn_univar (build_int_cst (NULL_TREE, y1), 2716 1, 2717 build_int_cst (NULL_TREE, j1))); 2718 } 2719 else 2720 { 2721 /* FIXME: For the moment, the upper bound of the 2722 iteration domain for i and j is not checked. */ 2723 if (dump_file && (dump_flags & TDF_DETAILS)) 2724 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 2725 *overlaps_a = conflict_fn_not_known (); 2726 *overlaps_b = conflict_fn_not_known (); 2727 *last_conflicts = chrec_dont_know; 2728 } 2729 } 2730 else 2731 { 2732 if (dump_file && (dump_flags & TDF_DETAILS)) 2733 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 2734 *overlaps_a = conflict_fn_not_known (); 2735 *overlaps_b = conflict_fn_not_known (); 2736 *last_conflicts = chrec_dont_know; 2737 } 2738 } 2739 else 2740 { 2741 if (dump_file && (dump_flags & TDF_DETAILS)) 2742 fprintf (dump_file, "affine-affine test failed: unimplemented.\n"); 2743 *overlaps_a = conflict_fn_not_known (); 2744 *overlaps_b = conflict_fn_not_known (); 2745 *last_conflicts = chrec_dont_know; 2746 } 2747 2748 end_analyze_subs_aa: 2749 obstack_free (&scratch_obstack, NULL); 2750 if (dump_file && (dump_flags & TDF_DETAILS)) 2751 { 2752 fprintf (dump_file, " (overlaps_a = "); 2753 dump_conflict_function (dump_file, *overlaps_a); 2754 fprintf (dump_file, ")\n (overlaps_b = "); 2755 dump_conflict_function (dump_file, *overlaps_b); 2756 fprintf (dump_file, "))\n"); 2757 } 2758 } 2759 2760 /* Returns true when analyze_subscript_affine_affine can be used for 2761 determining the dependence relation between chrec_a and chrec_b, 2762 that contain symbols. 
This function modifies chrec_a and chrec_b 2763 such that the analysis result is the same, and such that they don't 2764 contain symbols, and then can safely be passed to the analyzer. 2765 2766 Example: The analysis of the following tuples of evolutions produce 2767 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1 2768 vs. {0, +, 1}_1 2769 2770 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1) 2771 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1) 2772 */ 2773 2774 static bool 2775 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b) 2776 { 2777 tree diff, type, left_a, left_b, right_b; 2778 2779 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a)) 2780 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b))) 2781 /* FIXME: For the moment not handled. Might be refined later. */ 2782 return false; 2783 2784 type = chrec_type (*chrec_a); 2785 left_a = CHREC_LEFT (*chrec_a); 2786 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL); 2787 diff = chrec_fold_minus (type, left_a, left_b); 2788 2789 if (!evolution_function_is_constant_p (diff)) 2790 return false; 2791 2792 if (dump_file && (dump_flags & TDF_DETAILS)) 2793 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n"); 2794 2795 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a), 2796 diff, CHREC_RIGHT (*chrec_a)); 2797 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL); 2798 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b), 2799 build_int_cst (type, 0), 2800 right_b); 2801 return true; 2802 } 2803 2804 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and 2805 *OVERLAPS_B are initialized to the functions that describe the 2806 relation between the elements accessed twice by CHREC_A and 2807 CHREC_B. For k >= 0, the following property is verified: 2808 2809 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). 
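For example, with CHREC_A = 12 and CHREC_B = {10, +, 1}_1, *OVERLAPS_A is the constant function 0 and *OVERLAPS_B is the constant function 2, since CHREC_B (2) = 12 = CHREC_A.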
*/ 2810 2811 static void 2812 analyze_siv_subscript (tree chrec_a, 2813 tree chrec_b, 2814 conflict_function **overlaps_a, 2815 conflict_function **overlaps_b, 2816 tree *last_conflicts, 2817 int loop_nest_num) 2818 { 2819 dependence_stats.num_siv++; 2820 2821 if (dump_file && (dump_flags & TDF_DETAILS)) 2822 fprintf (dump_file, "(analyze_siv_subscript \n"); 2823 2824 if (evolution_function_is_constant_p (chrec_a) 2825 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num)) 2826 analyze_siv_subscript_cst_affine (chrec_a, chrec_b, 2827 overlaps_a, overlaps_b, last_conflicts); 2828 2829 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num) 2830 && evolution_function_is_constant_p (chrec_b)) 2831 analyze_siv_subscript_cst_affine (chrec_b, chrec_a, 2832 overlaps_b, overlaps_a, last_conflicts); 2833 2834 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num) 2835 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num)) 2836 { 2837 if (!chrec_contains_symbols (chrec_a) 2838 && !chrec_contains_symbols (chrec_b)) 2839 { 2840 analyze_subscript_affine_affine (chrec_a, chrec_b, 2841 overlaps_a, overlaps_b, 2842 last_conflicts); 2843 2844 if (CF_NOT_KNOWN_P (*overlaps_a) 2845 || CF_NOT_KNOWN_P (*overlaps_b)) 2846 dependence_stats.num_siv_unimplemented++; 2847 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 2848 || CF_NO_DEPENDENCE_P (*overlaps_b)) 2849 dependence_stats.num_siv_independent++; 2850 else 2851 dependence_stats.num_siv_dependent++; 2852 } 2853 else if (can_use_analyze_subscript_affine_affine (&chrec_a, 2854 &chrec_b)) 2855 { 2856 analyze_subscript_affine_affine (chrec_a, chrec_b, 2857 overlaps_a, overlaps_b, 2858 last_conflicts); 2859 2860 if (CF_NOT_KNOWN_P (*overlaps_a) 2861 || CF_NOT_KNOWN_P (*overlaps_b)) 2862 dependence_stats.num_siv_unimplemented++; 2863 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 2864 || CF_NO_DEPENDENCE_P (*overlaps_b)) 2865 dependence_stats.num_siv_independent++; 2866 else 2867 dependence_stats.num_siv_dependent++; 2868 } 2869 else 2870 goto siv_subscript_dontknow; 2871 } 2872 2873 else 2874 { 2875 siv_subscript_dontknow:; 2876 if (dump_file && (dump_flags & TDF_DETAILS)) 2877 fprintf (dump_file, " siv test failed: unimplemented"); 2878 *overlaps_a = conflict_fn_not_known (); 2879 *overlaps_b = conflict_fn_not_known (); 2880 *last_conflicts = chrec_dont_know; 2881 dependence_stats.num_siv_unimplemented++; 2882 } 2883 2884 if (dump_file && (dump_flags & TDF_DETAILS)) 2885 fprintf (dump_file, ")\n"); 2886 } 2887 2888 /* Returns false if we can prove that the greatest common divisor of the steps 2889 of CHREC does not divide CST, true otherwise. */ 2890 2891 static bool 2892 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst) 2893 { 2894 HOST_WIDE_INT cd = 0, val; 2895 tree step; 2896 2897 if (!tree_fits_shwi_p (cst)) 2898 return true; 2899 val = tree_to_shwi (cst); 2900 2901 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC) 2902 { 2903 step = CHREC_RIGHT (chrec); 2904 if (!tree_fits_shwi_p (step)) 2905 return true; 2906 cd = gcd (cd, tree_to_shwi (step)); 2907 chrec = CHREC_LEFT (chrec); 2908 } 2909 2910 return val % cd == 0; 2911 } 2912 2913 /* Analyze a MIV (Multiple Index Variable) subscript with respect to 2914 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the 2915 functions that describe the relation between the elements accessed 2916 twice by CHREC_A and CHREC_B. For k >= 0, the following property 2917 is verified: 2918 2919 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).
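In the simplest case below, when CHREC_A and CHREC_B are the same evolution, the conflict happens on every iteration, so *LAST_CONFLICTS is the (maximal) iteration count of the loop rather than a small constant.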
*/ 2920 2921 static void 2922 analyze_miv_subscript (tree chrec_a, 2923 tree chrec_b, 2924 conflict_function **overlaps_a, 2925 conflict_function **overlaps_b, 2926 tree *last_conflicts, 2927 struct loop *loop_nest) 2928 { 2929 tree type, difference; 2930 2931 dependence_stats.num_miv++; 2932 if (dump_file && (dump_flags & TDF_DETAILS)) 2933 fprintf (dump_file, "(analyze_miv_subscript \n"); 2934 2935 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b)); 2936 chrec_a = chrec_convert (type, chrec_a, NULL); 2937 chrec_b = chrec_convert (type, chrec_b, NULL); 2938 difference = chrec_fold_minus (type, chrec_a, chrec_b); 2939 2940 if (eq_evolutions_p (chrec_a, chrec_b)) 2941 { 2942 /* Access functions are the same: all the elements are accessed 2943 in the same order. */ 2944 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2945 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 2946 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a)); 2947 dependence_stats.num_miv_dependent++; 2948 } 2949 2950 else if (evolution_function_is_constant_p (difference) 2951 /* For the moment, the following is verified: 2952 evolution_function_is_affine_multivariate_p (chrec_a, 2953 loop_nest->num) */ 2954 && !gcd_of_steps_may_divide_p (chrec_a, difference)) 2955 { 2956 /* testsuite/.../ssa-chrec-33.c 2957 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2 2958 2959 The difference is 1, and all the evolution steps are multiples 2960 of 2, consequently there are no overlapping elements. */ 2961 *overlaps_a = conflict_fn_no_dependence (); 2962 *overlaps_b = conflict_fn_no_dependence (); 2963 *last_conflicts = integer_zero_node; 2964 dependence_stats.num_miv_independent++; 2965 } 2966 2967 else if (evolution_function_is_affine_multivariate_p (chrec_a, loop_nest->num) 2968 && !chrec_contains_symbols (chrec_a) 2969 && evolution_function_is_affine_multivariate_p (chrec_b, loop_nest->num) 2970 && !chrec_contains_symbols (chrec_b)) 2971 { 2972 /* testsuite/.../ssa-chrec-35.c 2973 {0, +, 1}_2 vs. {0, +, 1}_3 2974 the overlapping elements are respectively located at iterations: 2975 {0, +, 1}_x and {0, +, 1}_x, 2976 in other words, we have the equality: 2977 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x) 2978 2979 Other examples: 2980 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) = 2981 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y) 2982 2983 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) = 2984 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) 2985 */ 2986 analyze_subscript_affine_affine (chrec_a, chrec_b, 2987 overlaps_a, overlaps_b, last_conflicts); 2988 2989 if (CF_NOT_KNOWN_P (*overlaps_a) 2990 || CF_NOT_KNOWN_P (*overlaps_b)) 2991 dependence_stats.num_miv_unimplemented++; 2992 else if (CF_NO_DEPENDENCE_P (*overlaps_a) 2993 || CF_NO_DEPENDENCE_P (*overlaps_b)) 2994 dependence_stats.num_miv_independent++; 2995 else 2996 dependence_stats.num_miv_dependent++; 2997 } 2998 2999 else 3000 { 3001 /* When the analysis is too difficult, answer "don't know". 
*/ 3002 if (dump_file && (dump_flags & TDF_DETAILS)) 3003 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n"); 3004 3005 *overlaps_a = conflict_fn_not_known (); 3006 *overlaps_b = conflict_fn_not_known (); 3007 *last_conflicts = chrec_dont_know; 3008 dependence_stats.num_miv_unimplemented++; 3009 } 3010 3011 if (dump_file && (dump_flags & TDF_DETAILS)) 3012 fprintf (dump_file, ")\n"); 3013 } 3014 3015 /* Determines the iterations for which CHREC_A is equal to CHREC_B 3016 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and 3017 OVERLAP_ITERATIONS_B are initialized with two functions that 3018 describe the iterations that contain conflicting elements. 3019 3020 Remark: For an integer k >= 0, the following equality is true: 3021 3022 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)). 3023 */ 3024 3025 static void 3026 analyze_overlapping_iterations (tree chrec_a, 3027 tree chrec_b, 3028 conflict_function **overlap_iterations_a, 3029 conflict_function **overlap_iterations_b, 3030 tree *last_conflicts, struct loop *loop_nest) 3031 { 3032 unsigned int lnn = loop_nest->num; 3033 3034 dependence_stats.num_subscript_tests++; 3035 3036 if (dump_file && (dump_flags & TDF_DETAILS)) 3037 { 3038 fprintf (dump_file, "(analyze_overlapping_iterations \n"); 3039 fprintf (dump_file, " (chrec_a = "); 3040 print_generic_expr (dump_file, chrec_a, 0); 3041 fprintf (dump_file, ")\n (chrec_b = "); 3042 print_generic_expr (dump_file, chrec_b, 0); 3043 fprintf (dump_file, ")\n"); 3044 } 3045 3046 if (chrec_a == NULL_TREE 3047 || chrec_b == NULL_TREE 3048 || chrec_contains_undetermined (chrec_a) 3049 || chrec_contains_undetermined (chrec_b)) 3050 { 3051 dependence_stats.num_subscript_undetermined++; 3052 3053 *overlap_iterations_a = conflict_fn_not_known (); 3054 *overlap_iterations_b = conflict_fn_not_known (); 3055 } 3056 3057 /* If they are the same chrec, and are affine, they overlap 3058 on every iteration. */ 3059 else if (eq_evolutions_p (chrec_a, chrec_b) 3060 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn) 3061 || operand_equal_p (chrec_a, chrec_b, 0))) 3062 { 3063 dependence_stats.num_same_subscript_function++; 3064 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3065 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node)); 3066 *last_conflicts = chrec_dont_know; 3067 } 3068 3069 /* If they aren't the same, and aren't affine, we can't do anything 3070 yet.
*/ 3071 else if ((chrec_contains_symbols (chrec_a) 3072 || chrec_contains_symbols (chrec_b)) 3073 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn) 3074 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn))) 3075 { 3076 dependence_stats.num_subscript_undetermined++; 3077 *overlap_iterations_a = conflict_fn_not_known (); 3078 *overlap_iterations_b = conflict_fn_not_known (); 3079 } 3080 3081 else if (ziv_subscript_p (chrec_a, chrec_b)) 3082 analyze_ziv_subscript (chrec_a, chrec_b, 3083 overlap_iterations_a, overlap_iterations_b, 3084 last_conflicts); 3085 3086 else if (siv_subscript_p (chrec_a, chrec_b)) 3087 analyze_siv_subscript (chrec_a, chrec_b, 3088 overlap_iterations_a, overlap_iterations_b, 3089 last_conflicts, lnn); 3090 3091 else 3092 analyze_miv_subscript (chrec_a, chrec_b, 3093 overlap_iterations_a, overlap_iterations_b, 3094 last_conflicts, loop_nest); 3095 3096 if (dump_file && (dump_flags & TDF_DETAILS)) 3097 { 3098 fprintf (dump_file, " (overlap_iterations_a = "); 3099 dump_conflict_function (dump_file, *overlap_iterations_a); 3100 fprintf (dump_file, ")\n (overlap_iterations_b = "); 3101 dump_conflict_function (dump_file, *overlap_iterations_b); 3102 fprintf (dump_file, "))\n"); 3103 } 3104 } 3105 3106 /* Helper function for uniquely inserting distance vectors. */ 3107 3108 static void 3109 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v) 3110 { 3111 unsigned i; 3112 lambda_vector v; 3113 3114 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v) 3115 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr))) 3116 return; 3117 3118 DDR_DIST_VECTS (ddr).safe_push (dist_v); 3119 } 3120 3121 /* Helper function for uniquely inserting direction vectors. */ 3122 3123 static void 3124 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v) 3125 { 3126 unsigned i; 3127 lambda_vector v; 3128 3129 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v) 3130 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr))) 3131 return; 3132 3133 DDR_DIR_VECTS (ddr).safe_push (dir_v); 3134 } 3135 3136 /* Add a distance of 1 on all the loops outer than INDEX. If we 3137 haven't yet determined a distance for this outer loop, push a new 3138 distance vector composed of the previous distance, and a distance 3139 of 1 for this outer loop. Example: 3140 3141 | loop_1 3142 | loop_2 3143 | A[10] 3144 | endloop_2 3145 | endloop_1 3146 3147 Saved vectors are of the form (dist_in_1, dist_in_2). First, we 3148 save (0, 1), then we have to save (1, 0). */ 3149 3150 static void 3151 add_outer_distances (struct data_dependence_relation *ddr, 3152 lambda_vector dist_v, int index) 3153 { 3154 /* For each outer loop where init_v is not set, the accesses are 3155 in dependence of distance 1 in the loop. */ 3156 while (--index >= 0) 3157 { 3158 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3159 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr)); 3160 save_v[index] = 1; 3161 save_dist_v (ddr, save_v); 3162 } 3163 } 3164 3165 /* Return false when fail to represent the data dependence as a 3166 distance vector. INIT_B is set to true when a component has been 3167 added to the distance vector DIST_V. INDEX_CARRY is then set to 3168 the index in DIST_V that carries the dependence. 
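For example, for the pair A[{2, +, 1}_1] and A[{0, +, 1}_1] the subscript distance is the constant 2 (up to sign, depending on which reference comes first), a single component of DIST_V is filled in for loop_1, and *INDEX_CARRY becomes the index of loop_1 in the nest.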
*/ 3169 3170 static bool 3171 build_classic_dist_vector_1 (struct data_dependence_relation *ddr, 3172 struct data_reference *ddr_a, 3173 struct data_reference *ddr_b, 3174 lambda_vector dist_v, bool *init_b, 3175 int *index_carry) 3176 { 3177 unsigned i; 3178 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3179 3180 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 3181 { 3182 tree access_fn_a, access_fn_b; 3183 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i); 3184 3185 if (chrec_contains_undetermined (SUB_DISTANCE (subscript))) 3186 { 3187 non_affine_dependence_relation (ddr); 3188 return false; 3189 } 3190 3191 access_fn_a = DR_ACCESS_FN (ddr_a, i); 3192 access_fn_b = DR_ACCESS_FN (ddr_b, i); 3193 3194 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC 3195 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC) 3196 { 3197 HOST_WIDE_INT dist; 3198 int index; 3199 int var_a = CHREC_VARIABLE (access_fn_a); 3200 int var_b = CHREC_VARIABLE (access_fn_b); 3201 3202 if (var_a != var_b 3203 || chrec_contains_undetermined (SUB_DISTANCE (subscript))) 3204 { 3205 non_affine_dependence_relation (ddr); 3206 return false; 3207 } 3208 3209 dist = int_cst_value (SUB_DISTANCE (subscript)); 3210 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr)); 3211 *index_carry = MIN (index, *index_carry); 3212 3213 /* This is the subscript coupling test. If we have already 3214 recorded a distance for this loop (a distance coming from 3215 another subscript), it should be the same. For example, 3216 in the following code, there is no dependence: 3217 3218 | loop i = 0, N, 1 3219 | T[i+1][i] = ... 3220 | ... = T[i][i] 3221 | endloop 3222 */ 3223 if (init_v[index] != 0 && dist_v[index] != dist) 3224 { 3225 finalize_ddr_dependent (ddr, chrec_known); 3226 return false; 3227 } 3228 3229 dist_v[index] = dist; 3230 init_v[index] = 1; 3231 *init_b = true; 3232 } 3233 else if (!operand_equal_p (access_fn_a, access_fn_b, 0)) 3234 { 3235 /* This can be for example an affine vs. constant dependence 3236 (T[i] vs. T[3]) that is not an affine dependence and is 3237 not representable as a distance vector. */ 3238 non_affine_dependence_relation (ddr); 3239 return false; 3240 } 3241 } 3242 3243 return true; 3244 } 3245 3246 /* Return true when the DDR contains only constant access functions. */ 3247 3248 static bool 3249 constant_access_functions (const struct data_dependence_relation *ddr) 3250 { 3251 unsigned i; 3252 3253 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 3254 if (!evolution_function_is_constant_p (DR_ACCESS_FN (DDR_A (ddr), i)) 3255 || !evolution_function_is_constant_p (DR_ACCESS_FN (DDR_B (ddr), i))) 3256 return false; 3257 3258 return true; 3259 } 3260 3261 /* Helper function for the case where DDR_A and DDR_B are the same 3262 multivariate access function with a constant step. For an example 3263 see pr34635-1.c. */ 3264 3265 static void 3266 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2) 3267 { 3268 int x_1, x_2; 3269 tree c_1 = CHREC_LEFT (c_2); 3270 tree c_0 = CHREC_LEFT (c_1); 3271 lambda_vector dist_v; 3272 HOST_WIDE_INT v1, v2, cd; 3273 3274 /* Polynomials with more than 2 variables are not handled yet. When 3275 the evolution steps are parameters, it is not possible to 3276 represent the dependence using classical distance vectors. 
*/ 3277 if (TREE_CODE (c_0) != INTEGER_CST 3278 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST 3279 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST) 3280 { 3281 DDR_AFFINE_P (ddr) = false; 3282 return; 3283 } 3284 3285 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr)); 3286 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr)); 3287 3288 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */ 3289 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3290 v1 = int_cst_value (CHREC_RIGHT (c_1)); 3291 v2 = int_cst_value (CHREC_RIGHT (c_2)); 3292 cd = gcd (v1, v2); 3293 v1 /= cd; 3294 v2 /= cd; 3295 3296 if (v2 < 0) 3297 { 3298 v2 = -v2; 3299 v1 = -v1; 3300 } 3301 3302 dist_v[x_1] = v2; 3303 dist_v[x_2] = -v1; 3304 save_dist_v (ddr, dist_v); 3305 3306 add_outer_distances (ddr, dist_v, x_1); 3307 } 3308 3309 /* Helper function for the case where DDR_A and DDR_B are the same 3310 access functions. */ 3311 3312 static void 3313 add_other_self_distances (struct data_dependence_relation *ddr) 3314 { 3315 lambda_vector dist_v; 3316 unsigned i; 3317 int index_carry = DDR_NB_LOOPS (ddr); 3318 3319 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 3320 { 3321 tree access_fun = DR_ACCESS_FN (DDR_A (ddr), i); 3322 3323 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC) 3324 { 3325 if (!evolution_function_is_univariate_p (access_fun)) 3326 { 3327 if (DDR_NUM_SUBSCRIPTS (ddr) != 1) 3328 { 3329 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know; 3330 return; 3331 } 3332 3333 access_fun = DR_ACCESS_FN (DDR_A (ddr), 0); 3334 3335 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC) 3336 add_multivariate_self_dist (ddr, access_fun); 3337 else 3338 /* The evolution step is not constant: it varies in 3339 the outer loop, so this cannot be represented by a 3340 distance vector. For example in pr34635.c the 3341 evolution is {0, +, {0, +, 4}_1}_2. */ 3342 DDR_AFFINE_P (ddr) = false; 3343 3344 return; 3345 } 3346 3347 index_carry = MIN (index_carry, 3348 index_in_loop_nest (CHREC_VARIABLE (access_fun), 3349 DDR_LOOP_NEST (ddr))); 3350 } 3351 } 3352 3353 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3354 add_outer_distances (ddr, dist_v, index_carry); 3355 } 3356 3357 static void 3358 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr) 3359 { 3360 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3361 3362 dist_v[DDR_INNER_LOOP (ddr)] = 1; 3363 save_dist_v (ddr, dist_v); 3364 } 3365 3366 /* Adds a unit distance vector to DDR when there is a 0 overlap. This 3367 is the case for example when access functions are the same and 3368 equal to a constant, as in: 3369 3370 | loop_1 3371 | A[3] = ... 3372 | ... = A[3] 3373 | endloop_1 3374 3375 in which case the distance vectors are (0) and (1). */ 3376 3377 static void 3378 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr) 3379 { 3380 unsigned i, j; 3381 3382 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++) 3383 { 3384 subscript_p sub = DDR_SUBSCRIPT (ddr, i); 3385 conflict_function *ca = SUB_CONFLICTS_IN_A (sub); 3386 conflict_function *cb = SUB_CONFLICTS_IN_B (sub); 3387 3388 for (j = 0; j < ca->n; j++) 3389 if (affine_function_zero_p (ca->fns[j])) 3390 { 3391 insert_innermost_unit_dist_vector (ddr); 3392 return; 3393 } 3394 3395 for (j = 0; j < cb->n; j++) 3396 if (affine_function_zero_p (cb->fns[j])) 3397 { 3398 insert_innermost_unit_dist_vector (ddr); 3399 return; 3400 } 3401 } 3402 } 3403 3404 /* Compute the classic per loop distance vector. 
DDR is the data 3405 dependence relation to build a vector from. Return false when fail 3406 to represent the data dependence as a distance vector. */ 3407 3408 static bool 3409 build_classic_dist_vector (struct data_dependence_relation *ddr, 3410 struct loop *loop_nest) 3411 { 3412 bool init_b = false; 3413 int index_carry = DDR_NB_LOOPS (ddr); 3414 lambda_vector dist_v; 3415 3416 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE) 3417 return false; 3418 3419 if (same_access_functions (ddr)) 3420 { 3421 /* Save the 0 vector. */ 3422 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3423 save_dist_v (ddr, dist_v); 3424 3425 if (constant_access_functions (ddr)) 3426 add_distance_for_zero_overlaps (ddr); 3427 3428 if (DDR_NB_LOOPS (ddr) > 1) 3429 add_other_self_distances (ddr); 3430 3431 return true; 3432 } 3433 3434 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3435 if (!build_classic_dist_vector_1 (ddr, DDR_A (ddr), DDR_B (ddr), 3436 dist_v, &init_b, &index_carry)) 3437 return false; 3438 3439 /* Save the distance vector if we initialized one. */ 3440 if (init_b) 3441 { 3442 /* Verify a basic constraint: classic distance vectors should 3443 always be lexicographically positive. 3444 3445 Data references are collected in the order of execution of 3446 the program, thus for the following loop 3447 3448 | for (i = 1; i < 100; i++) 3449 | for (j = 1; j < 100; j++) 3450 | { 3451 | t = T[j+1][i-1]; // A 3452 | T[j][i] = t + 2; // B 3453 | } 3454 3455 references are collected following the direction of the wind: 3456 A then B. The data dependence tests are performed also 3457 following this order, such that we're looking at the distance 3458 separating the elements accessed by A from the elements later 3459 accessed by B. But in this example, the distance returned by 3460 test_dep (A, B) is lexicographically negative (-1, 1), that 3461 means that the access A occurs later than B with respect to 3462 the outer loop, ie. we're actually looking upwind. In this 3463 case we solve test_dep (B, A) looking downwind to the 3464 lexicographically positive solution, that returns the 3465 distance vector (1, -1). 
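When the references are swapped this way, DDR_REVERSED_P is set on the relation, so that consumers of the distance vectors know they describe the pair (B, A) rather than (A, B).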
*/ 3466 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr))) 3467 { 3468 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3469 if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr), 3470 loop_nest)) 3471 return false; 3472 compute_subscript_distance (ddr); 3473 if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr), 3474 save_v, &init_b, &index_carry)) 3475 return false; 3476 save_dist_v (ddr, save_v); 3477 DDR_REVERSED_P (ddr) = true; 3478 3479 /* In this case there is a dependence forward for all the 3480 outer loops: 3481 3482 | for (k = 1; k < 100; k++) 3483 | for (i = 1; i < 100; i++) 3484 | for (j = 1; j < 100; j++) 3485 | { 3486 | t = T[j+1][i-1]; // A 3487 | T[j][i] = t + 2; // B 3488 | } 3489 3490 the vectors are: 3491 (0, 1, -1) 3492 (1, 1, -1) 3493 (1, -1, 1) 3494 */ 3495 if (DDR_NB_LOOPS (ddr) > 1) 3496 { 3497 add_outer_distances (ddr, save_v, index_carry); 3498 add_outer_distances (ddr, dist_v, index_carry); 3499 } 3500 } 3501 else 3502 { 3503 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3504 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr)); 3505 3506 if (DDR_NB_LOOPS (ddr) > 1) 3507 { 3508 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3509 3510 if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), 3511 DDR_A (ddr), loop_nest)) 3512 return false; 3513 compute_subscript_distance (ddr); 3514 if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr), 3515 opposite_v, &init_b, 3516 &index_carry)) 3517 return false; 3518 3519 save_dist_v (ddr, save_v); 3520 add_outer_distances (ddr, dist_v, index_carry); 3521 add_outer_distances (ddr, opposite_v, index_carry); 3522 } 3523 else 3524 save_dist_v (ddr, save_v); 3525 } 3526 } 3527 else 3528 { 3529 /* There is a distance of 1 on all the outer loops: Example: 3530 there is a dependence of distance 1 on loop_1 for the array A. 3531 3532 | loop_1 3533 | A[5] = ... 3534 | endloop 3535 */ 3536 add_outer_distances (ddr, dist_v, 3537 lambda_vector_first_nz (dist_v, 3538 DDR_NB_LOOPS (ddr), 0)); 3539 } 3540 3541 if (dump_file && (dump_flags & TDF_DETAILS)) 3542 { 3543 unsigned i; 3544 3545 fprintf (dump_file, "(build_classic_dist_vector\n"); 3546 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++) 3547 { 3548 fprintf (dump_file, " dist_vector = ("); 3549 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i), 3550 DDR_NB_LOOPS (ddr)); 3551 fprintf (dump_file, " )\n"); 3552 } 3553 fprintf (dump_file, ")\n"); 3554 } 3555 3556 return true; 3557 } 3558 3559 /* Return the direction for a given distance. 3560 FIXME: Computing dir this way is suboptimal, since dir can catch 3561 cases that dist is unable to represent. */ 3562 3563 static inline enum data_dependence_direction 3564 dir_from_dist (int dist) 3565 { 3566 if (dist > 0) 3567 return dir_positive; 3568 else if (dist < 0) 3569 return dir_negative; 3570 else 3571 return dir_equal; 3572 } 3573 3574 /* Compute the classic per loop direction vector. DDR is the data 3575 dependence relation to build a vector from. */ 3576 3577 static void 3578 build_classic_dir_vector (struct data_dependence_relation *ddr) 3579 { 3580 unsigned i, j; 3581 lambda_vector dist_v; 3582 3583 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) 3584 { 3585 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr)); 3586 3587 for (j = 0; j < DDR_NB_LOOPS (ddr); j++) 3588 dir_v[j] = dir_from_dist (dist_v[j]); 3589 3590 save_dir_v (ddr, dir_v); 3591 } 3592 } 3593 3594 /* Helper function. 
Returns true when there is a dependence between 3595 data references DRA and DRB. */ 3596 3597 static bool 3598 subscript_dependence_tester_1 (struct data_dependence_relation *ddr, 3599 struct data_reference *dra, 3600 struct data_reference *drb, 3601 struct loop *loop_nest) 3602 { 3603 unsigned int i; 3604 tree last_conflicts; 3605 struct subscript *subscript; 3606 tree res = NULL_TREE; 3607 3608 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++) 3609 { 3610 conflict_function *overlaps_a, *overlaps_b; 3611 3612 analyze_overlapping_iterations (DR_ACCESS_FN (dra, i), 3613 DR_ACCESS_FN (drb, i), 3614 &overlaps_a, &overlaps_b, 3615 &last_conflicts, loop_nest); 3616 3617 if (SUB_CONFLICTS_IN_A (subscript)) 3618 free_conflict_function (SUB_CONFLICTS_IN_A (subscript)); 3619 if (SUB_CONFLICTS_IN_B (subscript)) 3620 free_conflict_function (SUB_CONFLICTS_IN_B (subscript)); 3621 3622 SUB_CONFLICTS_IN_A (subscript) = overlaps_a; 3623 SUB_CONFLICTS_IN_B (subscript) = overlaps_b; 3624 SUB_LAST_CONFLICT (subscript) = last_conflicts; 3625 3626 /* If there is any undetermined conflict function we have to 3627 give a conservative answer in case we cannot prove that 3628 no dependence exists when analyzing another subscript. */ 3629 if (CF_NOT_KNOWN_P (overlaps_a) 3630 || CF_NOT_KNOWN_P (overlaps_b)) 3631 { 3632 res = chrec_dont_know; 3633 continue; 3634 } 3635 3636 /* When there is a subscript with no dependence we can stop. */ 3637 else if (CF_NO_DEPENDENCE_P (overlaps_a) 3638 || CF_NO_DEPENDENCE_P (overlaps_b)) 3639 { 3640 res = chrec_known; 3641 break; 3642 } 3643 } 3644 3645 if (res == NULL_TREE) 3646 return true; 3647 3648 if (res == chrec_known) 3649 dependence_stats.num_dependence_independent++; 3650 else 3651 dependence_stats.num_dependence_undetermined++; 3652 finalize_ddr_dependent (ddr, res); 3653 return false; 3654 } 3655 3656 /* Computes the conflicting iterations in LOOP_NEST, and initialize DDR. */ 3657 3658 static void 3659 subscript_dependence_tester (struct data_dependence_relation *ddr, 3660 struct loop *loop_nest) 3661 { 3662 if (subscript_dependence_tester_1 (ddr, DDR_A (ddr), DDR_B (ddr), loop_nest)) 3663 dependence_stats.num_dependence_dependent++; 3664 3665 compute_subscript_distance (ddr); 3666 if (build_classic_dist_vector (ddr, loop_nest)) 3667 build_classic_dir_vector (ddr); 3668 } 3669 3670 /* Returns true when all the access functions of A are affine or 3671 constant with respect to LOOP_NEST. */ 3672 3673 static bool 3674 access_functions_are_affine_or_constant_p (const struct data_reference *a, 3675 const struct loop *loop_nest) 3676 { 3677 unsigned int i; 3678 vec<tree> fns = DR_ACCESS_FNS (a); 3679 tree t; 3680 3681 FOR_EACH_VEC_ELT (fns, i, t) 3682 if (!evolution_function_is_invariant_p (t, loop_nest->num) 3683 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num)) 3684 return false; 3685 3686 return true; 3687 } 3688 3689 /* This computes the affine dependence relation between A and B with 3690 respect to LOOP_NEST. CHREC_KNOWN is used for representing the 3691 independence between two accesses, while CHREC_DONT_KNOW is used 3692 for representing the unknown relation. 3693 3694 Note that it is possible to stop the computation of the dependence 3695 relation the first time we detect a CHREC_KNOWN element for a given 3696 subscript. 
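For instance, for A[i][0] versus A[i][1] the second subscript compares the constants 0 and 1: the ZIV test answers chrec_known for it, which is enough to declare the whole relation independent, whatever the first subscript says.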
*/ 3697 3698 void 3699 compute_affine_dependence (struct data_dependence_relation *ddr, 3700 struct loop *loop_nest) 3701 { 3702 struct data_reference *dra = DDR_A (ddr); 3703 struct data_reference *drb = DDR_B (ddr); 3704 3705 if (dump_file && (dump_flags & TDF_DETAILS)) 3706 { 3707 fprintf (dump_file, "(compute_affine_dependence\n"); 3708 fprintf (dump_file, " stmt_a: "); 3709 print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM); 3710 fprintf (dump_file, " stmt_b: "); 3711 print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM); 3712 } 3713 3714 /* Analyze only when the dependence relation is not yet known. */ 3715 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) 3716 { 3717 dependence_stats.num_dependence_tests++; 3718 3719 if (access_functions_are_affine_or_constant_p (dra, loop_nest) 3720 && access_functions_are_affine_or_constant_p (drb, loop_nest)) 3721 subscript_dependence_tester (ddr, loop_nest); 3722 3723 /* As a last case, if the dependence cannot be determined, or if 3724 the dependence is considered too difficult to determine, answer 3725 "don't know". */ 3726 else 3727 { 3728 dependence_stats.num_dependence_undetermined++; 3729 3730 if (dump_file && (dump_flags & TDF_DETAILS)) 3731 { 3732 fprintf (dump_file, "Data ref a:\n"); 3733 dump_data_reference (dump_file, dra); 3734 fprintf (dump_file, "Data ref b:\n"); 3735 dump_data_reference (dump_file, drb); 3736 fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n"); 3737 } 3738 finalize_ddr_dependent (ddr, chrec_dont_know); 3739 } 3740 } 3741 3742 if (dump_file && (dump_flags & TDF_DETAILS)) 3743 { 3744 if (DDR_ARE_DEPENDENT (ddr) == chrec_known) 3745 fprintf (dump_file, ") -> no dependence\n"); 3746 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) 3747 fprintf (dump_file, ") -> dependence analysis failed\n"); 3748 else 3749 fprintf (dump_file, ")\n"); 3750 } 3751 } 3752 3753 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all 3754 the data references in DATAREFS, in the LOOP_NEST. When 3755 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self 3756 relations. Return true when successful, i.e. data references number 3757 is small enough to be handled. */ 3758 3759 bool 3760 compute_all_dependences (vec<data_reference_p> datarefs, 3761 vec<ddr_p> *dependence_relations, 3762 vec<loop_p> loop_nest, 3763 bool compute_self_and_rr) 3764 { 3765 struct data_dependence_relation *ddr; 3766 struct data_reference *a, *b; 3767 unsigned int i, j; 3768 3769 if ((int) datarefs.length () 3770 > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS)) 3771 { 3772 struct data_dependence_relation *ddr; 3773 3774 /* Insert a single relation into dependence_relations: 3775 chrec_dont_know. 
*/ 3776 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest); 3777 dependence_relations->safe_push (ddr); 3778 return false; 3779 } 3780 3781 FOR_EACH_VEC_ELT (datarefs, i, a) 3782 for (j = i + 1; datarefs.iterate (j, &b); j++) 3783 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr) 3784 { 3785 ddr = initialize_data_dependence_relation (a, b, loop_nest); 3786 dependence_relations->safe_push (ddr); 3787 if (loop_nest.exists ()) 3788 compute_affine_dependence (ddr, loop_nest[0]); 3789 } 3790 3791 if (compute_self_and_rr) 3792 FOR_EACH_VEC_ELT (datarefs, i, a) 3793 { 3794 ddr = initialize_data_dependence_relation (a, a, loop_nest); 3795 dependence_relations->safe_push (ddr); 3796 if (loop_nest.exists ()) 3797 compute_affine_dependence (ddr, loop_nest[0]); 3798 } 3799 3800 return true; 3801 } 3802 3803 /* Describes a location of a memory reference. */ 3804 3805 struct data_ref_loc 3806 { 3807 /* The memory reference. */ 3808 tree ref; 3809 3810 /* True if the memory reference is read. */ 3811 bool is_read; 3812 }; 3813 3814 3815 /* Stores the locations of memory references in STMT to REFERENCES. Returns 3816 true if STMT clobbers memory, false otherwise. */ 3817 3818 static bool 3819 get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references) 3820 { 3821 bool clobbers_memory = false; 3822 data_ref_loc ref; 3823 tree op0, op1; 3824 enum gimple_code stmt_code = gimple_code (stmt); 3825 3826 /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects. 3827 As we cannot model data-references to not spelled out 3828 accesses give up if they may occur. */ 3829 if (stmt_code == GIMPLE_CALL 3830 && !(gimple_call_flags (stmt) & ECF_CONST)) 3831 { 3832 /* Allow IFN_GOMP_SIMD_LANE in their own loops. */ 3833 if (gimple_call_internal_p (stmt)) 3834 switch (gimple_call_internal_fn (stmt)) 3835 { 3836 case IFN_GOMP_SIMD_LANE: 3837 { 3838 struct loop *loop = gimple_bb (stmt)->loop_father; 3839 tree uid = gimple_call_arg (stmt, 0); 3840 gcc_assert (TREE_CODE (uid) == SSA_NAME); 3841 if (loop == NULL 3842 || loop->simduid != SSA_NAME_VAR (uid)) 3843 clobbers_memory = true; 3844 break; 3845 } 3846 case IFN_MASK_LOAD: 3847 case IFN_MASK_STORE: 3848 break; 3849 default: 3850 clobbers_memory = true; 3851 break; 3852 } 3853 else 3854 clobbers_memory = true; 3855 } 3856 else if (stmt_code == GIMPLE_ASM 3857 && (gimple_asm_volatile_p (as_a <gasm *> (stmt)) 3858 || gimple_vuse (stmt))) 3859 clobbers_memory = true; 3860 3861 if (!gimple_vuse (stmt)) 3862 return clobbers_memory; 3863 3864 if (stmt_code == GIMPLE_ASSIGN) 3865 { 3866 tree base; 3867 op0 = gimple_assign_lhs (stmt); 3868 op1 = gimple_assign_rhs1 (stmt); 3869 3870 if (DECL_P (op1) 3871 || (REFERENCE_CLASS_P (op1) 3872 && (base = get_base_address (op1)) 3873 && TREE_CODE (base) != SSA_NAME 3874 && !is_gimple_min_invariant (base))) 3875 { 3876 ref.ref = op1; 3877 ref.is_read = true; 3878 references->safe_push (ref); 3879 } 3880 } 3881 else if (stmt_code == GIMPLE_CALL) 3882 { 3883 unsigned i, n; 3884 tree ptr, type; 3885 unsigned int align; 3886 3887 ref.is_read = false; 3888 if (gimple_call_internal_p (stmt)) 3889 switch (gimple_call_internal_fn (stmt)) 3890 { 3891 case IFN_MASK_LOAD: 3892 if (gimple_call_lhs (stmt) == NULL_TREE) 3893 break; 3894 ref.is_read = true; 3895 /* FALLTHRU */ 3896 case IFN_MASK_STORE: 3897 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0); 3898 align = tree_to_shwi (gimple_call_arg (stmt, 1)); 3899 if (ref.is_read) 3900 type = TREE_TYPE (gimple_call_lhs (stmt)); 3901 else 3902 
/* Describes a location of a memory reference.  */

struct data_ref_loc
{
  /* The memory reference.  */
  tree ref;

  /* True if the memory reference is read.  */
  bool is_read;
};


/* Stores the locations of memory references in STMT to REFERENCES.  Returns
   true if STMT clobbers memory, false otherwise.  */

static bool
get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
  bool clobbers_memory = false;
  data_ref_loc ref;
  tree op0, op1;
  enum gimple_code stmt_code = gimple_code (stmt);

  /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
     Since we cannot model data references for accesses that are not
     explicitly spelled out, give up if such accesses may occur.  */
  if (stmt_code == GIMPLE_CALL
      && !(gimple_call_flags (stmt) & ECF_CONST))
    {
      /* Allow IFN_GOMP_SIMD_LANE in their own loops.  */
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
            {
              struct loop *loop = gimple_bb (stmt)->loop_father;
              tree uid = gimple_call_arg (stmt, 0);
              gcc_assert (TREE_CODE (uid) == SSA_NAME);
              if (loop == NULL
                  || loop->simduid != SSA_NAME_VAR (uid))
                clobbers_memory = true;
              break;
            }
          case IFN_MASK_LOAD:
          case IFN_MASK_STORE:
            break;
          default:
            clobbers_memory = true;
            break;
          }
      else
        clobbers_memory = true;
    }
  else if (stmt_code == GIMPLE_ASM
           && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
               || gimple_vuse (stmt)))
    clobbers_memory = true;

  if (!gimple_vuse (stmt))
    return clobbers_memory;

  if (stmt_code == GIMPLE_ASSIGN)
    {
      tree base;
      op0 = gimple_assign_lhs (stmt);
      op1 = gimple_assign_rhs1 (stmt);

      if (DECL_P (op1)
          || (REFERENCE_CLASS_P (op1)
              && (base = get_base_address (op1))
              && TREE_CODE (base) != SSA_NAME
              && !is_gimple_min_invariant (base)))
        {
          ref.ref = op1;
          ref.is_read = true;
          references->safe_push (ref);
        }
    }
  else if (stmt_code == GIMPLE_CALL)
    {
      unsigned i, n;
      tree ptr, type;
      unsigned int align;

      ref.is_read = false;
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_LOAD:
            if (gimple_call_lhs (stmt) == NULL_TREE)
              break;
            ref.is_read = true;
            /* FALLTHRU */
          case IFN_MASK_STORE:
            ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
            align = tree_to_shwi (gimple_call_arg (stmt, 1));
            if (ref.is_read)
              type = TREE_TYPE (gimple_call_lhs (stmt));
            else
              type = TREE_TYPE (gimple_call_arg (stmt, 3));
            if (TYPE_ALIGN (type) != align)
              type = build_aligned_type (type, align);
            ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                                   ptr);
            references->safe_push (ref);
            return false;
          default:
            break;
          }

      op0 = gimple_call_lhs (stmt);
      n = gimple_call_num_args (stmt);
      for (i = 0; i < n; i++)
        {
          op1 = gimple_call_arg (stmt, i);

          if (DECL_P (op1)
              || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
            {
              ref.ref = op1;
              ref.is_read = true;
              references->safe_push (ref);
            }
        }
    }
  else
    return clobbers_memory;

  if (op0
      && (DECL_P (op0)
          || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
    {
      ref.ref = op0;
      ref.is_read = false;
      references->safe_push (ref);
    }
  return clobbers_memory;
}


/* Returns true if the loop-nest has any data reference.  */

bool
loop_nest_has_data_refs (loop_p loop)
{
  basic_block *bbs = get_loop_body (loop);
  auto_vec<data_ref_loc, 3> references;

  for (unsigned i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator bsi;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple *stmt = gsi_stmt (bsi);
          get_references_in_stmt (stmt, &references);
          if (references.length ())
            {
              free (bbs);
              return true;
            }
        }
    }
  free (bbs);

  if (loop->inner)
    {
      loop = loop->inner;
      while (loop)
        {
          if (loop_nest_has_data_refs (loop))
            return true;
          loop = loop->next;
        }
    }
  return false;
}

/* Stores the data references in STMT to DATAREFS.  If there is an unanalyzable
   reference, returns false, otherwise returns true.  NEST is the outermost
   loop of the loop nest in which the references should be analyzed.  */

bool
find_data_references_in_stmt (struct loop *nest, gimple *stmt,
                              vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop_containing_stmt (stmt),
                            ref->ref, stmt, ref->is_read);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}
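/* For illustration: for the GIMPLE statements

   | _1 = B[i_2];
   | A[i_2] = _3;

   get_references_in_stmt records B[i_2] as a read location and A[i_2] as
   a write location, and find_data_references_in_stmt turns each location
   into a fully analyzed data_reference.  A per-statement driver could
   look like the sketch below, assuming NEST, BB and DATAREFS are set up
   by the caller and "gsi" is a local gimple_stmt_iterator:

   | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
   |   if (!find_data_references_in_stmt (nest, gsi_stmt (gsi), &datarefs))
   |     return false;

   A false result means the statement may touch memory in a way that
   cannot be modeled, in which case the caller should give up on the
   whole block, as find_data_references_in_bb does below.  */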
/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns false, otherwise returns true.
   NEST is the outermost loop of the loop nest in which the references
   should be instantiated, LOOP is the loop in which the references
   should be analyzed.  */

bool
graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple *stmt,
                                       vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}

/* Search the data references in BB, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.  */

tree
find_data_references_in_bb (struct loop *loop, basic_block bb,
                            vec<data_reference_p> *datarefs)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);

      if (!find_data_references_in_stmt (loop, stmt, datarefs))
        {
          struct data_reference *res;
          res = XCNEW (struct data_reference);
          datarefs->safe_push (res);

          return chrec_dont_know;
        }
    }

  return NULL_TREE;
}

/* Search the data references in LOOP, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.

   TODO: This function should be made smarter so that it can handle address
   arithmetic as if it were an array access, etc.  */

tree
find_data_references_in_loop (struct loop *loop,
                              vec<data_reference_p> *datarefs)
{
  basic_block bb, *bbs;
  unsigned int i;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
        {
          free (bbs);
          return chrec_dont_know;
        }
    }
  free (bbs);

  return NULL_TREE;
}

/* Recursive helper function for find_loop_nest.  */

static bool
find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
{
  /* Inner loops of the nest should not contain siblings.  Example:
     when there are two consecutive loops,

     | loop_0
     |   loop_1
     |     A[{0, +, 1}_1]
     |   endloop_1
     |   loop_2
     |     A[{0, +, 1}_2]
     |   endloop_2
     | endloop_0

     the dependence relation cannot be captured by the distance
     abstraction.  */
  if (loop->next)
    return false;

  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}

/* Return false when the LOOP is not well nested.  Otherwise return
   true and insert in LOOP_NEST the loops of the nest.  LOOP_NEST will
   contain the loops from the outermost to the innermost, as they will
   appear in the classic distance vector.  */

bool
find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
{
  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}
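/* For illustration: a sketch of how the two collectors above are usually
   combined before calling compute_all_dependences.  LOOP is assumed to
   be the outermost loop of interest; the vector names are hypothetical:

   | vec<loop_p> loop_nest = vNULL;
   | vec<data_reference_p> datarefs = vNULL;
   |
   | if (!find_loop_nest (loop, &loop_nest)
   |     || (find_data_references_in_loop (loop, &datarefs)
   |         == chrec_dont_know))
   |   {
   |     free_data_refs (datarefs);
   |     loop_nest.release ();
   |     return false;
   |   }

   Note the two failure conventions: find_loop_nest returns a bool, while
   find_data_references_in_loop follows the chrec convention and returns
   chrec_dont_know on failure and NULL_TREE on success.  */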
/* Returns true when the data dependences have been computed, false otherwise.
   Given a loop nest LOOP, the following vectors are returned:
   DATAREFS is initialized to all the data references contained in this loop,
   DEPENDENCE_RELATIONS contains the relations between the data references.
   Compute read-read and self relations if
   COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE.  */

bool
compute_data_dependences_for_loop (struct loop *loop,
                                   bool compute_self_and_read_read_dependences,
                                   vec<loop_p> *loop_nest,
                                   vec<data_reference_p> *datarefs,
                                   vec<ddr_p> *dependence_relations)
{
  bool res = true;

  memset (&dependence_stats, 0, sizeof (dependence_stats));

  /* If the loop nest is not well formed, or one of the data references
     is not computable, give up without spending time to compute other
     dependences.  */
  if (!loop
      || !find_loop_nest (loop, loop_nest)
      || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
      || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
                                   compute_self_and_read_read_dependences))
    res = false;

  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Dependence tester statistics:\n");

      fprintf (dump_file, "Number of dependence tests: %d\n",
               dependence_stats.num_dependence_tests);
      fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
               dependence_stats.num_dependence_dependent);
      fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
               dependence_stats.num_dependence_independent);
      fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
               dependence_stats.num_dependence_undetermined);

      fprintf (dump_file, "Number of subscript tests: %d\n",
               dependence_stats.num_subscript_tests);
      fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
               dependence_stats.num_subscript_undetermined);
      fprintf (dump_file, "Number of same subscript function: %d\n",
               dependence_stats.num_same_subscript_function);

      fprintf (dump_file, "Number of ziv tests: %d\n",
               dependence_stats.num_ziv);
      fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
               dependence_stats.num_ziv_dependent);
      fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
               dependence_stats.num_ziv_independent);
      fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
               dependence_stats.num_ziv_unimplemented);

      fprintf (dump_file, "Number of siv tests: %d\n",
               dependence_stats.num_siv);
      fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
               dependence_stats.num_siv_dependent);
      fprintf (dump_file, "Number of siv tests returning independent: %d\n",
               dependence_stats.num_siv_independent);
      fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
               dependence_stats.num_siv_unimplemented);

      fprintf (dump_file, "Number of miv tests: %d\n",
               dependence_stats.num_miv);
      fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
               dependence_stats.num_miv_dependent);
      fprintf (dump_file, "Number of miv tests returning independent: %d\n",
               dependence_stats.num_miv_independent);
      fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
               dependence_stats.num_miv_unimplemented);
    }

  return res;
}
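/* For illustration: a minimal end-to-end sketch using the single entry
   point above instead of calling the collectors one by one.  The
   variable names are hypothetical; the matching cleanup helpers are
   defined right below:

   | vec<loop_p> loop_nest = vNULL;
   | vec<data_reference_p> datarefs = vNULL;
   | vec<ddr_p> ddrs = vNULL;
   | unsigned int i;
   | struct data_dependence_relation *ddr;
   |
   | if (compute_data_dependences_for_loop (loop, true, &loop_nest,
   |                                        &datarefs, &ddrs))
   |   FOR_EACH_VEC_ELT (ddrs, i, ddr)
   |     if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
   |       break;
   |
   | free_dependence_relations (ddrs);
   | free_data_refs (datarefs);
   | loop_nest.release ();

   Even when the function returns false, the three vectors may contain
   partially computed entries and still have to be released.  */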
/* Free the memory used by a data dependence relation DDR.  */

void
free_dependence_relation (struct data_dependence_relation *ddr)
{
  if (ddr == NULL)
    return;

  if (DDR_SUBSCRIPTS (ddr).exists ())
    free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_DIST_VECTS (ddr).release ();
  DDR_DIR_VECTS (ddr).release ();

  free (ddr);
}

/* Free the memory used by the data dependence relations from
   DEPENDENCE_RELATIONS.  */

void
free_dependence_relations (vec<ddr_p> dependence_relations)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    if (ddr)
      free_dependence_relation (ddr);

  dependence_relations.release ();
}

/* Free the memory used by the data references from DATAREFS.  */

void
free_data_refs (vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    free_data_ref (dr);
  datarefs.release ();
}
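/* For illustration: the teardown helpers above mirror the allocations
   made by the analysis.  A dependence relation does not own its two data
   references, so releasing the relations and releasing the data
   references are separate steps and both are needed, typically in the
   order shown in this hypothetical sketch:

   | free_dependence_relations (dependence_relations);
   | free_data_refs (datarefs);
   | loop_nest.release ();

   free_dependence_relation is a no-op on a NULL pointer, so relation
   vectors that contain NULL entries can be passed to
   free_dependence_relations directly.  */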