1 /* This file contains routines to construct OpenACC and OpenMP constructs, 2 called from parsing in the C and C++ front ends. 3 4 Copyright (C) 2005-2017 Free Software Foundation, Inc. 5 Contributed by Richard Henderson <rth@redhat.com>, 6 Diego Novillo <dnovillo@redhat.com>. 7 8 This file is part of GCC. 9 10 GCC is free software; you can redistribute it and/or modify it under 11 the terms of the GNU General Public License as published by the Free 12 Software Foundation; either version 3, or (at your option) any later 13 version. 14 15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY 16 WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18 for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with GCC; see the file COPYING3. If not see 22 <http://www.gnu.org/licenses/>. */ 23 24 #include "config.h" 25 #include "system.h" 26 #include "coretypes.h" 27 #include "options.h" 28 #include "c-common.h" 29 #include "gimple-expr.h" 30 #include "c-pragma.h" 31 #include "omp-general.h" 32 #include "gomp-constants.h" 33 34 35 /* Complete a #pragma oacc wait construct. LOC is the location of 36 the #pragma. 
*/ 37 38 tree 39 c_finish_oacc_wait (location_t loc, tree parms, tree clauses) 40 { 41 const int nparms = list_length (parms); 42 tree stmt, t; 43 vec<tree, va_gc> *args; 44 45 vec_alloc (args, nparms + 2); 46 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT); 47 48 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC)) 49 t = OMP_CLAUSE_ASYNC_EXPR (clauses); 50 else 51 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC); 52 53 args->quick_push (t); 54 args->quick_push (build_int_cst (integer_type_node, nparms)); 55 56 for (t = parms; t; t = TREE_CHAIN (t)) 57 { 58 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST) 59 args->quick_push (build_int_cst (integer_type_node, 60 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t)))); 61 else 62 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t)); 63 } 64 65 stmt = build_call_expr_loc_vec (loc, stmt, args); 66 67 vec_free (args); 68 69 return stmt; 70 } 71 72 /* Complete a #pragma omp master construct. STMT is the structured-block 73 that follows the pragma. LOC is the l*/ 74 75 tree 76 c_finish_omp_master (location_t loc, tree stmt) 77 { 78 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt)); 79 SET_EXPR_LOCATION (t, loc); 80 return t; 81 } 82 83 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block 84 that follows the pragma. LOC is the l*/ 85 86 tree 87 c_finish_omp_taskgroup (location_t loc, tree stmt) 88 { 89 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt)); 90 SET_EXPR_LOCATION (t, loc); 91 return t; 92 } 93 94 /* Complete a #pragma omp critical construct. STMT is the structured-block 95 that follows the pragma, NAME is the identifier in the pragma, or null 96 if it was omitted. LOC is the location of the #pragma. 
*/ 97 98 tree 99 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses) 100 { 101 tree stmt = make_node (OMP_CRITICAL); 102 TREE_TYPE (stmt) = void_type_node; 103 OMP_CRITICAL_BODY (stmt) = body; 104 OMP_CRITICAL_NAME (stmt) = name; 105 OMP_CRITICAL_CLAUSES (stmt) = clauses; 106 SET_EXPR_LOCATION (stmt, loc); 107 return add_stmt (stmt); 108 } 109 110 /* Complete a #pragma omp ordered construct. STMT is the structured-block 111 that follows the pragma. LOC is the location of the #pragma. */ 112 113 tree 114 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt) 115 { 116 tree t = make_node (OMP_ORDERED); 117 TREE_TYPE (t) = void_type_node; 118 OMP_ORDERED_BODY (t) = stmt; 119 OMP_ORDERED_CLAUSES (t) = clauses; 120 SET_EXPR_LOCATION (t, loc); 121 return add_stmt (t); 122 } 123 124 125 /* Complete a #pragma omp barrier construct. LOC is the location of 126 the #pragma. */ 127 128 void 129 c_finish_omp_barrier (location_t loc) 130 { 131 tree x; 132 133 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); 134 x = build_call_expr_loc (loc, x, 0); 135 add_stmt (x); 136 } 137 138 139 /* Complete a #pragma omp taskwait construct. LOC is the location of the 140 pragma. */ 141 142 void 143 c_finish_omp_taskwait (location_t loc) 144 { 145 tree x; 146 147 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); 148 x = build_call_expr_loc (loc, x, 0); 149 add_stmt (x); 150 } 151 152 153 /* Complete a #pragma omp taskyield construct. LOC is the location of the 154 pragma. */ 155 156 void 157 c_finish_omp_taskyield (location_t loc) 158 { 159 tree x; 160 161 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); 162 x = build_call_expr_loc (loc, x, 0); 163 add_stmt (x); 164 } 165 166 167 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC 168 the expression to be implemented atomically is LHS opcode= RHS. 169 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS 170 opcode= RHS with the new or old content of LHS returned. 
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
		     bool test)
{
  tree x, type, addr, pre = NULL_TREE;

  /* Bail out early if any operand was already diagnosed.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* Real division is canonicalized to TRUNC_DIV_EXPR; the operands are
     scalar floats here, so the distinction is immaterial.  */
  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      /* Atomic read is V = *ADDR; build the atomic load and store it
	 into V.  */
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      /* RHS opcode LHS was requested; fold it into RHS so the modify
	 expression below is a plain store.  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may prepend a SAVE_EXPR; keep it in PRE and
	 re-attach it at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (lhs)
      && rhs1 != lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
	{
	  if (lhs1 != lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      /* Capture: store the atomic result into V.  */
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  /* Keep RHS1's address evaluated for its side effects, discard
	     its value.  */
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach the SAVE_EXPR split off above so it is evaluated first.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}


/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  /* A flush is just a full memory barrier.  */
  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  Returns the amount DECL is
   incremented by in EXP, or error_mark_node if EXP is not a recognized
   form.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions at least as wide as DECL can be taken
     apart safely.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes an increment of zero.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of the addition.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      /* Decrements become additions of a negated offset.  */
      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}

/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.
 */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so that DECL is the first operand of the
		 comparison.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* != / == are only usable when the bound is the extreme
		 value of the type (then they are equivalent to a
		 relational test), or for Cilk loops.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}

	      /* Re-attach the COMPOUND_EXPRs stripped above onto the
		 non-decl operand of the canonicalized condition.  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Try to take a more complicated RHS apart into
		     DECL + step.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the possibly canonicalized expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}

/* Type for passing data in between c_omp_check_loop_iv
and 733 c_omp_check_loop_iv_r. */ 734 735 struct c_omp_check_loop_iv_data 736 { 737 tree declv; 738 bool fail; 739 location_t stmt_loc; 740 location_t expr_loc; 741 int kind; 742 walk_tree_lh lh; 743 hash_set<tree> *ppset; 744 }; 745 746 /* Helper function called via walk_tree, to diagnose uses 747 of associated loop IVs inside of lb, b and incr expressions 748 of OpenMP loops. */ 749 750 static tree 751 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data) 752 { 753 struct c_omp_check_loop_iv_data *d 754 = (struct c_omp_check_loop_iv_data *) data; 755 if (DECL_P (*tp)) 756 { 757 int i; 758 for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++) 759 if (*tp == TREE_VEC_ELT (d->declv, i)) 760 { 761 location_t loc = d->expr_loc; 762 if (loc == UNKNOWN_LOCATION) 763 loc = d->stmt_loc; 764 switch (d->kind) 765 { 766 case 0: 767 error_at (loc, "initializer expression refers to " 768 "iteration variable %qD", *tp); 769 break; 770 case 1: 771 error_at (loc, "condition expression refers to " 772 "iteration variable %qD", *tp); 773 break; 774 case 2: 775 error_at (loc, "increment expression refers to " 776 "iteration variable %qD", *tp); 777 break; 778 } 779 d->fail = true; 780 } 781 } 782 /* Don't walk dtors added by C++ wrap_cleanups_r. */ 783 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR 784 && TRY_CATCH_IS_CLEANUP (*tp)) 785 { 786 *walk_subtrees = 0; 787 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data, 788 d->ppset, d->lh); 789 } 790 791 return NULL_TREE; 792 } 793 794 /* Diagnose invalid references to loop iterators in lb, b and incr 795 expressions. 
 */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  /* Walk the init, cond and incr expressions of every collapsed loop.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i))
	{
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Walk only the non-DECL operand of the increment.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}

/* Similar, but allows to check the init or cond expressions individually.
*/ 857 858 bool 859 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl, 860 tree init, tree cond, walk_tree_lh lh) 861 { 862 hash_set<tree> pset; 863 struct c_omp_check_loop_iv_data data; 864 865 data.declv = declv; 866 data.fail = false; 867 data.stmt_loc = stmt_loc; 868 data.lh = lh; 869 data.ppset = &pset; 870 if (init) 871 { 872 data.expr_loc = EXPR_LOCATION (init); 873 data.kind = 0; 874 walk_tree_1 (&init, 875 c_omp_check_loop_iv_r, &data, &pset, lh); 876 } 877 if (cond) 878 { 879 gcc_assert (COMPARISON_CLASS_P (cond)); 880 data.expr_loc = EXPR_LOCATION (init); 881 data.kind = 1; 882 if (TREE_OPERAND (cond, 0) == decl) 883 walk_tree_1 (&TREE_OPERAND (cond, 1), 884 c_omp_check_loop_iv_r, &data, &pset, lh); 885 else 886 walk_tree_1 (&TREE_OPERAND (cond, 0), 887 c_omp_check_loop_iv_r, &data, &pset, lh); 888 } 889 return !data.fail; 890 } 891 892 /* This function splits clauses for OpenACC combined loop 893 constructs. OpenACC combined loop constructs are: 894 #pragma acc kernels loop 895 #pragma acc parallel loop */ 896 897 tree 898 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, 899 bool is_parallel) 900 { 901 tree next, loop_clauses, nc; 902 903 loop_clauses = *not_loop_clauses = NULL_TREE; 904 for (; clauses ; clauses = next) 905 { 906 next = OMP_CLAUSE_CHAIN (clauses); 907 908 switch (OMP_CLAUSE_CODE (clauses)) 909 { 910 /* Loop clauses. */ 911 case OMP_CLAUSE_COLLAPSE: 912 case OMP_CLAUSE_TILE: 913 case OMP_CLAUSE_GANG: 914 case OMP_CLAUSE_WORKER: 915 case OMP_CLAUSE_VECTOR: 916 case OMP_CLAUSE_AUTO: 917 case OMP_CLAUSE_SEQ: 918 case OMP_CLAUSE_INDEPENDENT: 919 case OMP_CLAUSE_PRIVATE: 920 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 921 loop_clauses = clauses; 922 break; 923 924 /* Reductions must be duplicated on both constructs. 
*/ 925 case OMP_CLAUSE_REDUCTION: 926 if (is_parallel) 927 { 928 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 929 OMP_CLAUSE_REDUCTION); 930 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); 931 OMP_CLAUSE_REDUCTION_CODE (nc) 932 = OMP_CLAUSE_REDUCTION_CODE (clauses); 933 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; 934 *not_loop_clauses = nc; 935 } 936 937 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 938 loop_clauses = clauses; 939 break; 940 941 /* Parallel/kernels clauses. */ 942 default: 943 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; 944 *not_loop_clauses = clauses; 945 break; 946 } 947 } 948 949 return loop_clauses; 950 } 951 952 /* This function attempts to split or duplicate clauses for OpenMP 953 combined/composite constructs. Right now there are 21 different 954 constructs. CODE is the innermost construct in the combined construct, 955 and MASK allows to determine which constructs are combined together, 956 as every construct has at least one clause that no other construct 957 has (except for OMP_SECTIONS, but that can be only combined with parallel). 
958 OpenMP combined/composite constructs are: 959 #pragma omp distribute parallel for 960 #pragma omp distribute parallel for simd 961 #pragma omp distribute simd 962 #pragma omp for simd 963 #pragma omp parallel for 964 #pragma omp parallel for simd 965 #pragma omp parallel sections 966 #pragma omp target parallel 967 #pragma omp target parallel for 968 #pragma omp target parallel for simd 969 #pragma omp target teams 970 #pragma omp target teams distribute 971 #pragma omp target teams distribute parallel for 972 #pragma omp target teams distribute parallel for simd 973 #pragma omp target teams distribute simd 974 #pragma omp target simd 975 #pragma omp taskloop simd 976 #pragma omp teams distribute 977 #pragma omp teams distribute parallel for 978 #pragma omp teams distribute parallel for simd 979 #pragma omp teams distribute simd */ 980 981 void 982 c_omp_split_clauses (location_t loc, enum tree_code code, 983 omp_clause_mask mask, tree clauses, tree *cclauses) 984 { 985 tree next, c; 986 enum c_omp_clause_split s; 987 int i; 988 989 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) 990 cclauses[i] = NULL; 991 /* Add implicit nowait clause on 992 #pragma omp parallel {for,for simd,sections}. */ 993 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 994 switch (code) 995 { 996 case OMP_FOR: 997 case OMP_SIMD: 998 cclauses[C_OMP_CLAUSE_SPLIT_FOR] 999 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); 1000 break; 1001 case OMP_SECTIONS: 1002 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] 1003 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); 1004 break; 1005 default: 1006 break; 1007 } 1008 1009 for (; clauses ; clauses = next) 1010 { 1011 next = OMP_CLAUSE_CHAIN (clauses); 1012 1013 switch (OMP_CLAUSE_CODE (clauses)) 1014 { 1015 /* First the clauses that are unique to some constructs. 
*/ 1016 case OMP_CLAUSE_DEVICE: 1017 case OMP_CLAUSE_MAP: 1018 case OMP_CLAUSE_IS_DEVICE_PTR: 1019 case OMP_CLAUSE_DEFAULTMAP: 1020 case OMP_CLAUSE_DEPEND: 1021 s = C_OMP_CLAUSE_SPLIT_TARGET; 1022 break; 1023 case OMP_CLAUSE_NUM_TEAMS: 1024 case OMP_CLAUSE_THREAD_LIMIT: 1025 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1026 break; 1027 case OMP_CLAUSE_DIST_SCHEDULE: 1028 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1029 break; 1030 case OMP_CLAUSE_COPYIN: 1031 case OMP_CLAUSE_NUM_THREADS: 1032 case OMP_CLAUSE_PROC_BIND: 1033 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1034 break; 1035 case OMP_CLAUSE_ORDERED: 1036 s = C_OMP_CLAUSE_SPLIT_FOR; 1037 break; 1038 case OMP_CLAUSE_SCHEDULE: 1039 s = C_OMP_CLAUSE_SPLIT_FOR; 1040 if (code != OMP_SIMD) 1041 OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0; 1042 break; 1043 case OMP_CLAUSE_SAFELEN: 1044 case OMP_CLAUSE_SIMDLEN: 1045 case OMP_CLAUSE_ALIGNED: 1046 s = C_OMP_CLAUSE_SPLIT_SIMD; 1047 break; 1048 case OMP_CLAUSE_GRAINSIZE: 1049 case OMP_CLAUSE_NUM_TASKS: 1050 case OMP_CLAUSE_FINAL: 1051 case OMP_CLAUSE_UNTIED: 1052 case OMP_CLAUSE_MERGEABLE: 1053 case OMP_CLAUSE_NOGROUP: 1054 case OMP_CLAUSE_PRIORITY: 1055 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1056 break; 1057 /* Duplicate this to all of taskloop, distribute, for and simd. 
*/ 1058 case OMP_CLAUSE_COLLAPSE: 1059 if (code == OMP_SIMD) 1060 { 1061 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) 1062 | (OMP_CLAUSE_MASK_1 1063 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) 1064 | (OMP_CLAUSE_MASK_1 1065 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) 1066 { 1067 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1068 OMP_CLAUSE_COLLAPSE); 1069 OMP_CLAUSE_COLLAPSE_EXPR (c) 1070 = OMP_CLAUSE_COLLAPSE_EXPR (clauses); 1071 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; 1072 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; 1073 } 1074 else 1075 { 1076 /* This must be #pragma omp target simd */ 1077 s = C_OMP_CLAUSE_SPLIT_SIMD; 1078 break; 1079 } 1080 } 1081 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1082 { 1083 if ((mask & (OMP_CLAUSE_MASK_1 1084 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1085 { 1086 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1087 OMP_CLAUSE_COLLAPSE); 1088 OMP_CLAUSE_COLLAPSE_EXPR (c) 1089 = OMP_CLAUSE_COLLAPSE_EXPR (clauses); 1090 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; 1091 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; 1092 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1093 } 1094 else 1095 s = C_OMP_CLAUSE_SPLIT_FOR; 1096 } 1097 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1098 != 0) 1099 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1100 else 1101 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1102 break; 1103 /* Private clause is supported on all constructs, 1104 it is enough to put it on the innermost one. For 1105 #pragma omp {for,sections} put it on parallel though, 1106 as that's what we did for OpenMP 3.1. 
*/ 1107 case OMP_CLAUSE_PRIVATE: 1108 switch (code) 1109 { 1110 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; 1111 case OMP_FOR: case OMP_SECTIONS: 1112 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; 1113 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; 1114 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; 1115 default: gcc_unreachable (); 1116 } 1117 break; 1118 /* Firstprivate clause is supported on all constructs but 1119 simd. Put it on the outermost of those and duplicate on teams 1120 and parallel. */ 1121 case OMP_CLAUSE_FIRSTPRIVATE: 1122 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) 1123 != 0) 1124 { 1125 if (code == OMP_SIMD 1126 && (mask & ((OMP_CLAUSE_MASK_1 1127 << PRAGMA_OMP_CLAUSE_NUM_THREADS) 1128 | (OMP_CLAUSE_MASK_1 1129 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) 1130 { 1131 /* This must be #pragma omp target simd. */ 1132 s = C_OMP_CLAUSE_SPLIT_TARGET; 1133 break; 1134 } 1135 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1136 OMP_CLAUSE_FIRSTPRIVATE); 1137 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1138 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; 1139 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; 1140 } 1141 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1142 != 0) 1143 { 1144 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) 1145 | (OMP_CLAUSE_MASK_1 1146 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) 1147 { 1148 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1149 OMP_CLAUSE_FIRSTPRIVATE); 1150 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1151 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; 1152 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; 1153 if ((mask & (OMP_CLAUSE_MASK_1 1154 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) 1155 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1156 else 1157 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1158 } 1159 else 1160 /* This must be 1161 #pragma omp parallel{, for{, simd}, sections} 1162 or 1163 #pragma omp target 
parallel. */ 1164 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1165 } 1166 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1167 != 0) 1168 { 1169 /* This must be one of 1170 #pragma omp {,target }teams distribute 1171 #pragma omp target teams 1172 #pragma omp {,target }teams distribute simd. */ 1173 gcc_assert (code == OMP_DISTRIBUTE 1174 || code == OMP_TEAMS 1175 || code == OMP_SIMD); 1176 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1177 } 1178 else if ((mask & (OMP_CLAUSE_MASK_1 1179 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1180 { 1181 /* This must be #pragma omp distribute simd. */ 1182 gcc_assert (code == OMP_SIMD); 1183 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1184 } 1185 else if ((mask & (OMP_CLAUSE_MASK_1 1186 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) 1187 { 1188 /* This must be #pragma omp taskloop simd. */ 1189 gcc_assert (code == OMP_SIMD); 1190 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1191 } 1192 else 1193 { 1194 /* This must be #pragma omp for simd. */ 1195 gcc_assert (code == OMP_SIMD); 1196 s = C_OMP_CLAUSE_SPLIT_FOR; 1197 } 1198 break; 1199 /* Lastprivate is allowed on distribute, for, sections and simd. In 1200 parallel {for{, simd},sections} we actually want to put it on 1201 parallel rather than for or sections. 
*/ 1202 case OMP_CLAUSE_LASTPRIVATE: 1203 if (code == OMP_DISTRIBUTE) 1204 { 1205 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1206 break; 1207 } 1208 if ((mask & (OMP_CLAUSE_MASK_1 1209 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1210 { 1211 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1212 OMP_CLAUSE_LASTPRIVATE); 1213 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1214 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; 1215 cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; 1216 } 1217 if (code == OMP_FOR || code == OMP_SECTIONS) 1218 { 1219 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1220 != 0) 1221 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1222 else 1223 s = C_OMP_CLAUSE_SPLIT_FOR; 1224 break; 1225 } 1226 gcc_assert (code == OMP_SIMD); 1227 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1228 { 1229 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1230 OMP_CLAUSE_LASTPRIVATE); 1231 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1232 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1233 != 0) 1234 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1235 else 1236 s = C_OMP_CLAUSE_SPLIT_FOR; 1237 OMP_CLAUSE_CHAIN (c) = cclauses[s]; 1238 cclauses[s] = c; 1239 } 1240 s = C_OMP_CLAUSE_SPLIT_SIMD; 1241 break; 1242 /* Shared and default clauses are allowed on parallel, teams and 1243 taskloop. 
*/ 1244 case OMP_CLAUSE_SHARED: 1245 case OMP_CLAUSE_DEFAULT: 1246 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1247 != 0) 1248 { 1249 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1250 break; 1251 } 1252 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1253 != 0) 1254 { 1255 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1256 == 0) 1257 { 1258 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1259 break; 1260 } 1261 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1262 OMP_CLAUSE_CODE (clauses)); 1263 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) 1264 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1265 else 1266 OMP_CLAUSE_DEFAULT_KIND (c) 1267 = OMP_CLAUSE_DEFAULT_KIND (clauses); 1268 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; 1269 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; 1270 } 1271 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1272 break; 1273 /* Reduction is allowed on simd, for, parallel, sections and teams. 1274 Duplicate it on all of them, but omit on for or sections if 1275 parallel is present. 
*/ 1276 case OMP_CLAUSE_REDUCTION: 1277 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1278 { 1279 if (code == OMP_SIMD) 1280 { 1281 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1282 OMP_CLAUSE_REDUCTION); 1283 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1284 OMP_CLAUSE_REDUCTION_CODE (c) 1285 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1286 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1287 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1288 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1289 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1290 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; 1291 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; 1292 } 1293 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1294 != 0) 1295 { 1296 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1297 OMP_CLAUSE_REDUCTION); 1298 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1299 OMP_CLAUSE_REDUCTION_CODE (c) 1300 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1301 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1302 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1303 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1304 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1305 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; 1306 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; 1307 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1308 } 1309 else if ((mask & (OMP_CLAUSE_MASK_1 1310 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 1311 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1312 else 1313 s = C_OMP_CLAUSE_SPLIT_FOR; 1314 } 1315 else if (code == OMP_SECTIONS || code == OMP_PARALLEL) 1316 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1317 else if (code == OMP_SIMD) 1318 s = C_OMP_CLAUSE_SPLIT_SIMD; 1319 else 1320 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1321 break; 1322 case OMP_CLAUSE_IF: 1323 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1324 != 0) 1325 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1326 else if ((mask & (OMP_CLAUSE_MASK_1 1327 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 1328 { 1329 if ((mask & 
(OMP_CLAUSE_MASK_1 1330 << PRAGMA_OMP_CLAUSE_MAP)) != 0) 1331 { 1332 if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL) 1333 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1334 else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET) 1335 s = C_OMP_CLAUSE_SPLIT_TARGET; 1336 else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK) 1337 { 1338 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1339 OMP_CLAUSE_IF); 1340 OMP_CLAUSE_IF_MODIFIER (c) 1341 = OMP_CLAUSE_IF_MODIFIER (clauses); 1342 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); 1343 OMP_CLAUSE_CHAIN (c) 1344 = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; 1345 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; 1346 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1347 } 1348 else 1349 { 1350 error_at (OMP_CLAUSE_LOCATION (clauses), 1351 "expected %<parallel%> or %<target%> %<if%> " 1352 "clause modifier"); 1353 continue; 1354 } 1355 } 1356 else 1357 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1358 } 1359 else 1360 s = C_OMP_CLAUSE_SPLIT_TARGET; 1361 break; 1362 case OMP_CLAUSE_LINEAR: 1363 /* Linear clause is allowed on simd and for. Put it on the 1364 innermost construct. */ 1365 if (code == OMP_SIMD) 1366 s = C_OMP_CLAUSE_SPLIT_SIMD; 1367 else 1368 s = C_OMP_CLAUSE_SPLIT_FOR; 1369 break; 1370 case OMP_CLAUSE_NOWAIT: 1371 /* Nowait clause is allowed on target, for and sections, but 1372 is not allowed on parallel for or parallel sections. Therefore, 1373 put it on target construct if present, because that can only 1374 be combined with parallel for{, simd} and not with for{, simd}, 1375 otherwise to the worksharing construct. 
*/ 1376 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) 1377 != 0) 1378 s = C_OMP_CLAUSE_SPLIT_TARGET; 1379 else 1380 s = C_OMP_CLAUSE_SPLIT_FOR; 1381 break; 1382 default: 1383 gcc_unreachable (); 1384 } 1385 OMP_CLAUSE_CHAIN (clauses) = cclauses[s]; 1386 cclauses[s] = clauses; 1387 } 1388 1389 if (!flag_checking) 1390 return; 1391 1392 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) 1393 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); 1394 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) 1395 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); 1396 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) 1397 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); 1398 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) 1399 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); 1400 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) 1401 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 1402 && code != OMP_SECTIONS) 1403 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); 1404 if (code != OMP_SIMD) 1405 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); 1406 } 1407 1408 1409 /* qsort callback to compare #pragma omp declare simd clauses. 
*/ 1410 1411 static int 1412 c_omp_declare_simd_clause_cmp (const void *p, const void *q) 1413 { 1414 tree a = *(const tree *) p; 1415 tree b = *(const tree *) q; 1416 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) 1417 { 1418 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) 1419 return -1; 1420 return 1; 1421 } 1422 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN 1423 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH 1424 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) 1425 { 1426 int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); 1427 int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); 1428 if (c < d) 1429 return 1; 1430 if (c > d) 1431 return -1; 1432 } 1433 return 0; 1434 } 1435 1436 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd 1437 CLAUSES on FNDECL into argument indexes and sort them. */ 1438 1439 tree 1440 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) 1441 { 1442 tree c; 1443 vec<tree> clvec = vNULL; 1444 1445 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 1446 { 1447 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 1448 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 1449 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 1450 { 1451 tree decl = OMP_CLAUSE_DECL (c); 1452 tree arg; 1453 int idx; 1454 for (arg = parms, idx = 0; arg; 1455 arg = TREE_CHAIN (arg), idx++) 1456 if (arg == decl) 1457 break; 1458 if (arg == NULL_TREE) 1459 { 1460 error_at (OMP_CLAUSE_LOCATION (c), 1461 "%qD is not an function argument", decl); 1462 continue; 1463 } 1464 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); 1465 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR 1466 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) 1467 { 1468 decl = OMP_CLAUSE_LINEAR_STEP (c); 1469 for (arg = parms, idx = 0; arg; 1470 arg = TREE_CHAIN (arg), idx++) 1471 if (arg == decl) 1472 break; 1473 if (arg == NULL_TREE) 1474 { 1475 error_at (OMP_CLAUSE_LOCATION (c), 1476 "%qD is not an function argument", decl); 1477 continue; 1478 } 1479 OMP_CLAUSE_LINEAR_STEP (c) 1480 = 
build_int_cst (integer_type_node, idx); 1481 } 1482 } 1483 clvec.safe_push (c); 1484 } 1485 if (!clvec.is_empty ()) 1486 { 1487 unsigned int len = clvec.length (), i; 1488 clvec.qsort (c_omp_declare_simd_clause_cmp); 1489 clauses = clvec[0]; 1490 for (i = 0; i < len; i++) 1491 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE; 1492 } 1493 else 1494 clauses = NULL_TREE; 1495 clvec.release (); 1496 return clauses; 1497 } 1498 1499 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */ 1500 1501 void 1502 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) 1503 { 1504 tree c; 1505 1506 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 1507 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 1508 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 1509 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 1510 { 1511 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; 1512 tree arg; 1513 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; 1514 arg = TREE_CHAIN (arg), i++) 1515 if (i == idx) 1516 break; 1517 gcc_assert (arg); 1518 OMP_CLAUSE_DECL (c) = arg; 1519 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR 1520 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) 1521 { 1522 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c)); 1523 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; 1524 arg = TREE_CHAIN (arg), i++) 1525 if (i == idx) 1526 break; 1527 gcc_assert (arg); 1528 OMP_CLAUSE_LINEAR_STEP (c) = arg; 1529 } 1530 } 1531 } 1532 1533 /* True if OpenMP sharing attribute of DECL is predetermined. */ 1534 1535 enum omp_clause_default_kind 1536 c_omp_predetermined_sharing (tree decl) 1537 { 1538 /* Variables with const-qualified type having no mutable member 1539 are predetermined shared. */ 1540 if (TREE_READONLY (decl)) 1541 return OMP_CLAUSE_DEFAULT_SHARED; 1542 1543 /* Predetermine artificial variables holding integral values, those 1544 are usually result of gimplify_one_sizepos or SAVE_EXPR 1545 gimplification. 
*/ 1546 if (VAR_P (decl) 1547 && DECL_ARTIFICIAL (decl) 1548 && INTEGRAL_TYPE_P (TREE_TYPE (decl))) 1549 return OMP_CLAUSE_DEFAULT_SHARED; 1550 1551 return OMP_CLAUSE_DEFAULT_UNSPECIFIED; 1552 } 1553