1 /* This file contains routines to construct OpenACC and OpenMP constructs, 2 called from parsing in the C and C++ front ends. 3 4 Copyright (C) 2005-2019 Free Software Foundation, Inc. 5 Contributed by Richard Henderson <rth@redhat.com>, 6 Diego Novillo <dnovillo@redhat.com>. 7 8 This file is part of GCC. 9 10 GCC is free software; you can redistribute it and/or modify it under 11 the terms of the GNU General Public License as published by the Free 12 Software Foundation; either version 3, or (at your option) any later 13 version. 14 15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY 16 WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18 for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with GCC; see the file COPYING3. If not see 22 <http://www.gnu.org/licenses/>. */ 23 24 #include "config.h" 25 #include "system.h" 26 #include "coretypes.h" 27 #include "options.h" 28 #include "c-common.h" 29 #include "gimple-expr.h" 30 #include "c-pragma.h" 31 #include "stringpool.h" 32 #include "omp-general.h" 33 #include "gomp-constants.h" 34 #include "memmodel.h" 35 36 37 /* Complete a #pragma oacc wait construct. LOC is the location of 38 the #pragma. 
*/ 39 40 tree 41 c_finish_oacc_wait (location_t loc, tree parms, tree clauses) 42 { 43 const int nparms = list_length (parms); 44 tree stmt, t; 45 vec<tree, va_gc> *args; 46 47 vec_alloc (args, nparms + 2); 48 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT); 49 50 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC)) 51 t = OMP_CLAUSE_ASYNC_EXPR (clauses); 52 else 53 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC); 54 55 args->quick_push (t); 56 args->quick_push (build_int_cst (integer_type_node, nparms)); 57 58 for (t = parms; t; t = TREE_CHAIN (t)) 59 { 60 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST) 61 args->quick_push (build_int_cst (integer_type_node, 62 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t)))); 63 else 64 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t)); 65 } 66 67 stmt = build_call_expr_loc_vec (loc, stmt, args); 68 69 vec_free (args); 70 71 return stmt; 72 } 73 74 /* Complete a #pragma omp master construct. STMT is the structured-block 75 that follows the pragma. LOC is the location of the #pragma. */ 76 77 tree 78 c_finish_omp_master (location_t loc, tree stmt) 79 { 80 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt)); 81 SET_EXPR_LOCATION (t, loc); 82 return t; 83 } 84 85 /* Complete a #pragma omp taskgroup construct. BODY is the structured-block 86 that follows the pragma. LOC is the location of the #pragma. */ 87 88 tree 89 c_finish_omp_taskgroup (location_t loc, tree body, tree clauses) 90 { 91 tree stmt = make_node (OMP_TASKGROUP); 92 TREE_TYPE (stmt) = void_type_node; 93 OMP_TASKGROUP_BODY (stmt) = body; 94 OMP_TASKGROUP_CLAUSES (stmt) = clauses; 95 SET_EXPR_LOCATION (stmt, loc); 96 return add_stmt (stmt); 97 } 98 99 /* Complete a #pragma omp critical construct. BODY is the structured-block 100 that follows the pragma, NAME is the identifier in the pragma, or null 101 if it was omitted. LOC is the location of the #pragma. 
*/ 102 103 tree 104 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses) 105 { 106 tree stmt = make_node (OMP_CRITICAL); 107 TREE_TYPE (stmt) = void_type_node; 108 OMP_CRITICAL_BODY (stmt) = body; 109 OMP_CRITICAL_NAME (stmt) = name; 110 OMP_CRITICAL_CLAUSES (stmt) = clauses; 111 SET_EXPR_LOCATION (stmt, loc); 112 return add_stmt (stmt); 113 } 114 115 /* Complete a #pragma omp ordered construct. STMT is the structured-block 116 that follows the pragma. LOC is the location of the #pragma. */ 117 118 tree 119 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt) 120 { 121 tree t = make_node (OMP_ORDERED); 122 TREE_TYPE (t) = void_type_node; 123 OMP_ORDERED_BODY (t) = stmt; 124 if (!flag_openmp /* flag_openmp_simd */ 125 && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD 126 || OMP_CLAUSE_CHAIN (clauses))) 127 clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD); 128 OMP_ORDERED_CLAUSES (t) = clauses; 129 SET_EXPR_LOCATION (t, loc); 130 return add_stmt (t); 131 } 132 133 134 /* Complete a #pragma omp barrier construct. LOC is the location of 135 the #pragma. */ 136 137 void 138 c_finish_omp_barrier (location_t loc) 139 { 140 tree x; 141 142 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); 143 x = build_call_expr_loc (loc, x, 0); 144 add_stmt (x); 145 } 146 147 148 /* Complete a #pragma omp taskwait construct. LOC is the location of the 149 pragma. */ 150 151 void 152 c_finish_omp_taskwait (location_t loc) 153 { 154 tree x; 155 156 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); 157 x = build_call_expr_loc (loc, x, 0); 158 add_stmt (x); 159 } 160 161 162 /* Complete a #pragma omp taskyield construct. LOC is the location of the 163 pragma. */ 164 165 void 166 c_finish_omp_taskyield (location_t loc) 167 { 168 tree x; 169 170 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); 171 x = build_call_expr_loc (loc, x, 0); 172 add_stmt (x); 173 } 174 175 176 /* Complete a #pragma omp atomic construct. 
   For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS. 
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped,
		     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early if any operand is already erroneous.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  /* If LHS is a C bit-field with a representative field, operate on the
     whole representative instead and remember the bit position/size so
     the field value can be extracted (BIT_FIELD_REF) and re-inserted
     (BIT_INSERT_EXPR) around the atomic operation below.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      /* For a bit-field read, extract the field from the representative
	 value that was read atomically.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may have split off side effects; keep them in
	 PRE and prepend them to the result at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Re-insert the computed bit-field value into the representative.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Rewrite bit-field references in LHS1/RHS1 to their representative
     fields, mirroring what was done for LHS above.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Capture forms: store the old or new value into V.  */
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  /* Evaluate RHS1's address for side effects, discard the value.  */
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Prepend side effects split off by build_modify_expr above.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}


/* Return true if TYPE is the implementation's omp_depend_t.
*/

bool
c_omp_depend_t_p (tree type)
{
  /* Recognize the implementation's omp_depend_t: a complete RECORD_TYPE
     named "omp_depend_t" at translation-unit scope whose size equals
     two pointers.  */
  type = TYPE_MAIN_VARIANT (type);
  return (TREE_CODE (type) == RECORD_TYPE
	  && TYPE_NAME (type)
	  && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	       ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
	      == get_identifier ("omp_depend_t"))
	  && (!TYPE_CONTEXT (type)
	      || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
	  && COMPLETE_TYPE_P (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && !compare_tree_int (TYPE_SIZE (type),
				2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
}


/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.  DEPOBJ is the depobj expression, KIND the dependence kind
   to record (or a clause-derived kind when CLAUSE is given), CLAUSE
   the depend clause from the directive, if any.  */

void
c_finish_omp_depobj (location_t loc, tree depobj,
		     enum omp_clause_depend_kind kind, tree clause)
{
  tree t = NULL_TREE;
  /* Validate the depobj expression's type and qualification.  */
  if (!error_operand_p (depobj))
    {
      if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "type of %<depobj%> expression is not %<omp_depend_t%>");
	  depobj = error_mark_node;
	}
      else if (TYPE_READONLY (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<const%> qualified %<depobj%> expression");
	  depobj = error_mark_node;
	}
    }
  else
    depobj = error_mark_node;

  if (clause == error_mark_node)
    return;

  if (clause)
    {
      gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
		  && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_CHAIN (clause))
	error_at (OMP_CLAUSE_LOCATION (clause),
		  "more than one locator in %<depend%> clause on %<depobj%> "
		  "construct");
      switch (OMP_CLAUSE_DEPEND_KIND (clause))
	{
	case OMP_CLAUSE_DEPEND_DEPOBJ:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depobj%> dependence type specified in %<depend%> "
		    "clause on %<depobj%> construct");
	  return;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depend(%s)%> is only allowed in %<omp ordered%>",
		    OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
		    ? "source" : "sink");
	  return;
	case OMP_CLAUSE_DEPEND_IN:
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	  kind = OMP_CLAUSE_DEPEND_KIND (clause);
	  t = OMP_CLAUSE_DECL (clause);
	  gcc_assert (t);
	  /* An iterator modifier shows up as a TREE_LIST with a TREE_VEC
	     purpose; it is not allowed here.  */
	  if (TREE_CODE (t) == TREE_LIST
	      && TREE_PURPOSE (t)
	      && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	    {
	      error_at (OMP_CLAUSE_LOCATION (clause),
			"%<iterator%> modifier may not be specified on "
			"%<depobj%> construct");
	      return;
	    }
	  /* Take the address of the dependence locator, preserving any
	     leading side effects of a COMPOUND_EXPR.  */
	  if (TREE_CODE (t) == COMPOUND_EXPR)
	    {
	      tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
	      t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
			  t1);
	    }
	  else
	    t = build_fold_addr_expr (t);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);

  if (depobj == error_mark_node)
    return;

  /* Treat the depobj as an array of two pointers: element 0 holds the
     address of the dependence locator, element 1 the GOMP_DEPEND_* kind.  */
  depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
  tree dtype
    = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
				   true);
  depobj = fold_convert (dtype, depobj);
  tree r;
  if (clause)
    {
      depobj = save_expr (depobj);
      r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
      add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
    }
  int k;
  switch (kind)
    {
    case OMP_CLAUSE_DEPEND_IN:
      k = GOMP_DEPEND_IN;
      break;
    case OMP_CLAUSE_DEPEND_OUT:
      k = GOMP_DEPEND_OUT;
      break;
    case OMP_CLAUSE_DEPEND_INOUT:
      k = GOMP_DEPEND_INOUT;
      break;
    case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
      k = GOMP_DEPEND_MUTEXINOUTSET;
      break;
    case OMP_CLAUSE_DEPEND_LAST:
      /* depobj destroy: store -1 as the kind.  */
      k = -1;
      break;
    default:
      gcc_unreachable ();
    }
  t = build_int_cst (ptr_type_node, k);
  depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
		       TYPE_SIZE_UNIT (ptr_type_node));
  r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
}


/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  MO is the requested memory order, or MEMMODEL_LAST
   when none was specified.  */

void
c_finish_omp_flush (location_t loc, int mo)
{
  tree x;

  if (mo == MEMMODEL_LAST)
    {
      x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
      x = build_call_expr_loc (loc, x, 0);
    }
  else
    {
      x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
      x = build_call_expr_loc (loc, x, 1,
			       build_int_cst (integer_type_node, mo));
    }
  add_stmt (x);
}


/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  Returns EXP rewritten with
   occurrences of DECL replaced by 0, or error_mark_node if EXP is not
   an acceptable increment form.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear in either operand of a PLUS_EXPR.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      /* Negate the step for decrements so a POINTER_PLUS_EXPR works
	 for both directions.  */
      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}

/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body, bool final_p)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so the iteration variable is operand 0.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* != / == are only acceptable in restricted situations;
		 rewrite != against TYPE_MIN/MAX_VALUE bounds into an
		 equivalent ordered comparison where possible.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
		    cond_ok = false;
		}

	      /* Readd any COMPOUND_EXPRs stripped above onto the non-decl
		 operand of the condition (see PR45784 comment above).  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      if (!fail
		  && TREE_CODE (cond) == NE_EXPR
		  && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		  && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
		  && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		      != INTEGER_CST))
		{
		  /* For pointer to VLA, transform != into < or >
		     depending on whether incr is increment or decrement.  */
		  if (TREE_CODE (incr) == PREINCREMENT_EXPR
		      || TREE_CODE (incr) == POSTINCREMENT_EXPR)
		    TREE_SET_CODE (cond, LT_EXPR);
		  else
		    TREE_SET_CODE (cond, GT_EXPR);
		}
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Fall back to canonicalizing an arbitrary expression
		     involving DECL into decl = decl + step.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      /* For a != condition, the step must be a constant 1 or -1
		 (in units of the pointed-to size for pointers).  */
	      if (!fail
		  && incr_ok
		  && TREE_CODE (cond) == NE_EXPR)
		{
		  tree i = TREE_OPERAND (incr, 1);
		  i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
		  i = c_fully_fold (i, false, NULL);
		  if (!final_p
		      && TREE_CODE (i) != INTEGER_CST)
		    ;
		  else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
		    {
		      tree unit
			= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
		      if (unit)
			{
			  enum tree_code ccode = GT_EXPR;
			  unit = c_fully_fold (unit, false, NULL);
			  i = fold_convert (TREE_TYPE (unit), i);
			  if (operand_equal_p (unit, i, 0))
			    ccode = LT_EXPR;
			  if (ccode == GT_EXPR)
			    {
			      i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
			      if (i == NULL_TREE
				  || !operand_equal_p (unit, i, 0))
				{
				  error_at (elocus,
					    "increment is not constant 1 or "
					    "-1 for != condition");
				  fail = true;
				}
			    }
			  if (TREE_CODE (unit) != INTEGER_CST)
			    /* For pointer to VLA, transform != into < or >
			       depending on whether the pointer is
			       incremented or decremented in each
			       iteration.  */
			    TREE_SET_CODE (cond, ccode);
			}
		    }
		  else
		    {
		      if (!integer_onep (i) && !integer_minus_onep (i))
			{
			  error_at (elocus,
				    "increment is not constant 1 or -1 for"
				    " != condition");
			  fail = true;
			}
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the possibly canonicalized expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}

/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */

struct c_omp_check_loop_iv_data
{
  /* Vector of iteration variables being checked against.  */
  tree declv;
  /* Set to true when a diagnosed misuse is found.  */
  bool fail;
  /* Location of the whole OMP_FOR statement (fallback location).  */
  location_t stmt_loc;
  /* Location of the particular expression being walked.  */
  location_t expr_loc;
  /* Which expression is being walked: 0 = init, 1 = cond, 2 = incr.  */
  int kind;
  /* Language hook and pointer set forwarded to walk_tree_1.  */
  walk_tree_lh lh;
  hash_set<tree> *ppset;
};

/* Helper function called via walk_tree, to diagnose uses
   of associated loop IVs inside of lb, b and incr expressions
   of OpenMP loops.
*/

static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      for (int i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
	{
	  /* *TP refers to an IV if it is the element itself, the
	     TREE_PURPOSE of a TREE_LIST element, or element 2 of a
	     TREE_VEC chained onto a TREE_LIST element.  */
	  tree elt = TREE_VEC_ELT (d->declv, i);
	  bool match = (*tp == elt);
	  if (!match && TREE_CODE (elt) == TREE_LIST)
	    {
	      if (*tp == TREE_PURPOSE (elt))
		match = true;
	      else if (TREE_CHAIN (elt)
		       && TREE_CODE (TREE_CHAIN (elt)) == TREE_VEC
		       && *tp == TREE_VEC_ELT (TREE_CHAIN (elt), 2))
		match = true;
	    }
	  if (!match)
	    continue;

	  /* Prefer the expression's own location, falling back to the
	     statement's.  */
	  location_t loc = d->expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = d->stmt_loc;
	  switch (d->kind)
	    {
	    case 0:
	      error_at (loc, "initializer expression refers to "
			     "iteration variable %qD", *tp);
	      break;
	    case 1:
	      error_at (loc, "condition expression refers to "
			     "iteration variable %qD", *tp);
	      break;
	    case 2:
	      error_at (loc, "increment expression refers to "
			     "iteration variable %qD", *tp);
	      break;
	    }
	  d->fail = true;
	}
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}

/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.
*/

/* Walk the init, cond and incr expressions of every associated loop of
   the expanded OMP_FOR STMT and diagnose (via c_omp_check_loop_iv_r)
   any reference to one of the iteration variables recorded in DECLV.
   LH is an optional langhook passed through to walk_tree_1.
   Returns true if no invalid reference was found.  */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  /* PSET is shared across all walks so each subexpression is
     visited (and possibly diagnosed) only once.  */
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      /* kind == 0: checking the init expression (rhs of the
	 MODIFY_EXPR).  */
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i)
	  || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
	      && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
	{
	  /* kind == 1: checking the second operand of the
	     comparison (the first one is asserted to be DECL).  */
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  /* kind == 2: checking the increment; only walk the operand
	     of the PLUS_EXPR/MINUS_EXPR that is not DECL itself.  */
	  data.kind = 2;
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}

/* Similar, but allows to check the init or cond expressions individually.
*/ 1170 1171 bool 1172 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl, 1173 tree init, tree cond, walk_tree_lh lh) 1174 { 1175 hash_set<tree> pset; 1176 struct c_omp_check_loop_iv_data data; 1177 1178 data.declv = declv; 1179 data.fail = false; 1180 data.stmt_loc = stmt_loc; 1181 data.lh = lh; 1182 data.ppset = &pset; 1183 if (init) 1184 { 1185 data.expr_loc = EXPR_LOCATION (init); 1186 data.kind = 0; 1187 walk_tree_1 (&init, 1188 c_omp_check_loop_iv_r, &data, &pset, lh); 1189 } 1190 if (cond) 1191 { 1192 gcc_assert (COMPARISON_CLASS_P (cond)); 1193 data.expr_loc = EXPR_LOCATION (init); 1194 data.kind = 1; 1195 if (TREE_OPERAND (cond, 0) == decl) 1196 walk_tree_1 (&TREE_OPERAND (cond, 1), 1197 c_omp_check_loop_iv_r, &data, &pset, lh); 1198 else 1199 walk_tree_1 (&TREE_OPERAND (cond, 0), 1200 c_omp_check_loop_iv_r, &data, &pset, lh); 1201 } 1202 return !data.fail; 1203 } 1204 1205 /* This function splits clauses for OpenACC combined loop 1206 constructs. OpenACC combined loop constructs are: 1207 #pragma acc kernels loop 1208 #pragma acc parallel loop */ 1209 1210 tree 1211 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, 1212 bool is_parallel) 1213 { 1214 tree next, loop_clauses, nc; 1215 1216 loop_clauses = *not_loop_clauses = NULL_TREE; 1217 for (; clauses ; clauses = next) 1218 { 1219 next = OMP_CLAUSE_CHAIN (clauses); 1220 1221 switch (OMP_CLAUSE_CODE (clauses)) 1222 { 1223 /* Loop clauses. */ 1224 case OMP_CLAUSE_COLLAPSE: 1225 case OMP_CLAUSE_TILE: 1226 case OMP_CLAUSE_GANG: 1227 case OMP_CLAUSE_WORKER: 1228 case OMP_CLAUSE_VECTOR: 1229 case OMP_CLAUSE_AUTO: 1230 case OMP_CLAUSE_SEQ: 1231 case OMP_CLAUSE_INDEPENDENT: 1232 case OMP_CLAUSE_PRIVATE: 1233 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 1234 loop_clauses = clauses; 1235 break; 1236 1237 /* Reductions must be duplicated on both constructs. 
*/ 1238 case OMP_CLAUSE_REDUCTION: 1239 if (is_parallel) 1240 { 1241 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1242 OMP_CLAUSE_REDUCTION); 1243 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); 1244 OMP_CLAUSE_REDUCTION_CODE (nc) 1245 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1246 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; 1247 *not_loop_clauses = nc; 1248 } 1249 1250 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 1251 loop_clauses = clauses; 1252 break; 1253 1254 /* Parallel/kernels clauses. */ 1255 default: 1256 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; 1257 *not_loop_clauses = clauses; 1258 break; 1259 } 1260 } 1261 1262 return loop_clauses; 1263 } 1264 1265 /* This function attempts to split or duplicate clauses for OpenMP 1266 combined/composite constructs. Right now there are 26 different 1267 constructs. CODE is the innermost construct in the combined construct, 1268 and MASK allows to determine which constructs are combined together, 1269 as every construct has at least one clause that no other construct 1270 has (except for OMP_SECTIONS, but that can be only combined with parallel, 1271 and OMP_MASTER, which doesn't have any clauses at all). 
   OpenMP combined/composite constructs are:
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp distribute simd
   #pragma omp for simd
   #pragma omp master taskloop
   #pragma omp master taskloop simd
   #pragma omp parallel for
   #pragma omp parallel for simd
   #pragma omp parallel master
   #pragma omp parallel master taskloop
   #pragma omp parallel master taskloop simd
   #pragma omp parallel sections
   #pragma omp target parallel
   #pragma omp target parallel for
   #pragma omp target parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd
   #pragma omp target teams distribute simd
   #pragma omp target simd
   #pragma omp taskloop simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp teams distribute simd  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  /* Mask bits used below to detect which constructs participate in the
     combined construct (cf. the flag_checking asserts at the end):
     MAP => target, NUM_TEAMS => teams, DIST_SCHEDULE => distribute,
     NUM_THREADS => parallel, SCHEDULE => for, NOGROUP => taskloop.  */
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	  cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	    = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* For each clause, compute S, the construct it should go to, possibly
     chaining duplicates of the clause onto other constructs first.  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_NONTEMPORAL:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but master,
	   it is enough to put it on the innermost one other than master.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd and master.  Put it on the outermost of those and duplicate on
	   teams and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		/* This must be
		   #pragma omp parallel master taskloop{, simd}.  */
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp {,{,parallel }master }taskloop simd
		 or
		 #pragma omp {,parallel }master taskloop.  */
	      gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections, taskloop and
	   simd.  In parallel {for{, simd},sections} we actually want to put
	   it on parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  if (code == OMP_TASKLOOP)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
	      cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_CODE (clauses));
		  if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		    OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  else
		    OMP_CLAUSE_DEFAULT_KIND (c)
		      = OMP_CLAUSE_DEFAULT_KIND (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections, taskloop
	   and teams.  Duplicate it on all of them, but omit on for or
	   sections if parallel is present.  If taskloop is combined with
	   parallel, omit it on parallel.  */
	case OMP_CLAUSE_REDUCTION:
	  if (OMP_CLAUSE_REDUCTION_TASK (clauses))
	    {
	      if (code == OMP_SIMD /* || code == OMP_LOOP */)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "combined with %<simd%>" /* or %<loop%> */);
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	      else if (code != OMP_SECTIONS
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "not combined with %<parallel%>, %<for%> or "
			    "%<sections%>");
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
		  cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS
		   || code == OMP_PARALLEL
		   || code == OMP_MASTER)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_TASKLOOP)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if (code == OMP_SIMD)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_SIMD;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IN_REDUCTION:
	  /* in_reduction on taskloop simd becomes reduction on the simd
	     and keeps being in_reduction on taskloop.  */
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	case OMP_CLAUSE_IF:
	  /* With an if-modifier, the clause goes only to the construct
	     the modifier names (if that construct is present).  */
	  if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
	    {
	      s = C_OMP_CLAUSE_SPLIT_COUNT;
	      switch (OMP_CLAUSE_IF_MODIFIER (clauses))
		{
		case OMP_PARALLEL:
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  break;
		case OMP_SIMD:
		  if (code == OMP_SIMD)
		    s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		case OMP_TASKLOOP:
		  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		      != 0)
		    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
		  break;
		case OMP_TARGET:
		  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
		      != 0)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		default:
		  break;
		}
	      if (s != C_OMP_CLAUSE_SPLIT_COUNT)
		break;
	      /* Error-recovery here, invalid if-modifier specified, add the
		 clause to just one construct.  */
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TARGET;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else if (code == OMP_SIMD)
		s = C_OMP_CLAUSE_SPLIT_SIMD;
	      else
		gcc_unreachable ();
	      break;
	    }
	  /* Otherwise, duplicate if clause to all constructs.  */
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			   | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		  cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.  Therefore,
	     put it on target construct if present, because that can only
	     be combined with parallel for{, simd} and not with for{, simd},
	     otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Prepend the (original) clause onto the construct chosen above.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  /* With checking enabled, verify no clause was assigned to a construct
     that is not part of the combined construct according to MASK.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}


/* qsort callback to compare #pragma omp declare simd clauses.
*/ 1903 1904 static int 1905 c_omp_declare_simd_clause_cmp (const void *p, const void *q) 1906 { 1907 tree a = *(const tree *) p; 1908 tree b = *(const tree *) q; 1909 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) 1910 { 1911 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) 1912 return -1; 1913 return 1; 1914 } 1915 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN 1916 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH 1917 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) 1918 { 1919 int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); 1920 int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); 1921 if (c < d) 1922 return 1; 1923 if (c > d) 1924 return -1; 1925 } 1926 return 0; 1927 } 1928 1929 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd 1930 CLAUSES on FNDECL into argument indexes and sort them. */ 1931 1932 tree 1933 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) 1934 { 1935 tree c; 1936 vec<tree> clvec = vNULL; 1937 1938 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 1939 { 1940 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 1941 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 1942 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 1943 { 1944 tree decl = OMP_CLAUSE_DECL (c); 1945 tree arg; 1946 int idx; 1947 for (arg = parms, idx = 0; arg; 1948 arg = TREE_CHAIN (arg), idx++) 1949 if (arg == decl) 1950 break; 1951 if (arg == NULL_TREE) 1952 { 1953 error_at (OMP_CLAUSE_LOCATION (c), 1954 "%qD is not an function argument", decl); 1955 continue; 1956 } 1957 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); 1958 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR 1959 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) 1960 { 1961 decl = OMP_CLAUSE_LINEAR_STEP (c); 1962 for (arg = parms, idx = 0; arg; 1963 arg = TREE_CHAIN (arg), idx++) 1964 if (arg == decl) 1965 break; 1966 if (arg == NULL_TREE) 1967 { 1968 error_at (OMP_CLAUSE_LOCATION (c), 1969 "%qD is not an function argument", decl); 1970 continue; 1971 } 1972 OMP_CLAUSE_LINEAR_STEP (c) 1973 = 
build_int_cst (integer_type_node, idx); 1974 } 1975 } 1976 clvec.safe_push (c); 1977 } 1978 if (!clvec.is_empty ()) 1979 { 1980 unsigned int len = clvec.length (), i; 1981 clvec.qsort (c_omp_declare_simd_clause_cmp); 1982 clauses = clvec[0]; 1983 for (i = 0; i < len; i++) 1984 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE; 1985 } 1986 else 1987 clauses = NULL_TREE; 1988 clvec.release (); 1989 return clauses; 1990 } 1991 1992 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */ 1993 1994 void 1995 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) 1996 { 1997 tree c; 1998 1999 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 2000 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 2001 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 2002 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 2003 { 2004 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; 2005 tree arg; 2006 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; 2007 arg = TREE_CHAIN (arg), i++) 2008 if (i == idx) 2009 break; 2010 gcc_assert (arg); 2011 OMP_CLAUSE_DECL (c) = arg; 2012 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR 2013 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) 2014 { 2015 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c)); 2016 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; 2017 arg = TREE_CHAIN (arg), i++) 2018 if (i == idx) 2019 break; 2020 gcc_assert (arg); 2021 OMP_CLAUSE_LINEAR_STEP (c) = arg; 2022 } 2023 } 2024 } 2025 2026 /* Return true for __func__ and similar function-local predefined 2027 variables (which are in OpenMP predetermined shared, allowed in 2028 shared/firstprivate clauses). 
*/ 2029 2030 bool 2031 c_omp_predefined_variable (tree decl) 2032 { 2033 if (VAR_P (decl) 2034 && DECL_ARTIFICIAL (decl) 2035 && TREE_READONLY (decl) 2036 && TREE_STATIC (decl) 2037 && DECL_NAME (decl) 2038 && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME] 2039 || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME] 2040 || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME])) 2041 return true; 2042 return false; 2043 } 2044 2045 /* True if OpenMP sharing attribute of DECL is predetermined. */ 2046 2047 enum omp_clause_default_kind 2048 c_omp_predetermined_sharing (tree decl) 2049 { 2050 /* Predetermine artificial variables holding integral values, those 2051 are usually result of gimplify_one_sizepos or SAVE_EXPR 2052 gimplification. */ 2053 if (VAR_P (decl) 2054 && DECL_ARTIFICIAL (decl) 2055 && INTEGRAL_TYPE_P (TREE_TYPE (decl))) 2056 return OMP_CLAUSE_DEFAULT_SHARED; 2057 2058 if (c_omp_predefined_variable (decl)) 2059 return OMP_CLAUSE_DEFAULT_SHARED; 2060 2061 return OMP_CLAUSE_DEFAULT_UNSPECIFIED; 2062 } 2063