/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		  Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "stringpool.h"
#include "omp-general.h"
#include "gomp-constants.h"
#include "memmodel.h"
#include "attribs.h"
#include "gimplify.h"


/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			  TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  BODY is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
{
  tree stmt = make_node (OMP_TASKGROUP);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASKGROUP_BODY (stmt) = body;
  OMP_TASKGROUP_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.
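   CLAUSES are the clauses attached to the construct, if any; for
   critical this can be a hint clause.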
   */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  if (!flag_openmp	/* flag_openmp_simd  */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
	  || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}


/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped,
		     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...
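     In practice only scalar integral, pointer and floating-point types
     are accepted here; everything else is rejected just below.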
     */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ?
			 blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.
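	 E.g. an atomic capture that reads one variable but updates a
	 different one is diagnosed below.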
	 */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}


/* Return true if TYPE is the implementation's omp_depend_t.  */

bool
c_omp_depend_t_p (tree type)
{
  type = TYPE_MAIN_VARIANT (type);
  return (TREE_CODE (type) == RECORD_TYPE
	  && TYPE_NAME (type)
	  && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	       ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
	      == get_identifier ("omp_depend_t"))
	  && (!TYPE_CONTEXT (type)
	      || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
	  && COMPLETE_TYPE_P (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && !compare_tree_int (TYPE_SIZE (type),
				2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
}


/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.
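   DEPOBJ is the depend object expression, CLAUSE the depend clause when
   one was given, and KIND the dependence type from an update or destroy
   clause otherwise (OMP_CLAUSE_DEPEND_LAST for destroy).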
*/ 448 449 void 450 c_finish_omp_depobj (location_t loc, tree depobj, 451 enum omp_clause_depend_kind kind, tree clause) 452 { 453 tree t = NULL_TREE; 454 if (!error_operand_p (depobj)) 455 { 456 if (!c_omp_depend_t_p (TREE_TYPE (depobj))) 457 { 458 error_at (EXPR_LOC_OR_LOC (depobj, loc), 459 "type of %<depobj%> expression is not %<omp_depend_t%>"); 460 depobj = error_mark_node; 461 } 462 else if (TYPE_READONLY (TREE_TYPE (depobj))) 463 { 464 error_at (EXPR_LOC_OR_LOC (depobj, loc), 465 "%<const%> qualified %<depobj%> expression"); 466 depobj = error_mark_node; 467 } 468 } 469 else 470 depobj = error_mark_node; 471 472 if (clause == error_mark_node) 473 return; 474 475 if (clause) 476 { 477 gcc_assert (TREE_CODE (clause) == OMP_CLAUSE 478 && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND); 479 if (OMP_CLAUSE_CHAIN (clause)) 480 error_at (OMP_CLAUSE_LOCATION (clause), 481 "more than one locator in %<depend%> clause on %<depobj%> " 482 "construct"); 483 switch (OMP_CLAUSE_DEPEND_KIND (clause)) 484 { 485 case OMP_CLAUSE_DEPEND_DEPOBJ: 486 error_at (OMP_CLAUSE_LOCATION (clause), 487 "%<depobj%> dependence type specified in %<depend%> " 488 "clause on %<depobj%> construct"); 489 return; 490 case OMP_CLAUSE_DEPEND_SOURCE: 491 case OMP_CLAUSE_DEPEND_SINK: 492 error_at (OMP_CLAUSE_LOCATION (clause), 493 "%<depend(%s)%> is only allowed in %<omp ordered%>", 494 OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE 495 ? "source" : "sink"); 496 return; 497 case OMP_CLAUSE_DEPEND_IN: 498 case OMP_CLAUSE_DEPEND_OUT: 499 case OMP_CLAUSE_DEPEND_INOUT: 500 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET: 501 kind = OMP_CLAUSE_DEPEND_KIND (clause); 502 t = OMP_CLAUSE_DECL (clause); 503 gcc_assert (t); 504 if (TREE_CODE (t) == TREE_LIST 505 && TREE_PURPOSE (t) 506 && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC) 507 { 508 error_at (OMP_CLAUSE_LOCATION (clause), 509 "%<iterator%> modifier may not be specified on " 510 "%<depobj%> construct"); 511 return; 512 } 513 if (TREE_CODE (t) == COMPOUND_EXPR) 514 { 515 tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1)); 516 t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0), 517 t1); 518 } 519 else 520 t = build_fold_addr_expr (t); 521 break; 522 default: 523 gcc_unreachable (); 524 } 525 } 526 else 527 gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE); 528 529 if (depobj == error_mark_node) 530 return; 531 532 depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj); 533 tree dtype 534 = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node), 535 true); 536 depobj = fold_convert (dtype, depobj); 537 tree r; 538 if (clause) 539 { 540 depobj = save_expr (depobj); 541 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR); 542 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t)); 543 } 544 int k; 545 switch (kind) 546 { 547 case OMP_CLAUSE_DEPEND_IN: 548 k = GOMP_DEPEND_IN; 549 break; 550 case OMP_CLAUSE_DEPEND_OUT: 551 k = GOMP_DEPEND_OUT; 552 break; 553 case OMP_CLAUSE_DEPEND_INOUT: 554 k = GOMP_DEPEND_INOUT; 555 break; 556 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET: 557 k = GOMP_DEPEND_MUTEXINOUTSET; 558 break; 559 case OMP_CLAUSE_DEPEND_LAST: 560 k = -1; 561 break; 562 default: 563 gcc_unreachable (); 564 } 565 t = build_int_cst (ptr_type_node, k); 566 depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj, 567 TYPE_SIZE_UNIT (ptr_type_node)); 568 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR); 569 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t)); 570 } 571 572 573 /* Complete a #pragma omp flush construct. 
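   MO is the requested memory order, or MEMMODEL_LAST when no memory-order
   clause was given (in which case a full barrier is emitted).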
We don't do anything with 574 the variable list that the syntax allows. LOC is the location of 575 the #pragma. */ 576 577 void 578 c_finish_omp_flush (location_t loc, int mo) 579 { 580 tree x; 581 582 if (mo == MEMMODEL_LAST) 583 { 584 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); 585 x = build_call_expr_loc (loc, x, 0); 586 } 587 else 588 { 589 x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE); 590 x = build_call_expr_loc (loc, x, 1, 591 build_int_cst (integer_type_node, mo)); 592 } 593 add_stmt (x); 594 } 595 596 597 /* Check and canonicalize OMP_FOR increment expression. 598 Helper function for c_finish_omp_for. */ 599 600 static tree 601 check_omp_for_incr_expr (location_t loc, tree exp, tree decl) 602 { 603 tree t; 604 605 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp)) 606 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl))) 607 return error_mark_node; 608 609 if (exp == decl) 610 return build_int_cst (TREE_TYPE (exp), 0); 611 612 switch (TREE_CODE (exp)) 613 { 614 CASE_CONVERT: 615 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 616 if (t != error_mark_node) 617 return fold_convert_loc (loc, TREE_TYPE (exp), t); 618 break; 619 case MINUS_EXPR: 620 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 621 if (t != error_mark_node) 622 return fold_build2_loc (loc, MINUS_EXPR, 623 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); 624 break; 625 case PLUS_EXPR: 626 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 627 if (t != error_mark_node) 628 return fold_build2_loc (loc, PLUS_EXPR, 629 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); 630 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl); 631 if (t != error_mark_node) 632 return fold_build2_loc (loc, PLUS_EXPR, 633 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t); 634 break; 635 case COMPOUND_EXPR: 636 { 637 /* cp_build_modify_expr forces preevaluation of the RHS to make 638 sure that it is evaluated before the lvalue-rvalue conversion 639 is applied to the LHS. Reconstruct the original expression. */ 640 tree op0 = TREE_OPERAND (exp, 0); 641 if (TREE_CODE (op0) == TARGET_EXPR 642 && !VOID_TYPE_P (TREE_TYPE (op0))) 643 { 644 tree op1 = TREE_OPERAND (exp, 1); 645 tree temp = TARGET_EXPR_SLOT (op0); 646 if (BINARY_CLASS_P (op1) 647 && TREE_OPERAND (op1, 1) == temp) 648 { 649 op1 = copy_node (op1); 650 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0); 651 return check_omp_for_incr_expr (loc, op1, decl); 652 } 653 } 654 break; 655 } 656 default: 657 break; 658 } 659 660 return error_mark_node; 661 } 662 663 /* If the OMP_FOR increment expression in INCR is of pointer type, 664 canonicalize it into an expression handled by gimplify_omp_for() 665 and return it. DECL is the iteration variable. */ 666 667 static tree 668 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr) 669 { 670 if (POINTER_TYPE_P (TREE_TYPE (decl)) 671 && TREE_OPERAND (incr, 1)) 672 { 673 tree t = fold_convert_loc (loc, 674 sizetype, TREE_OPERAND (incr, 1)); 675 676 if (TREE_CODE (incr) == POSTDECREMENT_EXPR 677 || TREE_CODE (incr) == PREDECREMENT_EXPR) 678 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t); 679 t = fold_build_pointer_plus (decl, t); 680 incr = build2 (MODIFY_EXPR, void_type_node, decl, t); 681 } 682 return incr; 683 } 684 685 /* Validate and generate OMP_FOR. 686 DECLV is a vector of iteration variables, for each collapsed loop. 687 688 ORIG_DECLV, if non-NULL, is a vector with the original iteration 689 variables (prior to any transformations, by say, C++ iterators). 
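   CODE is the tree code of the construct to build (e.g. OMP_FOR, OMP_SIMD,
   OMP_DISTRIBUTE, OMP_TASKLOOP, OMP_LOOP or OACC_LOOP).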
690 691 INITV, CONDV and INCRV are vectors containing initialization 692 expressions, controlling predicates and increment expressions. 693 BODY is the body of the loop and PRE_BODY statements that go before 694 the loop. */ 695 696 tree 697 c_finish_omp_for (location_t locus, enum tree_code code, tree declv, 698 tree orig_declv, tree initv, tree condv, tree incrv, 699 tree body, tree pre_body, bool final_p) 700 { 701 location_t elocus; 702 bool fail = false; 703 int i; 704 705 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); 706 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); 707 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); 708 for (i = 0; i < TREE_VEC_LENGTH (declv); i++) 709 { 710 tree decl = TREE_VEC_ELT (declv, i); 711 tree init = TREE_VEC_ELT (initv, i); 712 tree cond = TREE_VEC_ELT (condv, i); 713 tree incr = TREE_VEC_ELT (incrv, i); 714 715 elocus = locus; 716 if (EXPR_HAS_LOCATION (init)) 717 elocus = EXPR_LOCATION (init); 718 719 /* Validate the iteration variable. */ 720 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) 721 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) 722 { 723 error_at (elocus, "invalid type for iteration variable %qE", decl); 724 fail = true; 725 } 726 else if (TYPE_ATOMIC (TREE_TYPE (decl))) 727 { 728 error_at (elocus, "%<_Atomic%> iteration variable %qE", decl); 729 fail = true; 730 /* _Atomic iterator confuses stuff too much, so we risk ICE 731 trying to diagnose it further. */ 732 continue; 733 } 734 735 /* In the case of "for (int i = 0...)", init will be a decl. It should 736 have a DECL_INITIAL that we can turn into an assignment. */ 737 if (init == decl) 738 { 739 elocus = DECL_SOURCE_LOCATION (decl); 740 741 init = DECL_INITIAL (decl); 742 if (init == NULL) 743 { 744 error_at (elocus, "%qE is not initialized", decl); 745 init = integer_zero_node; 746 fail = true; 747 } 748 DECL_INITIAL (decl) = NULL_TREE; 749 750 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, 751 /* FIXME diagnostics: This should 752 be the location of the INIT. */ 753 elocus, 754 init, 755 NULL_TREE); 756 } 757 if (init != error_mark_node) 758 { 759 gcc_assert (TREE_CODE (init) == MODIFY_EXPR); 760 gcc_assert (TREE_OPERAND (init, 0) == decl); 761 } 762 763 if (cond == NULL_TREE) 764 { 765 error_at (elocus, "missing controlling predicate"); 766 fail = true; 767 } 768 else 769 { 770 bool cond_ok = false; 771 772 /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with 773 evaluation of the vla VAR_DECL. We need to readd 774 them to the non-decl operand. See PR45784. */ 775 while (TREE_CODE (cond) == COMPOUND_EXPR) 776 cond = TREE_OPERAND (cond, 1); 777 778 if (EXPR_HAS_LOCATION (cond)) 779 elocus = EXPR_LOCATION (cond); 780 781 if (TREE_CODE (cond) == LT_EXPR 782 || TREE_CODE (cond) == LE_EXPR 783 || TREE_CODE (cond) == GT_EXPR 784 || TREE_CODE (cond) == GE_EXPR 785 || TREE_CODE (cond) == NE_EXPR 786 || TREE_CODE (cond) == EQ_EXPR) 787 { 788 tree op0 = TREE_OPERAND (cond, 0); 789 tree op1 = TREE_OPERAND (cond, 1); 790 791 /* 2.5.1. The comparison in the condition is computed in 792 the type of DECL, otherwise the behavior is undefined. 
793 794 For example: 795 long n; int i; 796 i < n; 797 798 according to ISO will be evaluated as: 799 (long)i < n; 800 801 We want to force: 802 i < (int)n; */ 803 if (TREE_CODE (op0) == NOP_EXPR 804 && decl == TREE_OPERAND (op0, 0)) 805 { 806 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); 807 TREE_OPERAND (cond, 1) 808 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), 809 TREE_OPERAND (cond, 1)); 810 } 811 else if (TREE_CODE (op1) == NOP_EXPR 812 && decl == TREE_OPERAND (op1, 0)) 813 { 814 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); 815 TREE_OPERAND (cond, 0) 816 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), 817 TREE_OPERAND (cond, 0)); 818 } 819 820 if (decl == TREE_OPERAND (cond, 0)) 821 cond_ok = true; 822 else if (decl == TREE_OPERAND (cond, 1)) 823 { 824 TREE_SET_CODE (cond, 825 swap_tree_comparison (TREE_CODE (cond))); 826 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); 827 TREE_OPERAND (cond, 0) = decl; 828 cond_ok = true; 829 } 830 831 if (TREE_CODE (cond) == NE_EXPR 832 || TREE_CODE (cond) == EQ_EXPR) 833 { 834 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) 835 { 836 if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR) 837 cond_ok = false; 838 } 839 else if (operand_equal_p (TREE_OPERAND (cond, 1), 840 TYPE_MIN_VALUE (TREE_TYPE (decl)), 841 0)) 842 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR 843 ? GT_EXPR : LE_EXPR); 844 else if (operand_equal_p (TREE_OPERAND (cond, 1), 845 TYPE_MAX_VALUE (TREE_TYPE (decl)), 846 0)) 847 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR 848 ? LT_EXPR : GE_EXPR); 849 else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR) 850 cond_ok = false; 851 } 852 853 if (cond_ok && TREE_VEC_ELT (condv, i) != cond) 854 { 855 tree ce = NULL_TREE, *pce = &ce; 856 tree type = TREE_TYPE (TREE_OPERAND (cond, 1)); 857 for (tree c = TREE_VEC_ELT (condv, i); c != cond; 858 c = TREE_OPERAND (c, 1)) 859 { 860 *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0), 861 TREE_OPERAND (cond, 1)); 862 pce = &TREE_OPERAND (*pce, 1); 863 } 864 TREE_OPERAND (cond, 1) = ce; 865 TREE_VEC_ELT (condv, i) = cond; 866 } 867 } 868 869 if (!cond_ok) 870 { 871 error_at (elocus, "invalid controlling predicate"); 872 fail = true; 873 } 874 } 875 876 if (incr == NULL_TREE) 877 { 878 error_at (elocus, "missing increment expression"); 879 fail = true; 880 } 881 else 882 { 883 bool incr_ok = false; 884 885 if (EXPR_HAS_LOCATION (incr)) 886 elocus = EXPR_LOCATION (incr); 887 888 /* Check all the valid increment expressions: v++, v--, ++v, --v, 889 v = v + incr, v = incr + v and v = v - incr. */ 890 switch (TREE_CODE (incr)) 891 { 892 case POSTINCREMENT_EXPR: 893 case PREINCREMENT_EXPR: 894 case POSTDECREMENT_EXPR: 895 case PREDECREMENT_EXPR: 896 if (TREE_OPERAND (incr, 0) != decl) 897 break; 898 899 incr_ok = true; 900 if (!fail 901 && TREE_CODE (cond) == NE_EXPR 902 && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE 903 && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))) 904 && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))) 905 != INTEGER_CST)) 906 { 907 /* For pointer to VLA, transform != into < or > 908 depending on whether incr is increment or decrement. 
*/ 909 if (TREE_CODE (incr) == PREINCREMENT_EXPR 910 || TREE_CODE (incr) == POSTINCREMENT_EXPR) 911 TREE_SET_CODE (cond, LT_EXPR); 912 else 913 TREE_SET_CODE (cond, GT_EXPR); 914 } 915 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr); 916 break; 917 918 case COMPOUND_EXPR: 919 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR 920 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR) 921 break; 922 incr = TREE_OPERAND (incr, 1); 923 /* FALLTHRU */ 924 case MODIFY_EXPR: 925 if (TREE_OPERAND (incr, 0) != decl) 926 break; 927 if (TREE_OPERAND (incr, 1) == decl) 928 break; 929 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR 930 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl 931 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) 932 incr_ok = true; 933 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR 934 || (TREE_CODE (TREE_OPERAND (incr, 1)) 935 == POINTER_PLUS_EXPR)) 936 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) 937 incr_ok = true; 938 else 939 { 940 tree t = check_omp_for_incr_expr (elocus, 941 TREE_OPERAND (incr, 1), 942 decl); 943 if (t != error_mark_node) 944 { 945 incr_ok = true; 946 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); 947 incr = build2 (MODIFY_EXPR, void_type_node, decl, t); 948 } 949 } 950 if (!fail 951 && incr_ok 952 && TREE_CODE (cond) == NE_EXPR) 953 { 954 tree i = TREE_OPERAND (incr, 1); 955 i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl); 956 i = c_fully_fold (i, false, NULL); 957 if (!final_p 958 && TREE_CODE (i) != INTEGER_CST) 959 ; 960 else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) 961 { 962 tree unit 963 = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))); 964 if (unit) 965 { 966 enum tree_code ccode = GT_EXPR; 967 unit = c_fully_fold (unit, false, NULL); 968 i = fold_convert (TREE_TYPE (unit), i); 969 if (operand_equal_p (unit, i, 0)) 970 ccode = LT_EXPR; 971 if (ccode == GT_EXPR) 972 { 973 i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i); 974 if (i == NULL_TREE 975 || !operand_equal_p (unit, i, 0)) 976 { 977 error_at (elocus, 978 "increment is not constant 1 or " 979 "-1 for %<!=%> condition"); 980 fail = true; 981 } 982 } 983 if (TREE_CODE (unit) != INTEGER_CST) 984 /* For pointer to VLA, transform != into < or > 985 depending on whether the pointer is 986 incremented or decremented in each 987 iteration. */ 988 TREE_SET_CODE (cond, ccode); 989 } 990 } 991 else 992 { 993 if (!integer_onep (i) && !integer_minus_onep (i)) 994 { 995 error_at (elocus, 996 "increment is not constant 1 or -1 for" 997 " %<!=%> condition"); 998 fail = true; 999 } 1000 } 1001 } 1002 break; 1003 1004 default: 1005 break; 1006 } 1007 if (!incr_ok) 1008 { 1009 error_at (elocus, "invalid increment expression"); 1010 fail = true; 1011 } 1012 } 1013 1014 TREE_VEC_ELT (initv, i) = init; 1015 TREE_VEC_ELT (incrv, i) = incr; 1016 } 1017 1018 if (fail) 1019 return NULL; 1020 else 1021 { 1022 tree t = make_node (code); 1023 1024 TREE_TYPE (t) = void_type_node; 1025 OMP_FOR_INIT (t) = initv; 1026 OMP_FOR_COND (t) = condv; 1027 OMP_FOR_INCR (t) = incrv; 1028 OMP_FOR_BODY (t) = body; 1029 OMP_FOR_PRE_BODY (t) = pre_body; 1030 OMP_FOR_ORIG_DECLS (t) = orig_declv; 1031 1032 SET_EXPR_LOCATION (t, locus); 1033 return t; 1034 } 1035 } 1036 1037 /* Type for passing data in between c_omp_check_loop_iv and 1038 c_omp_check_loop_iv_r. 
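   KIND selects the diagnostic emitted by c_omp_check_loop_iv_r: 0 for the
   initializer, 1 for the condition and 2 for the increment expression.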
*/ 1039 1040 struct c_omp_check_loop_iv_data 1041 { 1042 tree declv; 1043 bool fail; 1044 location_t stmt_loc; 1045 location_t expr_loc; 1046 int kind; 1047 walk_tree_lh lh; 1048 hash_set<tree> *ppset; 1049 }; 1050 1051 /* Helper function called via walk_tree, to diagnose uses 1052 of associated loop IVs inside of lb, b and incr expressions 1053 of OpenMP loops. */ 1054 1055 static tree 1056 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data) 1057 { 1058 struct c_omp_check_loop_iv_data *d 1059 = (struct c_omp_check_loop_iv_data *) data; 1060 if (DECL_P (*tp)) 1061 { 1062 int i; 1063 for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++) 1064 if (*tp == TREE_VEC_ELT (d->declv, i) 1065 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST 1066 && *tp == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i))) 1067 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST 1068 && TREE_CHAIN (TREE_VEC_ELT (d->declv, i)) 1069 && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i))) 1070 == TREE_VEC) 1071 && *tp == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv, 1072 i)), 2))) 1073 { 1074 location_t loc = d->expr_loc; 1075 if (loc == UNKNOWN_LOCATION) 1076 loc = d->stmt_loc; 1077 switch (d->kind) 1078 { 1079 case 0: 1080 error_at (loc, "initializer expression refers to " 1081 "iteration variable %qD", *tp); 1082 break; 1083 case 1: 1084 error_at (loc, "condition expression refers to " 1085 "iteration variable %qD", *tp); 1086 break; 1087 case 2: 1088 error_at (loc, "increment expression refers to " 1089 "iteration variable %qD", *tp); 1090 break; 1091 } 1092 d->fail = true; 1093 } 1094 } 1095 /* Don't walk dtors added by C++ wrap_cleanups_r. */ 1096 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR 1097 && TRY_CATCH_IS_CLEANUP (*tp)) 1098 { 1099 *walk_subtrees = 0; 1100 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data, 1101 d->ppset, d->lh); 1102 } 1103 1104 return NULL_TREE; 1105 } 1106 1107 /* Diagnose invalid references to loop iterators in lb, b and incr 1108 expressions. */ 1109 1110 bool 1111 c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh) 1112 { 1113 hash_set<tree> pset; 1114 struct c_omp_check_loop_iv_data data; 1115 int i; 1116 1117 data.declv = declv; 1118 data.fail = false; 1119 data.stmt_loc = EXPR_LOCATION (stmt); 1120 data.lh = lh; 1121 data.ppset = &pset; 1122 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++) 1123 { 1124 tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i); 1125 gcc_assert (TREE_CODE (init) == MODIFY_EXPR); 1126 tree decl = TREE_OPERAND (init, 0); 1127 tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i); 1128 gcc_assert (COMPARISON_CLASS_P (cond)); 1129 gcc_assert (TREE_OPERAND (cond, 0) == decl); 1130 tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i); 1131 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1)); 1132 data.kind = 0; 1133 walk_tree_1 (&TREE_OPERAND (init, 1), 1134 c_omp_check_loop_iv_r, &data, &pset, lh); 1135 /* Don't warn for C++ random access iterators here, the 1136 expression then involves the subtraction and always refers 1137 to the original value. The C++ FE needs to warn on those 1138 earlier. 
*/ 1139 if (decl == TREE_VEC_ELT (declv, i) 1140 || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST 1141 && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i)))) 1142 { 1143 data.expr_loc = EXPR_LOCATION (cond); 1144 data.kind = 1; 1145 walk_tree_1 (&TREE_OPERAND (cond, 1), 1146 c_omp_check_loop_iv_r, &data, &pset, lh); 1147 } 1148 if (TREE_CODE (incr) == MODIFY_EXPR) 1149 { 1150 gcc_assert (TREE_OPERAND (incr, 0) == decl); 1151 incr = TREE_OPERAND (incr, 1); 1152 data.kind = 2; 1153 if (TREE_CODE (incr) == PLUS_EXPR 1154 && TREE_OPERAND (incr, 1) == decl) 1155 { 1156 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0)); 1157 walk_tree_1 (&TREE_OPERAND (incr, 0), 1158 c_omp_check_loop_iv_r, &data, &pset, lh); 1159 } 1160 else 1161 { 1162 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1)); 1163 walk_tree_1 (&TREE_OPERAND (incr, 1), 1164 c_omp_check_loop_iv_r, &data, &pset, lh); 1165 } 1166 } 1167 } 1168 return !data.fail; 1169 } 1170 1171 /* Similar, but allows to check the init or cond expressions individually. */ 1172 1173 bool 1174 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl, 1175 tree init, tree cond, walk_tree_lh lh) 1176 { 1177 hash_set<tree> pset; 1178 struct c_omp_check_loop_iv_data data; 1179 1180 data.declv = declv; 1181 data.fail = false; 1182 data.stmt_loc = stmt_loc; 1183 data.lh = lh; 1184 data.ppset = &pset; 1185 if (init) 1186 { 1187 data.expr_loc = EXPR_LOCATION (init); 1188 data.kind = 0; 1189 walk_tree_1 (&init, 1190 c_omp_check_loop_iv_r, &data, &pset, lh); 1191 } 1192 if (cond) 1193 { 1194 gcc_assert (COMPARISON_CLASS_P (cond)); 1195 data.expr_loc = EXPR_LOCATION (init); 1196 data.kind = 1; 1197 if (TREE_OPERAND (cond, 0) == decl) 1198 walk_tree_1 (&TREE_OPERAND (cond, 1), 1199 c_omp_check_loop_iv_r, &data, &pset, lh); 1200 else 1201 walk_tree_1 (&TREE_OPERAND (cond, 0), 1202 c_omp_check_loop_iv_r, &data, &pset, lh); 1203 } 1204 return !data.fail; 1205 } 1206 1207 /* This function splits clauses for OpenACC combined loop 1208 constructs. OpenACC combined loop constructs are: 1209 #pragma acc kernels loop 1210 #pragma acc parallel loop */ 1211 1212 tree 1213 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, 1214 bool is_parallel) 1215 { 1216 tree next, loop_clauses, nc; 1217 1218 loop_clauses = *not_loop_clauses = NULL_TREE; 1219 for (; clauses ; clauses = next) 1220 { 1221 next = OMP_CLAUSE_CHAIN (clauses); 1222 1223 switch (OMP_CLAUSE_CODE (clauses)) 1224 { 1225 /* Loop clauses. */ 1226 case OMP_CLAUSE_COLLAPSE: 1227 case OMP_CLAUSE_TILE: 1228 case OMP_CLAUSE_GANG: 1229 case OMP_CLAUSE_WORKER: 1230 case OMP_CLAUSE_VECTOR: 1231 case OMP_CLAUSE_AUTO: 1232 case OMP_CLAUSE_SEQ: 1233 case OMP_CLAUSE_INDEPENDENT: 1234 case OMP_CLAUSE_PRIVATE: 1235 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 1236 loop_clauses = clauses; 1237 break; 1238 1239 /* Reductions must be duplicated on both constructs. */ 1240 case OMP_CLAUSE_REDUCTION: 1241 if (is_parallel) 1242 { 1243 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1244 OMP_CLAUSE_REDUCTION); 1245 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); 1246 OMP_CLAUSE_REDUCTION_CODE (nc) 1247 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1248 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; 1249 *not_loop_clauses = nc; 1250 } 1251 1252 OMP_CLAUSE_CHAIN (clauses) = loop_clauses; 1253 loop_clauses = clauses; 1254 break; 1255 1256 /* Parallel/kernels clauses. 
*/ 1257 default: 1258 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; 1259 *not_loop_clauses = clauses; 1260 break; 1261 } 1262 } 1263 1264 return loop_clauses; 1265 } 1266 1267 /* This function attempts to split or duplicate clauses for OpenMP 1268 combined/composite constructs. Right now there are 30 different 1269 constructs. CODE is the innermost construct in the combined construct, 1270 and MASK allows to determine which constructs are combined together, 1271 as every construct has at least one clause that no other construct 1272 has (except for OMP_SECTIONS, but that can be only combined with parallel, 1273 and OMP_MASTER, which doesn't have any clauses at all). 1274 OpenMP combined/composite constructs are: 1275 #pragma omp distribute parallel for 1276 #pragma omp distribute parallel for simd 1277 #pragma omp distribute simd 1278 #pragma omp for simd 1279 #pragma omp master taskloop 1280 #pragma omp master taskloop simd 1281 #pragma omp parallel for 1282 #pragma omp parallel for simd 1283 #pragma omp parallel loop 1284 #pragma omp parallel master 1285 #pragma omp parallel master taskloop 1286 #pragma omp parallel master taskloop simd 1287 #pragma omp parallel sections 1288 #pragma omp target parallel 1289 #pragma omp target parallel for 1290 #pragma omp target parallel for simd 1291 #pragma omp target parallel loop 1292 #pragma omp target teams 1293 #pragma omp target teams distribute 1294 #pragma omp target teams distribute parallel for 1295 #pragma omp target teams distribute parallel for simd 1296 #pragma omp target teams distribute simd 1297 #pragma omp target teams loop 1298 #pragma omp target simd 1299 #pragma omp taskloop simd 1300 #pragma omp teams distribute 1301 #pragma omp teams distribute parallel for 1302 #pragma omp teams distribute parallel for simd 1303 #pragma omp teams distribute simd 1304 #pragma omp teams loop */ 1305 1306 void 1307 c_omp_split_clauses (location_t loc, enum tree_code code, 1308 omp_clause_mask mask, tree clauses, tree *cclauses) 1309 { 1310 tree next, c; 1311 enum c_omp_clause_split s; 1312 int i; 1313 1314 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) 1315 cclauses[i] = NULL; 1316 /* Add implicit nowait clause on 1317 #pragma omp parallel {for,for simd,sections}. */ 1318 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 1319 switch (code) 1320 { 1321 case OMP_FOR: 1322 case OMP_SIMD: 1323 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1324 cclauses[C_OMP_CLAUSE_SPLIT_FOR] 1325 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); 1326 break; 1327 case OMP_SECTIONS: 1328 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] 1329 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); 1330 break; 1331 default: 1332 break; 1333 } 1334 1335 for (; clauses ; clauses = next) 1336 { 1337 next = OMP_CLAUSE_CHAIN (clauses); 1338 1339 switch (OMP_CLAUSE_CODE (clauses)) 1340 { 1341 /* First the clauses that are unique to some constructs. 
*/ 1342 case OMP_CLAUSE_DEVICE: 1343 case OMP_CLAUSE_MAP: 1344 case OMP_CLAUSE_IS_DEVICE_PTR: 1345 case OMP_CLAUSE_DEFAULTMAP: 1346 case OMP_CLAUSE_DEPEND: 1347 s = C_OMP_CLAUSE_SPLIT_TARGET; 1348 break; 1349 case OMP_CLAUSE_NUM_TEAMS: 1350 case OMP_CLAUSE_THREAD_LIMIT: 1351 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1352 break; 1353 case OMP_CLAUSE_DIST_SCHEDULE: 1354 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1355 break; 1356 case OMP_CLAUSE_COPYIN: 1357 case OMP_CLAUSE_NUM_THREADS: 1358 case OMP_CLAUSE_PROC_BIND: 1359 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1360 break; 1361 case OMP_CLAUSE_ORDERED: 1362 s = C_OMP_CLAUSE_SPLIT_FOR; 1363 break; 1364 case OMP_CLAUSE_SCHEDULE: 1365 s = C_OMP_CLAUSE_SPLIT_FOR; 1366 if (code != OMP_SIMD) 1367 OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0; 1368 break; 1369 case OMP_CLAUSE_SAFELEN: 1370 case OMP_CLAUSE_SIMDLEN: 1371 case OMP_CLAUSE_ALIGNED: 1372 case OMP_CLAUSE_NONTEMPORAL: 1373 s = C_OMP_CLAUSE_SPLIT_SIMD; 1374 break; 1375 case OMP_CLAUSE_GRAINSIZE: 1376 case OMP_CLAUSE_NUM_TASKS: 1377 case OMP_CLAUSE_FINAL: 1378 case OMP_CLAUSE_UNTIED: 1379 case OMP_CLAUSE_MERGEABLE: 1380 case OMP_CLAUSE_NOGROUP: 1381 case OMP_CLAUSE_PRIORITY: 1382 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1383 break; 1384 case OMP_CLAUSE_BIND: 1385 s = C_OMP_CLAUSE_SPLIT_LOOP; 1386 break; 1387 /* Duplicate this to all of taskloop, distribute, for, simd and 1388 loop. */ 1389 case OMP_CLAUSE_COLLAPSE: 1390 if (code == OMP_SIMD) 1391 { 1392 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) 1393 | (OMP_CLAUSE_MASK_1 1394 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) 1395 | (OMP_CLAUSE_MASK_1 1396 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) 1397 { 1398 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1399 OMP_CLAUSE_COLLAPSE); 1400 OMP_CLAUSE_COLLAPSE_EXPR (c) 1401 = OMP_CLAUSE_COLLAPSE_EXPR (clauses); 1402 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; 1403 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; 1404 } 1405 else 1406 { 1407 /* This must be #pragma omp target simd */ 1408 s = C_OMP_CLAUSE_SPLIT_SIMD; 1409 break; 1410 } 1411 } 1412 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1413 { 1414 if ((mask & (OMP_CLAUSE_MASK_1 1415 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1416 { 1417 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1418 OMP_CLAUSE_COLLAPSE); 1419 OMP_CLAUSE_COLLAPSE_EXPR (c) 1420 = OMP_CLAUSE_COLLAPSE_EXPR (clauses); 1421 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; 1422 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; 1423 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1424 } 1425 else 1426 s = C_OMP_CLAUSE_SPLIT_FOR; 1427 } 1428 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1429 != 0) 1430 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1431 else if (code == OMP_LOOP) 1432 s = C_OMP_CLAUSE_SPLIT_LOOP; 1433 else 1434 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1435 break; 1436 /* Private clause is supported on all constructs but master, 1437 it is enough to put it on the innermost one other than master. For 1438 #pragma omp {for,sections} put it on parallel though, 1439 as that's what we did for OpenMP 3.1. 
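      So e.g. on #pragma omp parallel for simd a private clause ends up on
      the simd construct, while on #pragma omp parallel for it goes to
      parallel.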
*/ 1440 case OMP_CLAUSE_PRIVATE: 1441 switch (code) 1442 { 1443 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; 1444 case OMP_FOR: case OMP_SECTIONS: 1445 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; 1446 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; 1447 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; 1448 case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; 1449 case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; 1450 case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break; 1451 default: gcc_unreachable (); 1452 } 1453 break; 1454 /* Firstprivate clause is supported on all constructs but 1455 simd, master and loop. Put it on the outermost of those and 1456 duplicate on teams and parallel. */ 1457 case OMP_CLAUSE_FIRSTPRIVATE: 1458 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) 1459 != 0) 1460 { 1461 if (code == OMP_SIMD 1462 && (mask & ((OMP_CLAUSE_MASK_1 1463 << PRAGMA_OMP_CLAUSE_NUM_THREADS) 1464 | (OMP_CLAUSE_MASK_1 1465 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) 1466 { 1467 /* This must be #pragma omp target simd. */ 1468 s = C_OMP_CLAUSE_SPLIT_TARGET; 1469 break; 1470 } 1471 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1472 OMP_CLAUSE_FIRSTPRIVATE); 1473 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1474 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; 1475 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; 1476 } 1477 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1478 != 0) 1479 { 1480 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) 1481 | (OMP_CLAUSE_MASK_1 1482 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) 1483 { 1484 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1485 OMP_CLAUSE_FIRSTPRIVATE); 1486 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1487 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; 1488 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; 1489 if ((mask & (OMP_CLAUSE_MASK_1 1490 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) 1491 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1492 else 1493 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1494 } 1495 else if ((mask & (OMP_CLAUSE_MASK_1 1496 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) 1497 /* This must be 1498 #pragma omp parallel master taskloop{, simd}. */ 1499 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1500 else 1501 /* This must be 1502 #pragma omp parallel{, for{, simd}, sections,loop} 1503 or 1504 #pragma omp target parallel. */ 1505 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1506 } 1507 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1508 != 0) 1509 { 1510 /* This must be one of 1511 #pragma omp {,target }teams {distribute,loop} 1512 #pragma omp target teams 1513 #pragma omp {,target }teams distribute simd. */ 1514 gcc_assert (code == OMP_DISTRIBUTE 1515 || code == OMP_LOOP 1516 || code == OMP_TEAMS 1517 || code == OMP_SIMD); 1518 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1519 } 1520 else if ((mask & (OMP_CLAUSE_MASK_1 1521 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1522 { 1523 /* This must be #pragma omp distribute simd. */ 1524 gcc_assert (code == OMP_SIMD); 1525 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1526 } 1527 else if ((mask & (OMP_CLAUSE_MASK_1 1528 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) 1529 { 1530 /* This must be #pragma omp {,{,parallel }master }taskloop simd 1531 or 1532 #pragma omp {,parallel }master taskloop. */ 1533 gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP); 1534 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1535 } 1536 else 1537 { 1538 /* This must be #pragma omp for simd. 
*/ 1539 gcc_assert (code == OMP_SIMD); 1540 s = C_OMP_CLAUSE_SPLIT_FOR; 1541 } 1542 break; 1543 /* Lastprivate is allowed on distribute, for, sections, taskloop, loop 1544 and simd. In parallel {for{, simd},sections} we actually want to 1545 put it on parallel rather than for or sections. */ 1546 case OMP_CLAUSE_LASTPRIVATE: 1547 if (code == OMP_DISTRIBUTE) 1548 { 1549 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; 1550 break; 1551 } 1552 if ((mask & (OMP_CLAUSE_MASK_1 1553 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) 1554 { 1555 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1556 OMP_CLAUSE_LASTPRIVATE); 1557 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1558 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; 1559 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) 1560 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); 1561 cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; 1562 } 1563 if (code == OMP_FOR || code == OMP_SECTIONS) 1564 { 1565 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1566 != 0) 1567 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1568 else 1569 s = C_OMP_CLAUSE_SPLIT_FOR; 1570 break; 1571 } 1572 if (code == OMP_TASKLOOP) 1573 { 1574 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1575 break; 1576 } 1577 if (code == OMP_LOOP) 1578 { 1579 s = C_OMP_CLAUSE_SPLIT_LOOP; 1580 break; 1581 } 1582 gcc_assert (code == OMP_SIMD); 1583 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1584 { 1585 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1586 OMP_CLAUSE_LASTPRIVATE); 1587 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1588 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) 1589 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); 1590 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1591 != 0) 1592 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1593 else 1594 s = C_OMP_CLAUSE_SPLIT_FOR; 1595 OMP_CLAUSE_CHAIN (c) = cclauses[s]; 1596 cclauses[s] = c; 1597 } 1598 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) 1599 { 1600 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1601 OMP_CLAUSE_LASTPRIVATE); 1602 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1603 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) 1604 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); 1605 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; 1606 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c; 1607 } 1608 s = C_OMP_CLAUSE_SPLIT_SIMD; 1609 break; 1610 /* Shared and default clauses are allowed on parallel, teams and 1611 taskloop. 
*/ 1612 case OMP_CLAUSE_SHARED: 1613 case OMP_CLAUSE_DEFAULT: 1614 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1615 != 0) 1616 { 1617 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1618 != 0) 1619 { 1620 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1621 OMP_CLAUSE_CODE (clauses)); 1622 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) 1623 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1624 else 1625 OMP_CLAUSE_DEFAULT_KIND (c) 1626 = OMP_CLAUSE_DEFAULT_KIND (clauses); 1627 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; 1628 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; 1629 } 1630 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1631 break; 1632 } 1633 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1634 != 0) 1635 { 1636 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) 1637 == 0) 1638 { 1639 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1640 break; 1641 } 1642 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1643 OMP_CLAUSE_CODE (clauses)); 1644 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) 1645 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1646 else 1647 OMP_CLAUSE_DEFAULT_KIND (c) 1648 = OMP_CLAUSE_DEFAULT_KIND (clauses); 1649 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; 1650 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; 1651 } 1652 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1653 break; 1654 /* order clauses are allowed on for, simd and loop. */ 1655 case OMP_CLAUSE_ORDER: 1656 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1657 { 1658 if (code == OMP_SIMD) 1659 { 1660 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1661 OMP_CLAUSE_ORDER); 1662 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; 1663 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; 1664 s = C_OMP_CLAUSE_SPLIT_SIMD; 1665 } 1666 else 1667 s = C_OMP_CLAUSE_SPLIT_FOR; 1668 } 1669 else if (code == OMP_LOOP) 1670 s = C_OMP_CLAUSE_SPLIT_LOOP; 1671 else 1672 s = C_OMP_CLAUSE_SPLIT_SIMD; 1673 break; 1674 /* Reduction is allowed on simd, for, parallel, sections, taskloop, 1675 teams and loop. Duplicate it on all of them, but omit on for or 1676 sections if parallel is present (unless inscan, in that case 1677 omit on parallel). If taskloop or loop is combined with 1678 parallel, omit it on parallel. 
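      E.g. #pragma omp parallel for reduction (+:s) puts the clause only on
      the parallel construct, while #pragma omp parallel for simd duplicates
      it onto parallel and simd.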
*/ 1679 case OMP_CLAUSE_REDUCTION: 1680 if (OMP_CLAUSE_REDUCTION_TASK (clauses)) 1681 { 1682 if (code == OMP_SIMD || code == OMP_LOOP) 1683 { 1684 error_at (OMP_CLAUSE_LOCATION (clauses), 1685 "invalid %<task%> reduction modifier on construct " 1686 "combined with %<simd%> or %<loop%>"); 1687 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0; 1688 } 1689 else if (code != OMP_SECTIONS 1690 && (mask & (OMP_CLAUSE_MASK_1 1691 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0 1692 && (mask & (OMP_CLAUSE_MASK_1 1693 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0) 1694 { 1695 error_at (OMP_CLAUSE_LOCATION (clauses), 1696 "invalid %<task%> reduction modifier on construct " 1697 "not combined with %<parallel%>, %<for%> or " 1698 "%<sections%>"); 1699 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0; 1700 } 1701 } 1702 if (OMP_CLAUSE_REDUCTION_INSCAN (clauses) 1703 && ((mask & ((OMP_CLAUSE_MASK_1 1704 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) 1705 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))) 1706 != 0)) 1707 { 1708 error_at (OMP_CLAUSE_LOCATION (clauses), 1709 "%<inscan%> %<reduction%> clause on construct other " 1710 "than %<for%>, %<simd%>, %<for simd%>, " 1711 "%<parallel for%>, %<parallel for simd%>"); 1712 OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0; 1713 } 1714 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) 1715 { 1716 if (code == OMP_SIMD) 1717 { 1718 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1719 OMP_CLAUSE_REDUCTION); 1720 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1721 OMP_CLAUSE_REDUCTION_CODE (c) 1722 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1723 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1724 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1725 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1726 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1727 OMP_CLAUSE_REDUCTION_INSCAN (c) 1728 = OMP_CLAUSE_REDUCTION_INSCAN (clauses); 1729 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; 1730 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; 1731 } 1732 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) 1733 != 0) 1734 { 1735 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1736 OMP_CLAUSE_REDUCTION); 1737 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1738 OMP_CLAUSE_REDUCTION_CODE (c) 1739 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1740 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1741 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1742 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1743 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1744 OMP_CLAUSE_REDUCTION_INSCAN (c) 1745 = OMP_CLAUSE_REDUCTION_INSCAN (clauses); 1746 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; 1747 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; 1748 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1749 } 1750 else if ((mask & (OMP_CLAUSE_MASK_1 1751 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0 1752 && !OMP_CLAUSE_REDUCTION_INSCAN (clauses)) 1753 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1754 else 1755 s = C_OMP_CLAUSE_SPLIT_FOR; 1756 } 1757 else if (code == OMP_SECTIONS 1758 || code == OMP_PARALLEL 1759 || code == OMP_MASTER) 1760 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1761 else if (code == OMP_TASKLOOP) 1762 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1763 else if (code == OMP_LOOP) 1764 s = C_OMP_CLAUSE_SPLIT_LOOP; 1765 else if (code == OMP_SIMD) 1766 { 1767 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1768 != 0) 1769 { 1770 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1771 OMP_CLAUSE_REDUCTION); 1772 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1773 OMP_CLAUSE_REDUCTION_CODE (c) 1774 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1775 
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1776 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1777 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1778 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1779 OMP_CLAUSE_REDUCTION_INSCAN (c) 1780 = OMP_CLAUSE_REDUCTION_INSCAN (clauses); 1781 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; 1782 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c; 1783 } 1784 s = C_OMP_CLAUSE_SPLIT_SIMD; 1785 } 1786 else 1787 s = C_OMP_CLAUSE_SPLIT_TEAMS; 1788 break; 1789 case OMP_CLAUSE_IN_REDUCTION: 1790 /* in_reduction on taskloop simd becomes reduction on the simd 1791 and keeps being in_reduction on taskloop. */ 1792 if (code == OMP_SIMD) 1793 { 1794 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), 1795 OMP_CLAUSE_REDUCTION); 1796 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); 1797 OMP_CLAUSE_REDUCTION_CODE (c) 1798 = OMP_CLAUSE_REDUCTION_CODE (clauses); 1799 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) 1800 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); 1801 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) 1802 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); 1803 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; 1804 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; 1805 } 1806 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1807 break; 1808 case OMP_CLAUSE_IF: 1809 if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK) 1810 { 1811 s = C_OMP_CLAUSE_SPLIT_COUNT; 1812 switch (OMP_CLAUSE_IF_MODIFIER (clauses)) 1813 { 1814 case OMP_PARALLEL: 1815 if ((mask & (OMP_CLAUSE_MASK_1 1816 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 1817 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1818 break; 1819 case OMP_SIMD: 1820 if (code == OMP_SIMD) 1821 s = C_OMP_CLAUSE_SPLIT_SIMD; 1822 break; 1823 case OMP_TASKLOOP: 1824 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) 1825 != 0) 1826 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1827 break; 1828 case OMP_TARGET: 1829 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) 1830 != 0) 1831 s = C_OMP_CLAUSE_SPLIT_TARGET; 1832 break; 1833 default: 1834 break; 1835 } 1836 if (s != C_OMP_CLAUSE_SPLIT_COUNT) 1837 break; 1838 /* Error-recovery here, invalid if-modifier specified, add the 1839 clause to just one construct. */ 1840 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) 1841 s = C_OMP_CLAUSE_SPLIT_TARGET; 1842 else if ((mask & (OMP_CLAUSE_MASK_1 1843 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) 1844 s = C_OMP_CLAUSE_SPLIT_PARALLEL; 1845 else if ((mask & (OMP_CLAUSE_MASK_1 1846 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) 1847 s = C_OMP_CLAUSE_SPLIT_TASKLOOP; 1848 else if (code == OMP_SIMD) 1849 s = C_OMP_CLAUSE_SPLIT_SIMD; 1850 else 1851 gcc_unreachable (); 1852 break; 1853 } 1854 /* Otherwise, duplicate if clause to all constructs. 
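      E.g. on #pragma omp target parallel for simd an if clause with no
      modifier is duplicated onto the target, parallel and simd constructs.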
        case OMP_CLAUSE_IF:
          if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
            {
              s = C_OMP_CLAUSE_SPLIT_COUNT;
              switch (OMP_CLAUSE_IF_MODIFIER (clauses))
                {
                case OMP_PARALLEL:
                  if ((mask & (OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                  break;
                case OMP_SIMD:
                  if (code == OMP_SIMD)
                    s = C_OMP_CLAUSE_SPLIT_SIMD;
                  break;
                case OMP_TASKLOOP:
                  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
                      != 0)
                    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
                  break;
                case OMP_TARGET:
                  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
                      != 0)
                    s = C_OMP_CLAUSE_SPLIT_TARGET;
                  break;
                default:
                  break;
                }
              if (s != C_OMP_CLAUSE_SPLIT_COUNT)
                break;
              /* Error-recovery here, invalid if-modifier specified, add the
                 clause to just one construct.  */
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
                s = C_OMP_CLAUSE_SPLIT_TARGET;
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
                s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
              else if (code == OMP_SIMD)
                s = C_OMP_CLAUSE_SPLIT_SIMD;
              else
                gcc_unreachable ();
              break;
            }
          /* Otherwise, duplicate if clause to all constructs.  */
          if (code == OMP_SIMD)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_NUM_THREADS)
                           | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
                  != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_IF);
                  OMP_CLAUSE_IF_MODIFIER (c)
                    = OMP_CLAUSE_IF_MODIFIER (clauses);
                  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
                  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
                }
              else
                {
                  s = C_OMP_CLAUSE_SPLIT_SIMD;
                  break;
                }
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
              != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_IF);
                  OMP_CLAUSE_IF_MODIFIER (c)
                    = OMP_CLAUSE_IF_MODIFIER (clauses);
                  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
                  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
                  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_MAP)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_IF);
                  OMP_CLAUSE_IF_MODIFIER (c)
                    = OMP_CLAUSE_IF_MODIFIER (clauses);
                  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
                  cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
                  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
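        /* As an illustrative sketch (hypothetical user code): on
             #pragma omp target parallel for if (target: c1) if (parallel: c2)
           the directive-name-modifier routes each clause to a single leaf
           above (c1 to the target leaf, c2 to the parallel leaf), whereas a
           plain if (c) with no modifier is duplicated onto every constituent
           construct that accepts an if clause.  */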
        case OMP_CLAUSE_LINEAR:
          /* Linear clause is allowed on simd and for.  Put it on the
             innermost construct.  */
          if (code == OMP_SIMD)
            s = C_OMP_CLAUSE_SPLIT_SIMD;
          else
            s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_NOWAIT:
          /* Nowait clause is allowed on target, for and sections, but
             is not allowed on parallel for or parallel sections.  Therefore,
             put it on target construct if present, because that can only
             be combined with parallel for{, simd} and not with for{, simd},
             otherwise to the worksharing construct.  */
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
              != 0)
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          else
            s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
               | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS
      && code != OMP_LOOP)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
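
/* An illustrative sketch of what the splitting above produces (hypothetical
   user code, not a testcase): for a combined directive such as

     #pragma omp target teams distribute parallel for simd ...

   the loop leaves one clause chain per leaf construct in CCLAUSES
   (C_OMP_CLAUSE_SPLIT_TARGET, ..._TEAMS, ..._DISTRIBUTE, ..._PARALLEL,
   ..._FOR and ..._SIMD), and the flag_checking block merely verifies that
   no chain was populated for a leaf that, according to MASK, cannot be part
   of the combined construct.  The callers then build each leaf construct
   from its own chain.  */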

/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
        return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
        return 1;
      if (c > d)
        return -1;
    }
  return 0;
}

/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
        {
          tree decl = OMP_CLAUSE_DECL (c);
          tree arg;
          int idx;
          for (arg = parms, idx = 0; arg;
               arg = TREE_CHAIN (arg), idx++)
            if (arg == decl)
              break;
          if (arg == NULL_TREE)
            {
              error_at (OMP_CLAUSE_LOCATION (c),
                        "%qD is not a function argument", decl);
              continue;
            }
          OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
              && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
            {
              decl = OMP_CLAUSE_LINEAR_STEP (c);
              for (arg = parms, idx = 0; arg;
                   arg = TREE_CHAIN (arg), idx++)
                if (arg == decl)
                  break;
              if (arg == NULL_TREE)
                {
                  error_at (OMP_CLAUSE_LOCATION (c),
                            "%qD is not a function argument", decl);
                  continue;
                }
              OMP_CLAUSE_LINEAR_STEP (c)
                = build_int_cst (integer_type_node, idx);
            }
        }
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
        OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}

/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
        int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
        tree arg;
        for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
             arg = TREE_CHAIN (arg), i++)
          if (i == idx)
            break;
        gcc_assert (arg);
        OMP_CLAUSE_DECL (c) = arg;
        if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
            && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
          {
            idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
            for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
                 arg = TREE_CHAIN (arg), i++)
              if (i == idx)
                break;
            gcc_assert (arg);
            OMP_CLAUSE_LINEAR_STEP (c) = arg;
          }
      }
}
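
/* As an illustrative sketch (hypothetical declaration, not from the
   testsuite): for

     #pragma omp declare simd uniform (p) linear (i:1)
     void foo (float *p, int i);

   c_omp_declare_simd_clauses_to_numbers rewrites OMP_CLAUSE_DECL of the
   uniform clause from the PARM_DECL of p to index 0 and of the linear
   clause from i to index 1, so the clauses no longer refer to one
   particular set of PARM_DECLs, and c_omp_declare_simd_clauses_to_decls
   maps the indexes back to DECL_ARGUMENTS of the function they are
   attached to.  */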

/* Return true for __func__ and similar function-local predefined
   variables (which are in OpenMP predetermined shared, allowed in
   shared/firstprivate clauses).  */

bool
c_omp_predefined_variable (tree decl)
{
  if (VAR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && TREE_STATIC (decl)
      && DECL_NAME (decl))
    {
      if (TREE_READONLY (decl)
          && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME]
              || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME]
              || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME]))
        return true;
      /* For UBSan handle the same also ubsan_create_data created
         variables.  There is no magic flag for those, but user variables
         shouldn't be DECL_ARTIFICIAL or have TYPE_ARTIFICIAL type with
         such names.  */
      if ((flag_sanitize & (SANITIZE_UNDEFINED
                            | SANITIZE_UNDEFINED_NONDEFAULT)) != 0
          && DECL_IGNORED_P (decl)
          && !TREE_READONLY (decl)
          && TREE_CODE (DECL_NAME (decl)) == IDENTIFIER_NODE
          && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
          && TYPE_ARTIFICIAL (TREE_TYPE (decl))
          && TYPE_NAME (TREE_TYPE (decl))
          && TREE_CODE (TYPE_NAME (TREE_TYPE (decl))) == TYPE_DECL
          && DECL_NAME (TYPE_NAME (TREE_TYPE (decl)))
          && (TREE_CODE (DECL_NAME (TYPE_NAME (TREE_TYPE (decl))))
              == IDENTIFIER_NODE))
        {
          tree id1 = DECL_NAME (decl);
          tree id2 = DECL_NAME (TYPE_NAME (TREE_TYPE (decl)));
          if (IDENTIFIER_LENGTH (id1) >= sizeof ("ubsan_data") - 1
              && IDENTIFIER_LENGTH (id2) >= sizeof ("__ubsan__data")
              && !memcmp (IDENTIFIER_POINTER (id2), "__ubsan_",
                          sizeof ("__ubsan_") - 1)
              && !memcmp (IDENTIFIER_POINTER (id2) + IDENTIFIER_LENGTH (id2)
                          - sizeof ("_data") + 1, "_data",
                          sizeof ("_data") - 1)
              && strstr (IDENTIFIER_POINTER (id1), "ubsan_data"))
            return true;
        }
    }
  return false;
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Predetermine artificial variables holding integral values, those
     are usually result of gimplify_one_sizepos or SAVE_EXPR
     gimplification.  */
  if (VAR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
    return OMP_CLAUSE_DEFAULT_SHARED;

  if (c_omp_predefined_variable (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
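
/* For instance (an illustrative sketch, hypothetical user code), in

     #pragma omp parallel default (none)
     { printf ("%s\n", __func__); }

   the reference to __func__ needs no explicit data-sharing clause:
   c_omp_predetermined_sharing reports it as predetermined shared, so the
   default (none) diagnostics leave it alone.  */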

/* Diagnose errors in an OpenMP context selector, return CTX if
   it is correct or error_mark_node otherwise.  */

tree
c_omp_check_context_selector (location_t loc, tree ctx)
{
  /* Each trait-set-selector-name can only be specified once.
     There are just 4 set names.  */
  for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
        {
          error_at (loc, "selector set %qs specified more than once",
                    IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
          return error_mark_node;
        }
  for (tree t = ctx; t; t = TREE_CHAIN (t))
    {
      /* Each trait-selector-name can only be specified once.  */
      if (list_length (TREE_VALUE (t)) < 5)
        {
          for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
            for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
              if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
                {
                  error_at (loc,
                            "selector %qs specified more than once in set %qs",
                            IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
                            IDENTIFIER_POINTER (TREE_PURPOSE (t)));
                  return error_mark_node;
                }
        }
      else
        {
          hash_set<tree> pset;
          for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
            if (pset.add (TREE_PURPOSE (t1)))
              {
                error_at (loc,
                          "selector %qs specified more than once in set %qs",
                          IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
                          IDENTIFIER_POINTER (TREE_PURPOSE (t)));
                return error_mark_node;
              }
        }

      static const char *const kind[] = {
        "host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
      static const char *const vendor[] = {
        "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
        "llvm", "nvidia", "pgi", "ti", "unknown", NULL };
      static const char *const extension[] = { NULL };
      static const char *const atomic_default_mem_order[] = {
        "seq_cst", "relaxed", "acq_rel", NULL };
      struct known_properties { const char *set; const char *selector;
                                const char *const *props; };
      known_properties props[] = {
        { "device", "kind", kind },
        { "implementation", "vendor", vendor },
        { "implementation", "extension", extension },
        { "implementation", "atomic_default_mem_order",
          atomic_default_mem_order } };
      for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
        for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
          if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
                       props[i].selector)
              && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
                          props[i].set))
            for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
              for (unsigned j = 0; ; j++)
                {
                  if (props[i].props[j] == NULL)
                    {
                      if (TREE_PURPOSE (t2)
                          && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
                                      " score"))
                        break;
                      if (props[i].props == atomic_default_mem_order)
                        {
                          error_at (loc,
                                    "incorrect property %qs of %qs selector",
                                    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
                                    "atomic_default_mem_order");
                          return error_mark_node;
                        }
                      else if (TREE_PURPOSE (t2))
                        warning_at (loc, 0,
                                    "unknown property %qs of %qs selector",
                                    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
                                    props[i].selector);
                      else
                        warning_at (loc, 0,
                                    "unknown property %qE of %qs selector",
                                    TREE_VALUE (t2), props[i].selector);
                      break;
                    }
                  else if (TREE_PURPOSE (t2) == NULL_TREE)
                    {
                      const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
                      if (!strcmp (str, props[i].props[j])
                          && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
                              == strlen (str) + 1))
                        break;
                    }
                  else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
                                    props[i].props[j]))
                    break;
                }
    }
  return ctx;
}
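
/* As an illustrative sketch (hypothetical declaration): for

     #pragma omp declare variant (gpu_foo) \
             match (device={kind(gpu)}, implementation={vendor("gnu")})
     void foo (void);

   the checks above accept the selector, while repeating a selector set,
   e.g. match (device={kind(gpu)}, device={kind(host)}), is rejected with
   the "selector set %qs specified more than once" diagnostic.  */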

/* Register VARIANT as variant of some base function marked with
   #pragma omp declare variant.  CONSTRUCT is corresponding construct
   selector set.  */

void
c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
{
  tree attr = lookup_attribute ("omp declare variant variant",
                                DECL_ATTRIBUTES (variant));
  if (attr == NULL_TREE)
    {
      attr = tree_cons (get_identifier ("omp declare variant variant"),
                        unshare_expr (construct),
                        DECL_ATTRIBUTES (variant));
      DECL_ATTRIBUTES (variant) = attr;
      return;
    }
  if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
      || (construct != NULL_TREE
          && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
                                               construct)))
    error_at (loc, "%qD used as a variant with incompatible %<construct%> "
                   "selector sets", variant);
}

/* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
   to distinguish clauses as seen by the user.  Return the "friendly" clause
   name for error messages etc., where possible.  See also
   c/c-parser.c:c_parser_oacc_data_clause and
   cp/parser.c:cp_parser_oacc_data_clause.  */

const char *
c_omp_map_clause_name (tree clause, bool oacc)
{
  if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP)
    switch (OMP_CLAUSE_MAP_KIND (clause))
      {
      case GOMP_MAP_FORCE_ALLOC:
      case GOMP_MAP_ALLOC: return "create";
      case GOMP_MAP_FORCE_TO:
      case GOMP_MAP_TO: return "copyin";
      case GOMP_MAP_FORCE_FROM:
      case GOMP_MAP_FROM: return "copyout";
      case GOMP_MAP_FORCE_TOFROM:
      case GOMP_MAP_TOFROM: return "copy";
      case GOMP_MAP_RELEASE: return "delete";
      case GOMP_MAP_FORCE_PRESENT: return "present";
      case GOMP_MAP_ATTACH: return "attach";
      case GOMP_MAP_FORCE_DETACH:
      case GOMP_MAP_DETACH: return "detach";
      case GOMP_MAP_DEVICE_RESIDENT: return "device_resident";
      case GOMP_MAP_LINK: return "link";
      case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr";
      default: break;
      }
  return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
}
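
/* As an illustrative sketch (hypothetical user code): an OpenACC data clause
   such as copyin (a) in

     #pragma acc data copyin (a)

   is represented internally as an OMP_CLAUSE_MAP with a GOMP_MAP_TO (or
   GOMP_MAP_FORCE_TO) kind, and c_omp_map_clause_name maps that kind back to
   the user-visible spelling "copyin" so diagnostics can refer to the clause
   the user actually wrote.  */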