xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/c-family/c-omp.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2    called from parsing in the C and C++ front ends.
3 
4    Copyright (C) 2005-2015 Free Software Foundation, Inc.
5    Contributed by Richard Henderson <rth@redhat.com>,
6 		  Diego Novillo <dnovillo@redhat.com>.
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "hash-set.h"
29 #include "machmode.h"
30 #include "vec.h"
31 #include "double-int.h"
32 #include "input.h"
33 #include "alias.h"
34 #include "symtab.h"
35 #include "wide-int.h"
36 #include "inchash.h"
37 #include "tree.h"
38 #include "c-common.h"
39 #include "c-pragma.h"
40 #include "gimple-expr.h"
41 #include "langhooks.h"
42 #include "omp-low.h"
43 #include "gomp-constants.h"
44 
45 
46 /* Complete a #pragma oacc wait construct.  LOC is the location of
47    the #pragma.  */
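
/* For example, "#pragma acc wait (1, 2) async (3)" is completed here as a
   call to the GOACC_wait entry point of roughly the form

     GOACC_wait (3, 2, 1, 2);

   i.e. the async queue (or GOMP_ASYNC_SYNC if no async clause is present),
   the number of wait arguments, and then the wait arguments themselves.  */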
48 
49 tree
50 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
51 {
52   const int nparms = list_length (parms);
53   tree stmt, t;
54   vec<tree, va_gc> *args;
55 
56   vec_alloc (args, nparms + 2);
57   stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
58 
59   if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
60     t = OMP_CLAUSE_ASYNC_EXPR (clauses);
61   else
62     t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
63 
64   args->quick_push (t);
65   args->quick_push (build_int_cst (integer_type_node, nparms));
66 
67   for (t = parms; t; t = TREE_CHAIN (t))
68     {
69       if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
70 	args->quick_push (build_int_cst (integer_type_node,
71 			TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
72       else
73 	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
74     }
75 
76   stmt = build_call_expr_loc_vec (loc, stmt, args);
77   add_stmt (stmt);
78 
79   vec_free (args);
80 
81   return stmt;
82 }
83 
84 /* Complete a #pragma omp master construct.  STMT is the structured-block
85    that follows the pragma.  LOC is the location of the #pragma.  */
86 
87 tree
88 c_finish_omp_master (location_t loc, tree stmt)
89 {
90   tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
91   SET_EXPR_LOCATION (t, loc);
92   return t;
93 }
94 
95 /* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
96    that follows the pragma.  LOC is the location of the #pragma.  */
97 
98 tree
99 c_finish_omp_taskgroup (location_t loc, tree stmt)
100 {
101   tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
102   SET_EXPR_LOCATION (t, loc);
103   return t;
104 }
105 
106 /* Complete a #pragma omp critical construct.  BODY is the structured-block
107    that follows the pragma, NAME is the identifier in the pragma, or null
108    if it was omitted.  LOC is the location of the #pragma.  */
109 
110 tree
111 c_finish_omp_critical (location_t loc, tree body, tree name)
112 {
113   tree stmt = make_node (OMP_CRITICAL);
114   TREE_TYPE (stmt) = void_type_node;
115   OMP_CRITICAL_BODY (stmt) = body;
116   OMP_CRITICAL_NAME (stmt) = name;
117   SET_EXPR_LOCATION (stmt, loc);
118   return add_stmt (stmt);
119 }
120 
121 /* Complete a #pragma omp ordered construct.  STMT is the structured-block
122    that follows the pragma.  LOC is the location of the #pragma.  */
123 
124 tree
125 c_finish_omp_ordered (location_t loc, tree stmt)
126 {
127   tree t = build1 (OMP_ORDERED, void_type_node, stmt);
128   SET_EXPR_LOCATION (t, loc);
129   return add_stmt (t);
130 }
131 
132 
133 /* Complete a #pragma omp barrier construct.  LOC is the location of
134    the #pragma.  */
135 
136 void
137 c_finish_omp_barrier (location_t loc)
138 {
139   tree x;
140 
141   x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
142   x = build_call_expr_loc (loc, x, 0);
143   add_stmt (x);
144 }
145 
146 
147 /* Complete a #pragma omp taskwait construct.  LOC is the location of the
148    pragma.  */
149 
150 void
151 c_finish_omp_taskwait (location_t loc)
152 {
153   tree x;
154 
155   x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
156   x = build_call_expr_loc (loc, x, 0);
157   add_stmt (x);
158 }
159 
160 
161 /* Complete a #pragma omp taskyield construct.  LOC is the location of the
162    pragma.  */
163 
164 void
165 c_finish_omp_taskyield (location_t loc)
166 {
167   tree x;
168 
169   x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
170   x = build_call_expr_loc (loc, x, 0);
171   add_stmt (x);
172 }
173 
174 
175 /* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
176    the expression to be implemented atomically is LHS opcode= RHS.
177    For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
178    opcode= RHS with the new or old content of LHS returned.
179    LOC is the location of the atomic statement.  The value returned
180    is either error_mark_node (if the construct was erroneous) or an
181    OMP_ATOMIC* node which should be added to the current statement
182    tree with add_stmt.  */
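
/* In user-level terms, the forms handled here include for example

     #pragma omp atomic update
       x += expr;                      OMP_ATOMIC, opcode PLUS_EXPR
     #pragma omp atomic read
       v = x;                          OMP_ATOMIC_READ
     #pragma omp atomic capture
       v = x += expr;                  OMP_ATOMIC_CAPTURE_NEW (new value of x)
     #pragma omp atomic capture
       { v = x; x += expr; }           OMP_ATOMIC_CAPTURE_OLD (old value of x)

   where x and v correspond to the LHS and V arguments above.  */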
183 
184 tree
185 c_finish_omp_atomic (location_t loc, enum tree_code code,
186 		     enum tree_code opcode, tree lhs, tree rhs,
187 		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
188 {
189   tree x, type, addr, pre = NULL_TREE;
190 
191   if (lhs == error_mark_node || rhs == error_mark_node
192       || v == error_mark_node || lhs1 == error_mark_node
193       || rhs1 == error_mark_node)
194     return error_mark_node;
195 
196   /* ??? According to one reading of the OpenMP spec, complex types are
197      supported, but there are no atomic stores for any architecture.
198      But at least icc 9.0 doesn't support complex types here either.
199      And let's not even talk about vector types...  */
200   type = TREE_TYPE (lhs);
201   if (!INTEGRAL_TYPE_P (type)
202       && !POINTER_TYPE_P (type)
203       && !SCALAR_FLOAT_TYPE_P (type))
204     {
205       error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
206       return error_mark_node;
207     }
208 
209   if (opcode == RDIV_EXPR)
210     opcode = TRUNC_DIV_EXPR;
211 
212   /* ??? Validate that rhs does not overlap lhs.  */
213 
214   /* Take and save the address of the lhs.  From then on we'll reference it
215      via indirection.  */
216   addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
217   if (addr == error_mark_node)
218     return error_mark_node;
219   addr = save_expr (addr);
220   if (TREE_CODE (addr) != SAVE_EXPR
221       && (TREE_CODE (addr) != ADDR_EXPR
222 	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
223     {
224       /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
225 	 it even after unsharing function body.  */
226       tree var = create_tmp_var_raw (TREE_TYPE (addr));
227       DECL_CONTEXT (var) = current_function_decl;
228       addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
229     }
230   lhs = build_indirect_ref (loc, addr, RO_NULL);
231 
232   if (code == OMP_ATOMIC_READ)
233     {
234       x = build1 (OMP_ATOMIC_READ, type, addr);
235       SET_EXPR_LOCATION (x, loc);
236       OMP_ATOMIC_SEQ_CST (x) = seq_cst;
237       return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
238 				loc, x, NULL_TREE);
239     }
240 
241   /* There are lots of warnings, errors, and conversions that need to happen
242      in the course of interpreting a statement.  Use the normal mechanisms
243      to do this, and then take it apart again.  */
244   if (swapped)
245     {
246       rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
247       opcode = NOP_EXPR;
248     }
249   bool save = in_late_binary_op;
250   in_late_binary_op = true;
251   x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
252   in_late_binary_op = save;
253   if (x == error_mark_node)
254     return error_mark_node;
255   if (TREE_CODE (x) == COMPOUND_EXPR)
256     {
257       pre = TREE_OPERAND (x, 0);
258       gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
259       x = TREE_OPERAND (x, 1);
260     }
261   gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
262   rhs = TREE_OPERAND (x, 1);
263 
264   /* Punt the actual generation of atomic operations to common code.  */
265   if (code == OMP_ATOMIC)
266     type = void_type_node;
267   x = build2 (code, type, addr, rhs);
268   SET_EXPR_LOCATION (x, loc);
269   OMP_ATOMIC_SEQ_CST (x) = seq_cst;
270 
271   /* Generally it is hard to prove rhs1 and lhs are the same memory
272      location, just diagnose different variables.  */
273   if (rhs1
274       && TREE_CODE (rhs1) == VAR_DECL
275       && TREE_CODE (lhs) == VAR_DECL
276       && rhs1 != lhs)
277     {
278       if (code == OMP_ATOMIC)
279 	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
280       else
281 	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
282       return error_mark_node;
283     }
284 
285   if (code != OMP_ATOMIC)
286     {
287       /* Generally it is hard to prove lhs1 and lhs are the same memory
288 	 location, just diagnose different variables.  */
289       if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
290 	{
291 	  if (lhs1 != lhs)
292 	    {
293 	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
294 	      return error_mark_node;
295 	    }
296 	}
297       x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
298 			     loc, x, NULL_TREE);
299       if (rhs1 && rhs1 != lhs)
300 	{
301 	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
302 	  if (rhs1addr == error_mark_node)
303 	    return error_mark_node;
304 	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
305 	}
306       if (lhs1 && lhs1 != lhs)
307 	{
308 	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
309 	  if (lhs1addr == error_mark_node)
310 	    return error_mark_node;
311 	  if (code == OMP_ATOMIC_CAPTURE_OLD)
312 	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
313 	  else
314 	    {
315 	      x = save_expr (x);
316 	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
317 	    }
318 	}
319     }
320   else if (rhs1 && rhs1 != lhs)
321     {
322       tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
323       if (rhs1addr == error_mark_node)
324 	return error_mark_node;
325       x = omit_one_operand_loc (loc, type, x, rhs1addr);
326     }
327 
328   if (pre)
329     x = omit_one_operand_loc (loc, type, x, pre);
330   return x;
331 }
332 
333 
334 /* Complete a #pragma omp flush construct.  We don't do anything with
335    the variable list that the syntax allows.  LOC is the location of
336    the #pragma.  */
337 
338 void
339 c_finish_omp_flush (location_t loc)
340 {
341   tree x;
342 
343   x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
344   x = build_call_expr_loc (loc, x, 0);
345   add_stmt (x);
346 }
347 
348 
349 /* Check and canonicalize OMP_FOR increment expression.
350    Helper function for c_finish_omp_for.  */
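
/* The helper replaces the occurrence of DECL in EXP with zero and folds, so
   that e.g. the right-hand side "3 + i" of an otherwise unrecognized
   increment of "i" reduces to the step "3"; c_finish_omp_for then rebuilds
   the increment in the canonical form "i = i + 3".  */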
351 
352 static tree
353 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
354 {
355   tree t;
356 
357   if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
358       || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
359     return error_mark_node;
360 
361   if (exp == decl)
362     return build_int_cst (TREE_TYPE (exp), 0);
363 
364   switch (TREE_CODE (exp))
365     {
366     CASE_CONVERT:
367       t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
368       if (t != error_mark_node)
369         return fold_convert_loc (loc, TREE_TYPE (exp), t);
370       break;
371     case MINUS_EXPR:
372       t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
373       if (t != error_mark_node)
374         return fold_build2_loc (loc, MINUS_EXPR,
375 			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
376       break;
377     case PLUS_EXPR:
378       t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
379       if (t != error_mark_node)
380         return fold_build2_loc (loc, PLUS_EXPR,
381 			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
382       t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
383       if (t != error_mark_node)
384         return fold_build2_loc (loc, PLUS_EXPR,
385 			    TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
386       break;
387     case COMPOUND_EXPR:
388       {
389 	/* cp_build_modify_expr forces preevaluation of the RHS to make
390 	   sure that it is evaluated before the lvalue-rvalue conversion
391 	   is applied to the LHS.  Reconstruct the original expression.  */
392 	tree op0 = TREE_OPERAND (exp, 0);
393 	if (TREE_CODE (op0) == TARGET_EXPR
394 	    && !VOID_TYPE_P (TREE_TYPE (op0)))
395 	  {
396 	    tree op1 = TREE_OPERAND (exp, 1);
397 	    tree temp = TARGET_EXPR_SLOT (op0);
398 	    if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
399 		&& TREE_OPERAND (op1, 1) == temp)
400 	      {
401 		op1 = copy_node (op1);
402 		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
403 		return check_omp_for_incr_expr (loc, op1, decl);
404 	      }
405 	  }
406 	break;
407       }
408     default:
409       break;
410     }
411 
412   return error_mark_node;
413 }
414 
415 /* If the OMP_FOR increment expression in INCR is of pointer type,
416    canonicalize it into an expression handled by gimplify_omp_for()
417    and return it.  DECL is the iteration variable.  */
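
/* For example, for a pointer iteration variable P, "P--" is rewritten here
   into the equivalent "P = P + (-step)" using a POINTER_PLUS_EXPR with a
   sizetype offset, which is the form gimplify_omp_for expects.  */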
418 
419 static tree
420 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
421 {
422   if (POINTER_TYPE_P (TREE_TYPE (decl))
423       && TREE_OPERAND (incr, 1))
424     {
425       tree t = fold_convert_loc (loc,
426 				 sizetype, TREE_OPERAND (incr, 1));
427 
428       if (TREE_CODE (incr) == POSTDECREMENT_EXPR
429 	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
430 	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
431       t = fold_build_pointer_plus (decl, t);
432       incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
433     }
434   return incr;
435 }
436 
437 /* Validate and generate OMP_FOR.
438    DECLV is a vector of iteration variables, for each collapsed loop.
439    INITV, CONDV and INCRV are vectors containing initialization
440    expressions, controlling predicates and increment expressions.
441    BODY is the body of the loop, and PRE_BODY holds the statements that go
442    before the loop.  */
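
/* A typical loop nest accepted here, e.g. for collapse(2), is

     for (i = 0; i < n; i++)
       for (j = 0; j < m; j += 2)
         body;

   where DECLV holds {i, j} and INITV, CONDV and INCRV hold the corresponding
   initializations, controlling predicates and increment expressions.  */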
443 
444 tree
445 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
446 		  tree initv, tree condv, tree incrv, tree body, tree pre_body)
447 {
448   location_t elocus;
449   bool fail = false;
450   int i;
451 
452   if ((code == CILK_SIMD || code == CILK_FOR)
453       && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
454     fail = true;
455 
456   gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
457   gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
458   gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
459   for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
460     {
461       tree decl = TREE_VEC_ELT (declv, i);
462       tree init = TREE_VEC_ELT (initv, i);
463       tree cond = TREE_VEC_ELT (condv, i);
464       tree incr = TREE_VEC_ELT (incrv, i);
465 
466       elocus = locus;
467       if (EXPR_HAS_LOCATION (init))
468 	elocus = EXPR_LOCATION (init);
469 
470       /* Validate the iteration variable.  */
471       if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
472 	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
473 	{
474 	  error_at (elocus, "invalid type for iteration variable %qE", decl);
475 	  fail = true;
476 	}
477 
478       /* In the case of "for (int i = 0...)", init will be a decl.  It should
479 	 have a DECL_INITIAL that we can turn into an assignment.  */
480       if (init == decl)
481 	{
482 	  elocus = DECL_SOURCE_LOCATION (decl);
483 
484 	  init = DECL_INITIAL (decl);
485 	  if (init == NULL)
486 	    {
487 	      error_at (elocus, "%qE is not initialized", decl);
488 	      init = integer_zero_node;
489 	      fail = true;
490 	    }
491 
492 	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
493 	      			    /* FIXME diagnostics: This should
494 				       be the location of the INIT.  */
495 	      			    elocus,
496 				    init,
497 				    NULL_TREE);
498 	}
499       if (init != error_mark_node)
500 	{
501 	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
502 	  gcc_assert (TREE_OPERAND (init, 0) == decl);
503 	}
504 
505       if (cond == NULL_TREE)
506 	{
507 	  error_at (elocus, "missing controlling predicate");
508 	  fail = true;
509 	}
510       else
511 	{
512 	  bool cond_ok = false;
513 
514 	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
515 	     evaluation of the vla VAR_DECL.  We need to re-add
516 	     them to the non-decl operand.  See PR45784.  */
517 	  while (TREE_CODE (cond) == COMPOUND_EXPR)
518 	    cond = TREE_OPERAND (cond, 1);
519 
520 	  if (EXPR_HAS_LOCATION (cond))
521 	    elocus = EXPR_LOCATION (cond);
522 
523 	  if (TREE_CODE (cond) == LT_EXPR
524 	      || TREE_CODE (cond) == LE_EXPR
525 	      || TREE_CODE (cond) == GT_EXPR
526 	      || TREE_CODE (cond) == GE_EXPR
527 	      || TREE_CODE (cond) == NE_EXPR
528 	      || TREE_CODE (cond) == EQ_EXPR)
529 	    {
530 	      tree op0 = TREE_OPERAND (cond, 0);
531 	      tree op1 = TREE_OPERAND (cond, 1);
532 
533 	      /* 2.5.1.  The comparison in the condition is computed in
534 		 the type of DECL, otherwise the behavior is undefined.
535 
536 		 For example:
537 		 long n; int i;
538 		 i < n;
539 
540 		 according to ISO will be evaluated as:
541 		 (long)i < n;
542 
543 		 We want to force:
544 		 i < (int)n;  */
545 	      if (TREE_CODE (op0) == NOP_EXPR
546 		  && decl == TREE_OPERAND (op0, 0))
547 		{
548 		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
549 		  TREE_OPERAND (cond, 1)
550 		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
551 				   TREE_OPERAND (cond, 1));
552 		}
553 	      else if (TREE_CODE (op1) == NOP_EXPR
554 		       && decl == TREE_OPERAND (op1, 0))
555 		{
556 		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
557 		  TREE_OPERAND (cond, 0)
558 		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
559 				   TREE_OPERAND (cond, 0));
560 		}
561 
562 	      if (decl == TREE_OPERAND (cond, 0))
563 		cond_ok = true;
564 	      else if (decl == TREE_OPERAND (cond, 1))
565 		{
566 		  TREE_SET_CODE (cond,
567 				 swap_tree_comparison (TREE_CODE (cond)));
568 		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
569 		  TREE_OPERAND (cond, 0) = decl;
570 		  cond_ok = true;
571 		}
572 
573 	      if (TREE_CODE (cond) == NE_EXPR
574 		  || TREE_CODE (cond) == EQ_EXPR)
575 		{
576 		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
577 		    {
578 		      if (code != CILK_SIMD && code != CILK_FOR)
579 			cond_ok = false;
580 		    }
581 		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
582 					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
583 					    0))
584 		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
585 					 ? GT_EXPR : LE_EXPR);
586 		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
587 					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
588 					    0))
589 		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
590 					 ? LT_EXPR : GE_EXPR);
591 		  else if (code != CILK_SIMD && code != CILK_FOR)
592 		    cond_ok = false;
593 		}
594 
595 	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
596 		{
597 		  tree ce = NULL_TREE, *pce = &ce;
598 		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
599 		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
600 		       c = TREE_OPERAND (c, 1))
601 		    {
602 		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
603 				     TREE_OPERAND (cond, 1));
604 		      pce = &TREE_OPERAND (*pce, 1);
605 		    }
606 		  TREE_OPERAND (cond, 1) = ce;
607 		  TREE_VEC_ELT (condv, i) = cond;
608 		}
609 	    }
610 
611 	  if (!cond_ok)
612 	    {
613 	      error_at (elocus, "invalid controlling predicate");
614 	      fail = true;
615 	    }
616 	}
617 
618       if (incr == NULL_TREE)
619 	{
620 	  error_at (elocus, "missing increment expression");
621 	  fail = true;
622 	}
623       else
624 	{
625 	  bool incr_ok = false;
626 
627 	  if (EXPR_HAS_LOCATION (incr))
628 	    elocus = EXPR_LOCATION (incr);
629 
630 	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
631 	     v = v + incr, v = incr + v and v = v - incr.  */
632 	  switch (TREE_CODE (incr))
633 	    {
634 	    case POSTINCREMENT_EXPR:
635 	    case PREINCREMENT_EXPR:
636 	    case POSTDECREMENT_EXPR:
637 	    case PREDECREMENT_EXPR:
638 	      if (TREE_OPERAND (incr, 0) != decl)
639 		break;
640 
641 	      incr_ok = true;
642 	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
643 	      break;
644 
645 	    case COMPOUND_EXPR:
646 	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
647 		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
648 		break;
649 	      incr = TREE_OPERAND (incr, 1);
650 	      /* FALLTHRU */
651 	    case MODIFY_EXPR:
652 	      if (TREE_OPERAND (incr, 0) != decl)
653 		break;
654 	      if (TREE_OPERAND (incr, 1) == decl)
655 		break;
656 	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
657 		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
658 		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
659 		incr_ok = true;
660 	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
661 			|| (TREE_CODE (TREE_OPERAND (incr, 1))
662 			    == POINTER_PLUS_EXPR))
663 		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
664 		incr_ok = true;
665 	      else
666 		{
667 		  tree t = check_omp_for_incr_expr (elocus,
668 						    TREE_OPERAND (incr, 1),
669 						    decl);
670 		  if (t != error_mark_node)
671 		    {
672 		      incr_ok = true;
673 		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
674 		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
675 		    }
676 		}
677 	      break;
678 
679 	    default:
680 	      break;
681 	    }
682 	  if (!incr_ok)
683 	    {
684 	      error_at (elocus, "invalid increment expression");
685 	      fail = true;
686 	    }
687 	}
688 
689       TREE_VEC_ELT (initv, i) = init;
690       TREE_VEC_ELT (incrv, i) = incr;
691     }
692 
693   if (fail)
694     return NULL;
695   else
696     {
697       tree t = make_node (code);
698 
699       TREE_TYPE (t) = void_type_node;
700       OMP_FOR_INIT (t) = initv;
701       OMP_FOR_COND (t) = condv;
702       OMP_FOR_INCR (t) = incrv;
703       OMP_FOR_BODY (t) = body;
704       OMP_FOR_PRE_BODY (t) = pre_body;
705 
706       SET_EXPR_LOCATION (t, locus);
707       return add_stmt (t);
708     }
709 }
710 
711 /* Right now we have 14 different combined constructs.  This
712    function attempts to split or duplicate clauses for combined
713    constructs.  CODE is the innermost construct in the combined construct,
714    and MASK makes it possible to determine which constructs are combined,
715    as every construct has at least one clause that no other construct
716    has (except for OMP_SECTIONS, which can only be combined with parallel).
717    Combined constructs are:
718    #pragma omp parallel for
719    #pragma omp parallel sections
720    #pragma omp parallel for simd
721    #pragma omp for simd
722    #pragma omp distribute simd
723    #pragma omp distribute parallel for
724    #pragma omp distribute parallel for simd
725    #pragma omp teams distribute
726    #pragma omp teams distribute parallel for
727    #pragma omp teams distribute parallel for simd
728    #pragma omp target teams
729    #pragma omp target teams distribute
730    #pragma omp target teams distribute parallel for
731    #pragma omp target teams distribute parallel for simd  */
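
/* For example, for

     #pragma omp parallel for simd num_threads(4) schedule(static) safelen(8)

   CODE is OMP_SIMD, and the num_threads, schedule and safelen clauses end up
   in the PARALLEL, FOR and SIMD entries of CCLAUSES respectively, with an
   implicit nowait clause added to the FOR part.  */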
732 
733 void
734 c_omp_split_clauses (location_t loc, enum tree_code code,
735 		     omp_clause_mask mask, tree clauses, tree *cclauses)
736 {
737   tree next, c;
738   enum c_omp_clause_split s;
739   int i;
740 
741   for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
742     cclauses[i] = NULL;
743   /* Add implicit nowait clause on
744      #pragma omp parallel {for,for simd,sections}.  */
745   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
746     switch (code)
747       {
748       case OMP_FOR:
749       case OMP_SIMD:
750         cclauses[C_OMP_CLAUSE_SPLIT_FOR]
751 	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
752 	break;
753       case OMP_SECTIONS:
754 	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
755 	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
756 	break;
757       default:
758 	break;
759       }
760 
761   for (; clauses ; clauses = next)
762     {
763       next = OMP_CLAUSE_CHAIN (clauses);
764 
765       switch (OMP_CLAUSE_CODE (clauses))
766 	{
767 	/* First the clauses that are unique to some constructs.  */
768 	case OMP_CLAUSE_DEVICE:
769 	case OMP_CLAUSE_MAP:
770 	  s = C_OMP_CLAUSE_SPLIT_TARGET;
771 	  break;
772 	case OMP_CLAUSE_NUM_TEAMS:
773 	case OMP_CLAUSE_THREAD_LIMIT:
774 	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
775 	  break;
776 	case OMP_CLAUSE_DIST_SCHEDULE:
777 	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
778 	  break;
779 	case OMP_CLAUSE_COPYIN:
780 	case OMP_CLAUSE_NUM_THREADS:
781 	case OMP_CLAUSE_PROC_BIND:
782 	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
783 	  break;
784 	case OMP_CLAUSE_ORDERED:
785 	case OMP_CLAUSE_SCHEDULE:
786 	case OMP_CLAUSE_NOWAIT:
787 	  s = C_OMP_CLAUSE_SPLIT_FOR;
788 	  break;
789 	case OMP_CLAUSE_SAFELEN:
790 	case OMP_CLAUSE_LINEAR:
791 	case OMP_CLAUSE_ALIGNED:
792 	  s = C_OMP_CLAUSE_SPLIT_SIMD;
793 	  break;
794 	/* Duplicate this to all of distribute, for and simd.  */
795 	case OMP_CLAUSE_COLLAPSE:
796 	  if (code == OMP_SIMD)
797 	    {
798 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
799 				    OMP_CLAUSE_COLLAPSE);
800 	      OMP_CLAUSE_COLLAPSE_EXPR (c)
801 		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
802 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
803 	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
804 	    }
805 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
806 	    {
807 	      if ((mask & (OMP_CLAUSE_MASK_1
808 			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
809 		{
810 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
811 					OMP_CLAUSE_COLLAPSE);
812 		  OMP_CLAUSE_COLLAPSE_EXPR (c)
813 		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
814 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
815 		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
816 		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
817 		}
818 	      else
819 		s = C_OMP_CLAUSE_SPLIT_FOR;
820 	    }
821 	  else
822 	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
823 	  break;
824 	/* The private clause is supported on all constructs but target, so
825 	   it is enough to put it on the innermost one.  For
826 	   #pragma omp {for,sections} put it on parallel though,
827 	   as that's what we did for OpenMP 3.1.  */
828 	case OMP_CLAUSE_PRIVATE:
829 	  switch (code)
830 	    {
831 	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
832 	    case OMP_FOR: case OMP_SECTIONS:
833 	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
834 	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
835 	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
836 	    default: gcc_unreachable ();
837 	    }
838 	  break;
839 	/* Firstprivate clause is supported on all constructs but
840 	   target and simd.  Put it on the outermost of those and
841 	   duplicate on parallel.  */
842 	case OMP_CLAUSE_FIRSTPRIVATE:
843 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
844 	      != 0)
845 	    {
846 	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
847 			   | (OMP_CLAUSE_MASK_1
848 			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
849 		{
850 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
851 					OMP_CLAUSE_FIRSTPRIVATE);
852 		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
853 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
854 		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
855 		  if ((mask & (OMP_CLAUSE_MASK_1
856 			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
857 		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
858 		  else
859 		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
860 		}
861 	      else
862 		/* This must be
863 		   #pragma omp parallel{, for{, simd}, sections}.  */
864 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
865 	    }
866 	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
867 		   != 0)
868 	    {
869 	      /* This must be one of
870 		 #pragma omp {,target }teams distribute
871 		 #pragma omp target teams
872 		 #pragma omp {,target }teams distribute simd.  */
873 	      gcc_assert (code == OMP_DISTRIBUTE
874 			  || code == OMP_TEAMS
875 			  || code == OMP_SIMD);
876 	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
877 	    }
878 	  else if ((mask & (OMP_CLAUSE_MASK_1
879 			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
880 	    {
881 	      /* This must be #pragma omp distribute simd.  */
882 	      gcc_assert (code == OMP_SIMD);
883 	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
884 	    }
885 	  else
886 	    {
887 	      /* This must be #pragma omp for simd.  */
888 	      gcc_assert (code == OMP_SIMD);
889 	      s = C_OMP_CLAUSE_SPLIT_FOR;
890 	    }
891 	  break;
892 	/* Lastprivate is allowed on for, sections and simd.  In
893 	   parallel {for{, simd},sections} we actually want to put it on
894 	   parallel rather than for or sections.  */
895 	case OMP_CLAUSE_LASTPRIVATE:
896 	  if (code == OMP_FOR || code == OMP_SECTIONS)
897 	    {
898 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
899 		  != 0)
900 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
901 	      else
902 		s = C_OMP_CLAUSE_SPLIT_FOR;
903 	      break;
904 	    }
905 	  gcc_assert (code == OMP_SIMD);
906 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
907 	    {
908 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
909 				    OMP_CLAUSE_LASTPRIVATE);
910 	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
911 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
912 		  != 0)
913 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
914 	      else
915 		s = C_OMP_CLAUSE_SPLIT_FOR;
916 	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
917 	      cclauses[s] = c;
918 	    }
919 	  s = C_OMP_CLAUSE_SPLIT_SIMD;
920 	  break;
921 	/* Shared and default clauses are allowed on parallel and teams.  */
922 	case OMP_CLAUSE_SHARED:
923 	case OMP_CLAUSE_DEFAULT:
924 	  if (code == OMP_TEAMS)
925 	    {
926 	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
927 	      break;
928 	    }
929 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
930 	      != 0)
931 	    {
932 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
933 				    OMP_CLAUSE_CODE (clauses));
934 	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
935 		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
936 	      else
937 		OMP_CLAUSE_DEFAULT_KIND (c)
938 		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
939 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
940 	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
941 
942 	    }
943 	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
944 	  break;
945 	/* Reduction is allowed on simd, for, parallel, sections and teams.
946 	   Duplicate it on all of them, but omit on for or sections if
947 	   parallel is present.  */
948 	case OMP_CLAUSE_REDUCTION:
949 	  if (code == OMP_SIMD)
950 	    {
951 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
952 				    OMP_CLAUSE_REDUCTION);
953 	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
954 	      OMP_CLAUSE_REDUCTION_CODE (c)
955 		= OMP_CLAUSE_REDUCTION_CODE (clauses);
956 	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
957 		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
958 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
959 	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
960 	    }
961 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
962 	    {
963 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
964 		  != 0)
965 		{
966 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
967 					OMP_CLAUSE_REDUCTION);
968 		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
969 		  OMP_CLAUSE_REDUCTION_CODE (c)
970 		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
971 		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
972 		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
973 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
974 		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
975 		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
976 		}
977 	      else if ((mask & (OMP_CLAUSE_MASK_1
978 				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
979 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
980 	      else
981 		s = C_OMP_CLAUSE_SPLIT_FOR;
982 	    }
983 	  else if (code == OMP_SECTIONS)
984 	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
985 	  else
986 	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
987 	  break;
988 	case OMP_CLAUSE_IF:
989 	  /* FIXME: This is currently being discussed.  */
990 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
991 	      != 0)
992 	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
993 	  else
994 	    s = C_OMP_CLAUSE_SPLIT_TARGET;
995 	  break;
996 	default:
997 	  gcc_unreachable ();
998 	}
999       OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
1000       cclauses[s] = clauses;
1001     }
1002 }
1003 
1004 
1005 /* qsort callback to compare #pragma omp declare simd clauses.  */
1006 
1007 static int
1008 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
1009 {
1010   tree a = *(const tree *) p;
1011   tree b = *(const tree *) q;
1012   if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
1013     {
1014       if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
1015 	return -1;
1016       return 1;
1017     }
1018   if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
1019       && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
1020       && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1021     {
1022       int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1023       int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1024       if (c < d)
1025 	return 1;
1026       if (c > d)
1027 	return -1;
1028     }
1029   return 0;
1030 }
1031 
1032 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1033    CLAUSES into indexes of the argument list PARMS and sort them.  */
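
/* For example, for

     #pragma omp declare simd uniform(n) linear(p:1)
     void foo (float *p, int n);

   the uniform and linear clause decls are replaced by the argument indexes
   1 and 0 respectively before the clause chain is sorted.  */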
1034 
1035 tree
1036 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1037 {
1038   tree c;
1039   vec<tree> clvec = vNULL;
1040 
1041   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1042     {
1043       if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1044 	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1045 	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1046 	{
1047 	  tree decl = OMP_CLAUSE_DECL (c);
1048 	  tree arg;
1049 	  int idx;
1050 	  for (arg = parms, idx = 0; arg;
1051 	       arg = TREE_CHAIN (arg), idx++)
1052 	    if (arg == decl)
1053 	      break;
1054 	  if (arg == NULL_TREE)
1055 	    {
1056 	      error_at (OMP_CLAUSE_LOCATION (c),
1057 			"%qD is not a function argument", decl);
1058 	      continue;
1059 	    }
1060 	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1061 	}
1062       clvec.safe_push (c);
1063     }
1064   if (!clvec.is_empty ())
1065     {
1066       unsigned int len = clvec.length (), i;
1067       clvec.qsort (c_omp_declare_simd_clause_cmp);
1068       clauses = clvec[0];
1069       for (i = 0; i < len; i++)
1070 	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1071     }
1072   else
1073     clauses = NULL_TREE;
1074   clvec.release ();
1075   return clauses;
1076 }
1077 
1078 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */
1079 
1080 void
1081 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1082 {
1083   tree c;
1084 
1085   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1086     if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1087 	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1088 	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1089       {
1090 	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1091 	tree arg;
1092 	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1093 	     arg = TREE_CHAIN (arg), i++)
1094 	  if (i == idx)
1095 	    break;
1096 	gcc_assert (arg);
1097 	OMP_CLAUSE_DECL (c) = arg;
1098       }
1099 }
1100 
1101 /* Return the predetermined OpenMP sharing attribute of DECL, if any.  */
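
/* For example, a const-qualified scalar referenced in a parallel region
   without an explicit data-sharing clause is reported as predetermined
   shared here, because TREE_READONLY is set on its decl.  */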
1102 
1103 enum omp_clause_default_kind
1104 c_omp_predetermined_sharing (tree decl)
1105 {
1106   /* Variables with const-qualified type having no mutable member
1107      are predetermined shared.  */
1108   if (TREE_READONLY (decl))
1109     return OMP_CLAUSE_DEFAULT_SHARED;
1110 
1111   return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
1112 }
1113