xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/omp-low.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /* Lowering pass for OpenMP directives.  Converts OpenMP directives
2    into explicit calls to the runtime library (libgomp) and data
3    marshalling to implement data sharing and copying clauses.
4    Contributed by Diego Novillo <dnovillo@redhat.com>
5 
6    Copyright (C) 2005-2013 Free Software Foundation, Inc.
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-flow.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45 
46 
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48    phases.  The first phase scans the function looking for OMP statements
49    and then for variables that must be replaced to satisfy data sharing
50    clauses.  The second phase expands code for the constructs, as well as
51    re-gimplifying things when variables have been replaced with complex
52    expressions.
53 
54    Final code generation is done by pass_expand_omp.  The flowgraph is
55    scanned for parallel regions which are then moved to a new
56    function, to be invoked by the thread library.  */
57 
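/* As an illustrative sketch of that outlining (the temporaries, the
   mangled child-function name and the builtin calls below are
   assumptions, shown in the spirit of the .omp_data_s example further
   down in this file):

	#pragma omp parallel shared (x)
	  body;

   ends up roughly as

	.omp_data_o.x = ...;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where foo._omp_fn.0 contains the lowered BODY and reads X back out of
   the record pointed to by its .omp_data_i argument.  */
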
58 /* Context structure.  Used to store information about each parallel
59    directive in the code.  */
60 
61 typedef struct omp_context
62 {
63   /* This field must be at the beginning, as we do "inheritance": Some
64      callback functions for tree-inline.c (e.g., omp_copy_decl)
65      receive a copy_body_data pointer that is up-casted to an
66      omp_context pointer.  */
67   copy_body_data cb;
68 
69   /* The tree of contexts corresponding to the encountered constructs.  */
70   struct omp_context *outer;
71   gimple stmt;
72 
73   /* Map variables to fields in a structure that allows communication
74      between sending and receiving threads.  */
75   splay_tree field_map;
76   tree record_type;
77   tree sender_decl;
78   tree receiver_decl;
79 
80   /* These are used just by task contexts, if a task firstprivate fn is
81      needed.  srecord_type is used to communicate from the thread
82      that encountered the task construct to the task firstprivate fn;
83      record_type is allocated by GOMP_task, initialized by the task
84      firstprivate fn and passed to the task body fn.  */
85   splay_tree sfield_map;
86   tree srecord_type;
87 
88   /* A chain of variables to add to the top-level block surrounding the
89      construct.  In the case of a parallel, this is in the child function.  */
90   tree block_vars;
91 
92   /* What to do with variables with implicitly determined sharing
93      attributes.  */
94   enum omp_clause_default_kind default_kind;
95 
96   /* Nesting depth of this context.  Used to beautify error messages re
97      invalid gotos.  The outermost ctx is depth 1, with depth 0 being
98      reserved for the main body of the function.  */
99   int depth;
100 
101   /* True if this parallel directive is nested within another.  */
102   bool is_nested;
103 } omp_context;
104 
105 
106 struct omp_for_data_loop
107 {
108   tree v, n1, n2, step;
109   enum tree_code cond_code;
110 };
111 
112 /* A structure describing the main elements of a parallel loop.  */
113 
114 struct omp_for_data
115 {
116   struct omp_for_data_loop loop;
117   tree chunk_size;
118   gimple for_stmt;
119   tree pre, iter_type;
120   int collapse;
121   bool have_nowait, have_ordered;
122   enum omp_clause_schedule_kind sched_kind;
123   struct omp_for_data_loop *loops;
124 };
125 
126 
127 static splay_tree all_contexts;
128 static int taskreg_nesting_level;
129 struct omp_region *root_omp_region;
130 static bitmap task_shared_vars;
131 static vec<omp_context *> taskreg_contexts;
132 
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
135 
136 #define WALK_SUBSTMTS  \
137     case GIMPLE_BIND: \
138     case GIMPLE_TRY: \
139     case GIMPLE_CATCH: \
140     case GIMPLE_EH_FILTER: \
141     case GIMPLE_TRANSACTION: \
142       /* The sub-statements for these should be walked.  */ \
143       *handled_ops_p = false; \
144       break;
145 
146 /* Convenience function for calling scan_omp_1_op on tree operands.  */
147 
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151   struct walk_stmt_info wi;
152 
153   memset (&wi, 0, sizeof (wi));
154   wi.info = ctx;
155   wi.want_locations = true;
156 
157   return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159 
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163 
164 /* Find an OpenMP clause of type KIND within CLAUSES.  */
165 
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169   for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170     if (OMP_CLAUSE_CODE (clauses) == kind)
171       return clauses;
172 
173   return NULL_TREE;
174 }
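
/* For example (a sketch of the typical use in this file), testing an
   omp for statement for an ordered clause looks like

	tree clauses = gimple_omp_for_clauses (for_stmt);
	if (find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	  ...  the loop carries an ordered clause  ...  */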
175 
176 /* Return true if CTX is for an omp parallel.  */
177 
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181   return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183 
184 
185 /* Return true if CTX is for an omp task.  */
186 
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190   return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192 
193 
194 /* Return true if CTX is for an omp parallel or omp task.  */
195 
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199   return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202 
203 
204 /* Return true if REGION is a combined parallel+workshare region.  */
205 
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209   return region->is_combined_parallel;
210 }
211 
212 
213 /* Extract the header elements of parallel loop FOR_STMT and store
214    them into *FD.  */
215 
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 		      struct omp_for_data_loop *loops)
219 {
220   tree t, var, *collapse_iter, *collapse_count;
221   tree count = NULL_TREE, iter_type = long_integer_type_node;
222   struct omp_for_data_loop *loop;
223   int i;
224   struct omp_for_data_loop dummy_loop;
225   location_t loc = gimple_location (for_stmt);
226 
227   fd->for_stmt = for_stmt;
228   fd->pre = NULL;
229   fd->collapse = gimple_omp_for_collapse (for_stmt);
230   if (fd->collapse > 1)
231     fd->loops = loops;
232   else
233     fd->loops = &fd->loop;
234 
235   fd->have_nowait = fd->have_ordered = false;
236   fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237   fd->chunk_size = NULL_TREE;
238   collapse_iter = NULL;
239   collapse_count = NULL;
240 
241   for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242     switch (OMP_CLAUSE_CODE (t))
243       {
244       case OMP_CLAUSE_NOWAIT:
245 	fd->have_nowait = true;
246 	break;
247       case OMP_CLAUSE_ORDERED:
248 	fd->have_ordered = true;
249 	break;
250       case OMP_CLAUSE_SCHEDULE:
251 	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 	break;
254       case OMP_CLAUSE_COLLAPSE:
255 	if (fd->collapse > 1)
256 	  {
257 	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 	  }
260       default:
261 	break;
262       }
263 
264   /* FIXME: for now map schedule(auto) to schedule(static).
265      There should be analysis to determine whether all iterations
266      are approximately the same amount of work (then schedule(static)
267      is best) or if it varies (then schedule(dynamic,N) is better).  */
268   if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269     {
270       fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271       gcc_assert (fd->chunk_size == NULL);
272     }
273   gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274   if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275     gcc_assert (fd->chunk_size == NULL);
276   else if (fd->chunk_size == NULL)
277     {
278       /* We only need to compute a default chunk size for ordered
279 	 static loops and dynamic loops.  */
280       if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 	  || fd->have_ordered
282 	  || fd->collapse > 1)
283 	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 			 ? integer_zero_node : integer_one_node;
285     }
286 
287   for (i = 0; i < fd->collapse; i++)
288     {
289       if (fd->collapse == 1)
290 	loop = &fd->loop;
291       else if (loops != NULL)
292 	loop = loops + i;
293       else
294 	loop = &dummy_loop;
295 
296 
297       loop->v = gimple_omp_for_index (for_stmt, i);
298       gcc_assert (SSA_VAR_P (loop->v));
299       gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301       var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302       loop->n1 = gimple_omp_for_initial (for_stmt, i);
303 
304       loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305       loop->n2 = gimple_omp_for_final (for_stmt, i);
306       switch (loop->cond_code)
307 	{
308 	case LT_EXPR:
309 	case GT_EXPR:
310 	  break;
311 	case LE_EXPR:
312 	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
314 	  else
315 	    loop->n2 = fold_build2_loc (loc,
316 				    PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
317 				    build_int_cst (TREE_TYPE (loop->n2), 1));
318 	  loop->cond_code = LT_EXPR;
319 	  break;
320 	case GE_EXPR:
321 	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
322 	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
323 	  else
324 	    loop->n2 = fold_build2_loc (loc,
325 				    MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
326 				    build_int_cst (TREE_TYPE (loop->n2), 1));
327 	  loop->cond_code = GT_EXPR;
328 	  break;
329 	default:
330 	  gcc_unreachable ();
331 	}
332 
333       t = gimple_omp_for_incr (for_stmt, i);
334       gcc_assert (TREE_OPERAND (t, 0) == var);
335       switch (TREE_CODE (t))
336 	{
337 	case PLUS_EXPR:
338 	  loop->step = TREE_OPERAND (t, 1);
339 	  break;
340 	case POINTER_PLUS_EXPR:
341 	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
342 	  break;
343 	case MINUS_EXPR:
344 	  loop->step = TREE_OPERAND (t, 1);
345 	  loop->step = fold_build1_loc (loc,
346 				    NEGATE_EXPR, TREE_TYPE (loop->step),
347 				    loop->step);
348 	  break;
349 	default:
350 	  gcc_unreachable ();
351 	}
352 
353       if (iter_type != long_long_unsigned_type_node)
354 	{
355 	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
356 	    iter_type = long_long_unsigned_type_node;
357 	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
358 		   && TYPE_PRECISION (TREE_TYPE (loop->v))
359 		      >= TYPE_PRECISION (iter_type))
360 	    {
361 	      tree n;
362 
363 	      if (loop->cond_code == LT_EXPR)
364 		n = fold_build2_loc (loc,
365 				 PLUS_EXPR, TREE_TYPE (loop->v),
366 				 loop->n2, loop->step);
367 	      else
368 		n = loop->n1;
369 	      if (TREE_CODE (n) != INTEGER_CST
370 		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
371 		iter_type = long_long_unsigned_type_node;
372 	    }
373 	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
374 		   > TYPE_PRECISION (iter_type))
375 	    {
376 	      tree n1, n2;
377 
378 	      if (loop->cond_code == LT_EXPR)
379 		{
380 		  n1 = loop->n1;
381 		  n2 = fold_build2_loc (loc,
382 				    PLUS_EXPR, TREE_TYPE (loop->v),
383 				    loop->n2, loop->step);
384 		}
385 	      else
386 		{
387 		  n1 = fold_build2_loc (loc,
388 				    MINUS_EXPR, TREE_TYPE (loop->v),
389 				    loop->n2, loop->step);
390 		  n2 = loop->n1;
391 		}
392 	      if (TREE_CODE (n1) != INTEGER_CST
393 		  || TREE_CODE (n2) != INTEGER_CST
394 		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
395 		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
396 		iter_type = long_long_unsigned_type_node;
397 	    }
398 	}
399 
400       if (collapse_count && *collapse_count == NULL)
401 	{
402 	  t = fold_binary (loop->cond_code, boolean_type_node,
403 			   fold_convert (TREE_TYPE (loop->v), loop->n1),
404 			   fold_convert (TREE_TYPE (loop->v), loop->n2));
405 	  if (t && integer_zerop (t))
406 	    count = build_zero_cst (long_long_unsigned_type_node);
407 	  else if ((i == 0 || count != NULL_TREE)
408 		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
409 		   && TREE_CONSTANT (loop->n1)
410 		   && TREE_CONSTANT (loop->n2)
411 		   && TREE_CODE (loop->step) == INTEGER_CST)
412 	    {
413 	      tree itype = TREE_TYPE (loop->v);
414 
415 	      if (POINTER_TYPE_P (itype))
416 		itype = signed_type_for (itype);
417 	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
418 	      t = fold_build2_loc (loc,
419 			       PLUS_EXPR, itype,
420 			       fold_convert_loc (loc, itype, loop->step), t);
421 	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
422 			       fold_convert_loc (loc, itype, loop->n2));
423 	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
424 			       fold_convert_loc (loc, itype, loop->n1));
425 	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
426 		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
427 				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
428 				 fold_build1_loc (loc, NEGATE_EXPR, itype,
429 					      fold_convert_loc (loc, itype,
430 								loop->step)));
431 	      else
432 		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
433 				 fold_convert_loc (loc, itype, loop->step));
434 	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
435 	      if (count != NULL_TREE)
436 		count = fold_build2_loc (loc,
437 				     MULT_EXPR, long_long_unsigned_type_node,
438 				     count, t);
439 	      else
440 		count = t;
441 	      if (TREE_CODE (count) != INTEGER_CST)
442 		count = NULL_TREE;
443 	    }
444 	  else if (count && !integer_zerop (count))
445 	    count = NULL_TREE;
446 	}
447     }
448 
449   if (count)
450     {
451       if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
452 	iter_type = long_long_unsigned_type_node;
453       else
454 	iter_type = long_integer_type_node;
455     }
456   else if (collapse_iter && *collapse_iter != NULL)
457     iter_type = TREE_TYPE (*collapse_iter);
458   fd->iter_type = iter_type;
459   if (collapse_iter && *collapse_iter == NULL)
460     *collapse_iter = create_tmp_var (iter_type, ".iter");
461   if (collapse_count && *collapse_count == NULL)
462     {
463       if (count)
464 	*collapse_count = fold_convert_loc (loc, iter_type, count);
465       else
466 	*collapse_count = create_tmp_var (iter_type, ".count");
467     }
468 
469   if (fd->collapse > 1)
470     {
471       fd->loop.v = *collapse_iter;
472       fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
473       fd->loop.n2 = *collapse_count;
474       fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
475       fd->loop.cond_code = LT_EXPR;
476     }
477 }
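
/* For instance (a worked sketch of the bookkeeping above), given

	#pragma omp for collapse (2)
	for (i = 0; i < 8; i++)
	  for (j = 0; j < 4; j++)
	    body;

   two omp_for_data_loop entries are filled in and fd->loop describes the
   collapsed iteration space: fd->loop.v is the artificial iterator,
   fd->loop.n1 is 0, fd->loop.n2 is the total trip count (which folds to
   8 * 4 == 32 here because every bound and step is an integer constant),
   the step is 1 and the condition is LT_EXPR.  */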
478 
479 
480 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
481    is the immediate dominator of PAR_ENTRY_BB, return true if there
482    are no data dependencies that would prevent expanding the parallel
483    directive at PAR_ENTRY_BB as a combined parallel+workshare region.
484 
485    When expanding a combined parallel+workshare region, the call to
486    the child function may need additional arguments in the case of
487    GIMPLE_OMP_FOR regions.  In some cases, these arguments are
488    computed out of variables passed in from the parent to the child
489    via 'struct .omp_data_s'.  For instance:
490 
491 	#pragma omp parallel for schedule (guided, i * 4)
492 	for (j ...)
493 
494    Is lowered into:
495 
496    	# BLOCK 2 (PAR_ENTRY_BB)
497 	.omp_data_o.i = i;
498 	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
499 
500 	# BLOCK 3 (WS_ENTRY_BB)
501 	.omp_data_i = &.omp_data_o;
502 	D.1667 = .omp_data_i->i;
503 	D.1598 = D.1667 * 4;
504 	#pragma omp for schedule (guided, D.1598)
505 
506    When we outline the parallel region, the call to the child function
507    'bar.omp_fn.0' will need the value D.1598 in its argument list, but
508    that value is computed *after* the call site.  So, in principle we
509    cannot do the transformation.
510 
511    To see whether the code in WS_ENTRY_BB blocks the combined
512    parallel+workshare call, we collect all the variables used in the
513    GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
514    statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
515    call.
516 
517    FIXME.  If we had the SSA form built at this point, we could merely
518    hoist the code in block 3 into block 2 and be done with it.  But at
519    this point we don't have dataflow information and though we could
520    hack something up here, it is really not worth the aggravation.  */
521 
522 static bool
523 workshare_safe_to_combine_p (basic_block ws_entry_bb)
524 {
525   struct omp_for_data fd;
526   gimple ws_stmt = last_stmt (ws_entry_bb);
527 
528   if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
529     return true;
530 
531   gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
532 
533   extract_omp_for_data (ws_stmt, &fd, NULL);
534 
535   if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
536     return false;
537   if (fd.iter_type != long_integer_type_node)
538     return false;
539 
540   /* FIXME.  We give up too easily here.  If any of these arguments
541      are not constants, they will likely involve variables that have
542      been mapped into fields of .omp_data_s for sharing with the child
543      function.  With appropriate data flow, it would be possible to
544      see through this.  */
545   if (!is_gimple_min_invariant (fd.loop.n1)
546       || !is_gimple_min_invariant (fd.loop.n2)
547       || !is_gimple_min_invariant (fd.loop.step)
548       || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
549     return false;
550 
551   return true;
552 }
553 
554 
555 /* Collect additional arguments needed to emit a combined
556    parallel+workshare call.  WS_STMT is the workshare directive being
557    expanded.  */
558 
559 static vec<tree, va_gc> *
560 get_ws_args_for (gimple ws_stmt)
561 {
562   tree t;
563   location_t loc = gimple_location (ws_stmt);
564   vec<tree, va_gc> *ws_args;
565 
566   if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
567     {
568       struct omp_for_data fd;
569 
570       extract_omp_for_data (ws_stmt, &fd, NULL);
571 
572       vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
573 
574       t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
575       ws_args->quick_push (t);
576 
577       t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
578       ws_args->quick_push (t);
579 
580       t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
581       ws_args->quick_push (t);
582 
583       if (fd.chunk_size)
584 	{
585 	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
586 	  ws_args->quick_push (t);
587 	}
588 
589       return ws_args;
590     }
591   else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
592     {
593       /* Number of sections is equal to the number of edges from the
594 	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
595 	 the exit of the sections region.  */
596       basic_block bb = single_succ (gimple_bb (ws_stmt));
597       t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
598       vec_alloc (ws_args, 1);
599       ws_args->quick_push (t);
600       return ws_args;
601     }
602 
603   gcc_unreachable ();
604 }
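
/* E.g. (a sketch, assuming constant bounds so that the loop is safe to
   combine), for

	#pragma omp parallel for schedule (guided, 4)
	for (j = 0; j < 100; j++)

   the vector built above holds the loop bounds, step and chunk size,
   each converted to long: { 0, 100, 1, 4 }.  For a sections region it
   holds a single element, the number of sections.  */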
605 
606 
607 /* Discover whether REGION is a combined parallel+workshare region.  */
608 
609 static void
610 determine_parallel_type (struct omp_region *region)
611 {
612   basic_block par_entry_bb, par_exit_bb;
613   basic_block ws_entry_bb, ws_exit_bb;
614 
615   if (region == NULL || region->inner == NULL
616       || region->exit == NULL || region->inner->exit == NULL
617       || region->inner->cont == NULL)
618     return;
619 
620   /* We only support parallel+for and parallel+sections.  */
621   if (region->type != GIMPLE_OMP_PARALLEL
622       || (region->inner->type != GIMPLE_OMP_FOR
623 	  && region->inner->type != GIMPLE_OMP_SECTIONS))
624     return;
625 
626   /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
627      WS_EXIT_BB -> PAR_EXIT_BB.  */
628   par_entry_bb = region->entry;
629   par_exit_bb = region->exit;
630   ws_entry_bb = region->inner->entry;
631   ws_exit_bb = region->inner->exit;
632 
633   if (single_succ (par_entry_bb) == ws_entry_bb
634       && single_succ (ws_exit_bb) == par_exit_bb
635       && workshare_safe_to_combine_p (ws_entry_bb)
636       && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
637 	  || (last_and_only_stmt (ws_entry_bb)
638 	      && last_and_only_stmt (par_exit_bb))))
639     {
640       gimple ws_stmt = last_stmt (ws_entry_bb);
641 
642       if (region->inner->type == GIMPLE_OMP_FOR)
643 	{
644 	  /* If this is a combined parallel loop, we need to determine
645 	     whether or not to use the combined library calls.  There
646 	     are two cases where we do not apply the transformation:
647 	     static loops and any kind of ordered loop.  In the first
648 	     case, we already open code the loop so there is no need
649 	     to do anything else.  In the latter case, the combined
650 	     parallel loop call would still need extra synchronization
651 	     to implement ordered semantics, so there would not be any
652 	     gain in using the combined call.  */
653 	  tree clauses = gimple_omp_for_clauses (ws_stmt);
654 	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
655 	  if (c == NULL
656 	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
657 	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
658 	    {
659 	      region->is_combined_parallel = false;
660 	      region->inner->is_combined_parallel = false;
661 	      return;
662 	    }
663 	}
664 
665       region->is_combined_parallel = true;
666       region->inner->is_combined_parallel = true;
667       region->ws_args = get_ws_args_for (ws_stmt);
668     }
669 }
670 
671 
672 /* Return true if EXPR is variable sized.  */
673 
674 static inline bool
675 is_variable_sized (const_tree expr)
676 {
677   return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
678 }
679 
680 /* Return true if DECL is a reference type.  */
681 
682 static inline bool
683 is_reference (tree decl)
684 {
685   return lang_hooks.decls.omp_privatize_by_reference (decl);
686 }
687 
688 /* Look up variables in the decl or field splay trees.  The "maybe" form
689    allows the variable to not have been entered; otherwise we
690    assert that the variable must have been entered.  */
691 
692 static inline tree
693 lookup_decl (tree var, omp_context *ctx)
694 {
695   tree *n;
696   n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
697   return *n;
698 }
699 
700 static inline tree
701 maybe_lookup_decl (const_tree var, omp_context *ctx)
702 {
703   tree *n;
704   n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
705   return n ? *n : NULL_TREE;
706 }
707 
708 static inline tree
709 lookup_field (tree var, omp_context *ctx)
710 {
711   splay_tree_node n;
712   n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
713   return (tree) n->value;
714 }
715 
716 static inline tree
717 lookup_sfield (tree var, omp_context *ctx)
718 {
719   splay_tree_node n;
720   n = splay_tree_lookup (ctx->sfield_map
721 			 ? ctx->sfield_map : ctx->field_map,
722 			 (splay_tree_key) var);
723   return (tree) n->value;
724 }
725 
726 static inline tree
727 maybe_lookup_field (tree var, omp_context *ctx)
728 {
729   splay_tree_node n;
730   n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
731   return n ? (tree) n->value : NULL_TREE;
732 }
733 
734 /* Return true if DECL should be copied by pointer.  SHARED_CTX is
735    the parallel context if DECL is to be shared.  */
736 
737 static bool
738 use_pointer_for_field (tree decl, omp_context *shared_ctx)
739 {
740   if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
741     return true;
742 
743   /* We can only use copy-in/copy-out semantics for shared variables
744      when we know the value is not accessible from an outer scope.  */
745   if (shared_ctx)
746     {
747       /* ??? Trivially accessible from anywhere.  But why would we even
748 	 be passing an address in this case?  Should we simply assert
749 	 this to be false, or should we have a cleanup pass that removes
750 	 these from the list of mappings?  */
751       if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
752 	return true;
753 
754       /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
755 	 without analyzing the expression whether or not its location
756 	 is accessible to anyone else.  In the case of nested parallel
757 	 regions it certainly may be.  */
758       if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
759 	return true;
760 
761       /* Do not use copy-in/copy-out for variables that have their
762 	 address taken.  */
763       if (TREE_ADDRESSABLE (decl))
764 	return true;
765 
766       /* lower_send_shared_vars only uses copy-in, but not copy-out
767 	 for these.  */
768       if (TREE_READONLY (decl)
769 	  || ((TREE_CODE (decl) == RESULT_DECL
770 	       || TREE_CODE (decl) == PARM_DECL)
771 	      && DECL_BY_REFERENCE (decl)))
772 	return false;
773 
774       /* Disallow copy-in/out in nested parallel if
775 	 decl is shared in outer parallel, otherwise
776 	 each thread could store the shared variable
777 	 in its own copy-in location, making the
778 	 variable no longer really shared.  */
779       if (shared_ctx->is_nested)
780 	{
781 	  omp_context *up;
782 
783 	  for (up = shared_ctx->outer; up; up = up->outer)
784 	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
785 	      break;
786 
787 	  if (up)
788 	    {
789 	      tree c;
790 
791 	      for (c = gimple_omp_taskreg_clauses (up->stmt);
792 		   c; c = OMP_CLAUSE_CHAIN (c))
793 		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
794 		    && OMP_CLAUSE_DECL (c) == decl)
795 		  break;
796 
797 	      if (c)
798 		goto maybe_mark_addressable_and_ret;
799 	    }
800 	}
801 
802       /* For tasks avoid using copy-in/out.  As tasks can be
803 	 deferred or executed in a different thread, when GOMP_task
804 	 returns, the task hasn't necessarily terminated.  */
805       if (is_task_ctx (shared_ctx))
806 	{
807 	  tree outer;
808 	maybe_mark_addressable_and_ret:
809 	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
810 	  if (is_gimple_reg (outer))
811 	    {
812 	      /* Taking address of OUTER in lower_send_shared_vars
813 		 might need regimplification of everything that uses the
814 		 variable.  */
815 	      if (!task_shared_vars)
816 		task_shared_vars = BITMAP_ALLOC (NULL);
817 	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
818 	      TREE_ADDRESSABLE (outer) = 1;
819 	    }
820 	  return true;
821 	}
822     }
823 
824   return false;
825 }
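
/* For instance (a sketch): a local scalar that is merely listed in a
   shared clause of a parallel, and whose address never escapes, can use
   copy-in/copy-out, so its field keeps the variable's own type and the
   child reads .omp_data_i->x.  If the same variable were TREE_ADDRESSABLE
   or shared with a task, the function above returns true, the field
   becomes a pointer, and the child reads *.omp_data_i->x instead.  */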
826 
827 /* Create a new VAR_DECL and copy information from VAR to it.  */
828 
829 tree
830 copy_var_decl (tree var, tree name, tree type)
831 {
832   tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
833 
834   TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
835   TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
836   DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
837   DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
838   DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
839   DECL_CONTEXT (copy) = DECL_CONTEXT (var);
840   TREE_NO_WARNING (copy) = TREE_NO_WARNING (var);
841   TREE_USED (copy) = 1;
842   DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
843 
844   return copy;
845 }
846 
847 /* Construct a new automatic decl similar to VAR.  */
848 
849 static tree
850 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
851 {
852   tree copy = copy_var_decl (var, name, type);
853 
854   DECL_CONTEXT (copy) = current_function_decl;
855   DECL_CHAIN (copy) = ctx->block_vars;
856   ctx->block_vars = copy;
857 
858   return copy;
859 }
860 
861 static tree
862 omp_copy_decl_1 (tree var, omp_context *ctx)
863 {
864   return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
865 }
866 
867 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
868    as appropriate.  */
869 static tree
870 omp_build_component_ref (tree obj, tree field)
871 {
872   tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
873   if (TREE_THIS_VOLATILE (field))
874     TREE_THIS_VOLATILE (ret) |= 1;
875   if (TREE_READONLY (field))
876     TREE_READONLY (ret) |= 1;
877   return ret;
878 }
879 
880 /* Build tree nodes to access the field for VAR on the receiver side.  */
881 
882 static tree
883 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
884 {
885   tree x, field = lookup_field (var, ctx);
886 
887   /* If the receiver record type was remapped in the child function,
888      remap the field into the new record type.  */
889   x = maybe_lookup_field (field, ctx);
890   if (x != NULL)
891     field = x;
892 
893   x = build_simple_mem_ref (ctx->receiver_decl);
894   x = omp_build_component_ref (x, field);
895   if (by_ref)
896     x = build_simple_mem_ref (x);
897 
898   return x;
899 }
900 
901 /* Build tree nodes to access VAR in the scope outer to CTX.  In the case
902    of a parallel, this is a component reference; for workshare constructs
903    this is some variable.  */
904 
905 static tree
906 build_outer_var_ref (tree var, omp_context *ctx)
907 {
908   tree x;
909 
910   if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
911     x = var;
912   else if (is_variable_sized (var))
913     {
914       x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
915       x = build_outer_var_ref (x, ctx);
916       x = build_simple_mem_ref (x);
917     }
918   else if (is_taskreg_ctx (ctx))
919     {
920       bool by_ref = use_pointer_for_field (var, NULL);
921       x = build_receiver_ref (var, by_ref, ctx);
922     }
923   else if (ctx->outer)
924     x = lookup_decl (var, ctx->outer);
925   else if (is_reference (var))
926     /* This can happen with orphaned constructs.  If var is reference, it is
927        possible it is shared and as such valid.  */
928     x = var;
929   else
930     gcc_unreachable ();
931 
932   if (is_reference (var))
933     x = build_simple_mem_ref (x);
934 
935   return x;
936 }
937 
938 /* Build tree nodes to access the field for VAR on the sender side.  */
939 
940 static tree
941 build_sender_ref (tree var, omp_context *ctx)
942 {
943   tree field = lookup_sfield (var, ctx);
944   return omp_build_component_ref (ctx->sender_decl, field);
945 }
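
/* Taken together, sender and receiver refs recreate the marshalling from
   the earlier example (a sketch; the by-reference variant in parentheses):

	parent:  .omp_data_o.i = i;		(.omp_data_o.i = &i;)
	child:   D.1667 = .omp_data_i->i;	(D.1667 = *.omp_data_i->i;)  */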
946 
947 /* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
       MASK & 1, a field is added to CTX->RECORD_TYPE and recorded in
       CTX->FIELD_MAP; if MASK & 2, one is added to CTX->SRECORD_TYPE and
       recorded in CTX->SFIELD_MAP.  If BY_REF, the field holds a pointer
       to VAR rather than its value.  */
948 
949 static void
950 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
951 {
952   tree field, type, sfield = NULL_TREE;
953 
954   gcc_assert ((mask & 1) == 0
955 	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
956   gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
957 	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
958 
959   type = TREE_TYPE (var);
960   if (by_ref)
961     type = build_pointer_type (type);
962   else if ((mask & 3) == 1 && is_reference (var))
963     type = TREE_TYPE (type);
964 
965   field = build_decl (DECL_SOURCE_LOCATION (var),
966 		      FIELD_DECL, DECL_NAME (var), type);
967 
968   /* Remember what variable this field was created for.  This does have a
969      side effect of making dwarf2out ignore this member, so for helpful
970      debugging we clear it later in delete_omp_context.  */
971   DECL_ABSTRACT_ORIGIN (field) = var;
972   if (type == TREE_TYPE (var))
973     {
974       DECL_ALIGN (field) = DECL_ALIGN (var);
975       DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
976       TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
977     }
978   else
979     DECL_ALIGN (field) = TYPE_ALIGN (type);
980 
981   if ((mask & 3) == 3)
982     {
983       insert_field_into_struct (ctx->record_type, field);
984       if (ctx->srecord_type)
985 	{
986 	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
987 			       FIELD_DECL, DECL_NAME (var), type);
988 	  DECL_ABSTRACT_ORIGIN (sfield) = var;
989 	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
990 	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
991 	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
992 	  insert_field_into_struct (ctx->srecord_type, sfield);
993 	}
994     }
995   else
996     {
997       if (ctx->srecord_type == NULL_TREE)
998 	{
999 	  tree t;
1000 
1001 	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1002 	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1003 	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1004 	    {
1005 	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
1006 				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1007 	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1008 	      insert_field_into_struct (ctx->srecord_type, sfield);
1009 	      splay_tree_insert (ctx->sfield_map,
1010 				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1011 				 (splay_tree_value) sfield);
1012 	    }
1013 	}
1014       sfield = field;
1015       insert_field_into_struct ((mask & 1) ? ctx->record_type
1016 				: ctx->srecord_type, field);
1017     }
1018 
1019   if (mask & 1)
1020     splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1021 		       (splay_tree_value) field);
1022   if ((mask & 2) && ctx->sfield_map)
1023     splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1024 		       (splay_tree_value) sfield);
1025 }
1026 
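/* Create a replacement decl for VAR local to the construct described by
   CTX, chain it onto CTX->block_vars and record the mapping in the
   context's decl_map so that later lookup_decl (VAR, CTX) finds it.  */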
1027 static tree
1028 install_var_local (tree var, omp_context *ctx)
1029 {
1030   tree new_var = omp_copy_decl_1 (var, ctx);
1031   insert_decl_map (&ctx->cb, var, new_var);
1032   return new_var;
1033 }
1034 
1035 /* Adjust the replacement for DECL in CTX for the new context.  This means
1036    copying the DECL_VALUE_EXPR, and fixing up the type.  */
1037 
1038 static void
1039 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1040 {
1041   tree new_decl, size;
1042 
1043   new_decl = lookup_decl (decl, ctx);
1044 
1045   TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1046 
1047   if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1048       && DECL_HAS_VALUE_EXPR_P (decl))
1049     {
1050       tree ve = DECL_VALUE_EXPR (decl);
1051       walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1052       SET_DECL_VALUE_EXPR (new_decl, ve);
1053       DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1054     }
1055 
1056   if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1057     {
1058       size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1059       if (size == error_mark_node)
1060 	size = TYPE_SIZE (TREE_TYPE (new_decl));
1061       DECL_SIZE (new_decl) = size;
1062 
1063       size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1064       if (size == error_mark_node)
1065 	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1066       DECL_SIZE_UNIT (new_decl) = size;
1067     }
1068 }
1069 
1070 /* The callback for remap_decl.  Search all containing contexts for a
1071    mapping of the variable; this avoids having to duplicate the splay
1072    tree ahead of time.  We know a mapping doesn't already exist in the
1073    given context.  Create new mappings to implement default semantics.  */
1074 
1075 static tree
1076 omp_copy_decl (tree var, copy_body_data *cb)
1077 {
1078   omp_context *ctx = (omp_context *) cb;
1079   tree new_var;
1080 
1081   if (TREE_CODE (var) == LABEL_DECL)
1082     {
1083       new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1084       DECL_CONTEXT (new_var) = current_function_decl;
1085       insert_decl_map (&ctx->cb, var, new_var);
1086       return new_var;
1087     }
1088 
1089   while (!is_taskreg_ctx (ctx))
1090     {
1091       ctx = ctx->outer;
1092       if (ctx == NULL)
1093 	return var;
1094       new_var = maybe_lookup_decl (var, ctx);
1095       if (new_var)
1096 	return new_var;
1097     }
1098 
1099   if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1100     return var;
1101 
1102   return error_mark_node;
1103 }
1104 
1105 
1106 /* Return the parallel region associated with STMT.  */
1107 
1108 /* Debugging dumps for parallel regions.  */
1109 void dump_omp_region (FILE *, struct omp_region *, int);
1110 void debug_omp_region (struct omp_region *);
1111 void debug_all_omp_regions (void);
1112 
1113 /* Dump the parallel region tree rooted at REGION.  */
1114 
1115 void
1116 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1117 {
1118   fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1119 	   gimple_code_name[region->type]);
1120 
1121   if (region->inner)
1122     dump_omp_region (file, region->inner, indent + 4);
1123 
1124   if (region->cont)
1125     {
1126       fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1127 	       region->cont->index);
1128     }
1129 
1130   if (region->exit)
1131     fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1132 	     region->exit->index);
1133   else
1134     fprintf (file, "%*s[no exit marker]\n", indent, "");
1135 
1136   if (region->next)
1137     dump_omp_region (file, region->next, indent);
1138 }
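
/* A sketch of the resulting dump for a parallel region containing a
   single omp for (block numbers made up; the region types are printed
   via gimple_code_name):

	bb 2: gimple_omp_parallel
	    bb 3: gimple_omp_for
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN  */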
1139 
1140 DEBUG_FUNCTION void
1141 debug_omp_region (struct omp_region *region)
1142 {
1143   dump_omp_region (stderr, region, 0);
1144 }
1145 
1146 DEBUG_FUNCTION void
1147 debug_all_omp_regions (void)
1148 {
1149   dump_omp_region (stderr, root_omp_region, 0);
1150 }
1151 
1152 
1153 /* Create a new omp region of type TYPE, entered at basic block BB, inside
        region PARENT.  */
1154 
1155 struct omp_region *
1156 new_omp_region (basic_block bb, enum gimple_code type,
1157 		struct omp_region *parent)
1158 {
1159   struct omp_region *region = XCNEW (struct omp_region);
1160 
1161   region->outer = parent;
1162   region->entry = bb;
1163   region->type = type;
1164 
1165   if (parent)
1166     {
1167       /* This is a nested region.  Add it to the list of inner
1168 	 regions in PARENT.  */
1169       region->next = parent->inner;
1170       parent->inner = region;
1171     }
1172   else
1173     {
1174       /* This is a toplevel region.  Add it to the list of toplevel
1175 	 regions in ROOT_OMP_REGION.  */
1176       region->next = root_omp_region;
1177       root_omp_region = region;
1178     }
1179 
1180   return region;
1181 }
1182 
1183 /* Release the memory associated with the region tree rooted at REGION.  */
1184 
1185 static void
1186 free_omp_region_1 (struct omp_region *region)
1187 {
1188   struct omp_region *i, *n;
1189 
1190   for (i = region->inner; i ; i = n)
1191     {
1192       n = i->next;
1193       free_omp_region_1 (i);
1194     }
1195 
1196   free (region);
1197 }
1198 
1199 /* Release the memory for the entire omp region tree.  */
1200 
1201 void
1202 free_omp_regions (void)
1203 {
1204   struct omp_region *r, *n;
1205   for (r = root_omp_region; r ; r = n)
1206     {
1207       n = r->next;
1208       free_omp_region_1 (r);
1209     }
1210   root_omp_region = NULL;
1211 }
1212 
1213 
1214 /* Create a new context, with OUTER_CTX being the surrounding context.  */
1215 
1216 static omp_context *
1217 new_omp_context (gimple stmt, omp_context *outer_ctx)
1218 {
1219   omp_context *ctx = XCNEW (omp_context);
1220 
1221   splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1222 		     (splay_tree_value) ctx);
1223   ctx->stmt = stmt;
1224 
1225   if (outer_ctx)
1226     {
1227       ctx->outer = outer_ctx;
1228       ctx->cb = outer_ctx->cb;
1229       ctx->cb.block = NULL;
1230       ctx->depth = outer_ctx->depth + 1;
1231     }
1232   else
1233     {
1234       ctx->cb.src_fn = current_function_decl;
1235       ctx->cb.dst_fn = current_function_decl;
1236       ctx->cb.src_node = cgraph_get_node (current_function_decl);
1237       gcc_checking_assert (ctx->cb.src_node);
1238       ctx->cb.dst_node = ctx->cb.src_node;
1239       ctx->cb.src_cfun = cfun;
1240       ctx->cb.copy_decl = omp_copy_decl;
1241       ctx->cb.eh_lp_nr = 0;
1242       ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1243       ctx->depth = 1;
1244     }
1245 
1246   ctx->cb.decl_map = pointer_map_create ();
1247 
1248   return ctx;
1249 }
1250 
1251 static gimple_seq maybe_catch_exception (gimple_seq);
1252 
1253 /* Finalize task copyfn.  */
1254 
1255 static void
1256 finalize_task_copyfn (gimple task_stmt)
1257 {
1258   struct function *child_cfun;
1259   tree child_fn;
1260   gimple_seq seq = NULL, new_seq;
1261   gimple bind;
1262 
1263   child_fn = gimple_omp_task_copy_fn (task_stmt);
1264   if (child_fn == NULL_TREE)
1265     return;
1266 
1267   child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1268 
1269   /* Inform the callgraph about the new function.  */
1270   DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1271     = cfun->curr_properties & ~PROP_loops;
1272 
1273   push_cfun (child_cfun);
1274   bind = gimplify_body (child_fn, false);
1275   gimple_seq_add_stmt (&seq, bind);
1276   new_seq = maybe_catch_exception (seq);
1277   if (new_seq != seq)
1278     {
1279       bind = gimple_build_bind (NULL, new_seq, NULL);
1280       seq = NULL;
1281       gimple_seq_add_stmt (&seq, bind);
1282     }
1283   gimple_set_body (child_fn, seq);
1284   pop_cfun ();
1285 
1286   cgraph_add_new_function (child_fn, false);
1287 }
1288 
1289 /* Destroy an omp_context data structure.  Called through the splay tree
1290    value delete callback.  */
1291 
1292 static void
1293 delete_omp_context (splay_tree_value value)
1294 {
1295   omp_context *ctx = (omp_context *) value;
1296 
1297   pointer_map_destroy (ctx->cb.decl_map);
1298 
1299   if (ctx->field_map)
1300     splay_tree_delete (ctx->field_map);
1301   if (ctx->sfield_map)
1302     splay_tree_delete (ctx->sfield_map);
1303 
1304   /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
1305      it produces corrupt debug information.  */
1306   if (ctx->record_type)
1307     {
1308       tree t;
1309       for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1310 	DECL_ABSTRACT_ORIGIN (t) = NULL;
1311     }
1312   if (ctx->srecord_type)
1313     {
1314       tree t;
1315       for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1316 	DECL_ABSTRACT_ORIGIN (t) = NULL;
1317     }
1318 
1319   if (is_task_ctx (ctx))
1320     finalize_task_copyfn (ctx->stmt);
1321 
1322   XDELETE (ctx);
1323 }
1324 
1325 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1326    context.  */
1327 
1328 static void
1329 fixup_child_record_type (omp_context *ctx)
1330 {
1331   tree f, type = ctx->record_type;
1332 
1333   /* ??? It isn't sufficient to just call remap_type here, because
1334      variably_modified_type_p doesn't work the way we expect for
1335      record types.  Testing each field for whether it needs remapping
1336      and creating a new record by hand works, however.  */
1337   for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1338     if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1339       break;
1340   if (f)
1341     {
1342       tree name, new_fields = NULL;
1343 
1344       type = lang_hooks.types.make_type (RECORD_TYPE);
1345       name = DECL_NAME (TYPE_NAME (ctx->record_type));
1346       name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1347 			 TYPE_DECL, name, type);
1348       TYPE_NAME (type) = name;
1349 
1350       for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1351 	{
1352 	  tree new_f = copy_node (f);
1353 	  DECL_CONTEXT (new_f) = type;
1354 	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1355 	  DECL_CHAIN (new_f) = new_fields;
1356 	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1357 	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1358 		     &ctx->cb, NULL);
1359 	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1360 		     &ctx->cb, NULL);
1361 	  new_fields = new_f;
1362 
1363 	  /* Arrange to be able to look up the receiver field
1364 	     given the sender field.  */
1365 	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1366 			     (splay_tree_value) new_f);
1367 	}
1368       TYPE_FIELDS (type) = nreverse (new_fields);
1369       layout_type (type);
1370     }
1371 
1372   TREE_TYPE (ctx->receiver_decl)
1373     = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
1374 }
1375 
1376 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1377    specified by CLAUSES.  */
1378 
1379 static void
1380 scan_sharing_clauses (tree clauses, omp_context *ctx)
1381 {
1382   tree c, decl;
1383   bool scan_array_reductions = false;
1384 
1385   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1386     {
1387       bool by_ref;
1388 
1389       switch (OMP_CLAUSE_CODE (c))
1390 	{
1391 	case OMP_CLAUSE_PRIVATE:
1392 	  decl = OMP_CLAUSE_DECL (c);
1393 	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1394 	    goto do_private;
1395 	  else if (!is_variable_sized (decl))
1396 	    install_var_local (decl, ctx);
1397 	  break;
1398 
1399 	case OMP_CLAUSE_SHARED:
1400 	  gcc_assert (is_taskreg_ctx (ctx));
1401 	  decl = OMP_CLAUSE_DECL (c);
1402 	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1403 		      || !is_variable_sized (decl));
1404 	  /* Global variables don't need to be copied,
1405 	     the receiver side will use them directly.  */
1406 	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1407 	    break;
1408 	  by_ref = use_pointer_for_field (decl, ctx);
1409 	  if (! TREE_READONLY (decl)
1410 	      || TREE_ADDRESSABLE (decl)
1411 	      || by_ref
1412 	      || is_reference (decl))
1413 	    {
1414 	      install_var_field (decl, by_ref, 3, ctx);
1415 	      install_var_local (decl, ctx);
1416 	      break;
1417 	    }
1418 	  /* We don't need to copy const scalar vars back.  */
1419 	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1420 	  goto do_private;
1421 
1422 	case OMP_CLAUSE_LASTPRIVATE:
1423 	  /* Let the corresponding firstprivate clause create
1424 	     the variable.  */
1425 	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1426 	    break;
1427 	  /* FALLTHRU */
1428 
1429 	case OMP_CLAUSE_FIRSTPRIVATE:
1430 	case OMP_CLAUSE_REDUCTION:
1431 	  decl = OMP_CLAUSE_DECL (c);
1432 	do_private:
1433 	  if (is_variable_sized (decl))
1434 	    {
1435 	      if (is_task_ctx (ctx))
1436 		install_var_field (decl, false, 1, ctx);
1437 	      break;
1438 	    }
1439 	  else if (is_taskreg_ctx (ctx))
1440 	    {
1441 	      bool global
1442 		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1443 	      by_ref = use_pointer_for_field (decl, NULL);
1444 
1445 	      if (is_task_ctx (ctx)
1446 		  && (global || by_ref || is_reference (decl)))
1447 		{
1448 		  install_var_field (decl, false, 1, ctx);
1449 		  if (!global)
1450 		    install_var_field (decl, by_ref, 2, ctx);
1451 		}
1452 	      else if (!global)
1453 		install_var_field (decl, by_ref, 3, ctx);
1454 	    }
1455 	  install_var_local (decl, ctx);
1456 	  break;
1457 
1458 	case OMP_CLAUSE_COPYPRIVATE:
1459 	case OMP_CLAUSE_COPYIN:
1460 	  decl = OMP_CLAUSE_DECL (c);
1461 	  by_ref = use_pointer_for_field (decl, NULL);
1462 	  install_var_field (decl, by_ref, 3, ctx);
1463 	  break;
1464 
1465 	case OMP_CLAUSE_DEFAULT:
1466 	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1467 	  break;
1468 
1469 	case OMP_CLAUSE_FINAL:
1470 	case OMP_CLAUSE_IF:
1471 	case OMP_CLAUSE_NUM_THREADS:
1472 	case OMP_CLAUSE_SCHEDULE:
1473 	  if (ctx->outer)
1474 	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1475 	  break;
1476 
1477 	case OMP_CLAUSE_NOWAIT:
1478 	case OMP_CLAUSE_ORDERED:
1479 	case OMP_CLAUSE_COLLAPSE:
1480 	case OMP_CLAUSE_UNTIED:
1481 	case OMP_CLAUSE_MERGEABLE:
1482 	  break;
1483 
1484 	default:
1485 	  gcc_unreachable ();
1486 	}
1487     }
1488 
1489   for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1490     {
1491       switch (OMP_CLAUSE_CODE (c))
1492 	{
1493 	case OMP_CLAUSE_LASTPRIVATE:
1494 	  /* Let the corresponding firstprivate clause create
1495 	     the variable.  */
1496 	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1497 	    scan_array_reductions = true;
1498 	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1499 	    break;
1500 	  /* FALLTHRU */
1501 
1502 	case OMP_CLAUSE_PRIVATE:
1503 	case OMP_CLAUSE_FIRSTPRIVATE:
1504 	case OMP_CLAUSE_REDUCTION:
1505 	  decl = OMP_CLAUSE_DECL (c);
1506 	  if (is_variable_sized (decl))
1507 	    install_var_local (decl, ctx);
1508 	  fixup_remapped_decl (decl, ctx,
1509 			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1510 			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
1511 	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1512 	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1513 	    scan_array_reductions = true;
1514 	  break;
1515 
1516 	case OMP_CLAUSE_SHARED:
1517 	  decl = OMP_CLAUSE_DECL (c);
1518 	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1519 	    fixup_remapped_decl (decl, ctx, false);
1520 	  break;
1521 
1522 	case OMP_CLAUSE_COPYPRIVATE:
1523 	case OMP_CLAUSE_COPYIN:
1524 	case OMP_CLAUSE_DEFAULT:
1525 	case OMP_CLAUSE_IF:
1526 	case OMP_CLAUSE_NUM_THREADS:
1527 	case OMP_CLAUSE_SCHEDULE:
1528 	case OMP_CLAUSE_NOWAIT:
1529 	case OMP_CLAUSE_ORDERED:
1530 	case OMP_CLAUSE_COLLAPSE:
1531 	case OMP_CLAUSE_UNTIED:
1532 	case OMP_CLAUSE_FINAL:
1533 	case OMP_CLAUSE_MERGEABLE:
1534 	  break;
1535 
1536 	default:
1537 	  gcc_unreachable ();
1538 	}
1539     }
1540 
1541   if (scan_array_reductions)
1542     for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1543       if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1544 	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1545 	{
1546 	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1547 	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1548 	}
1549       else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1550 	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1551 	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1552 }
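
/* As a sketch of the two passes above: for

	#pragma omp parallel shared (a) firstprivate (b) private (c)

   where A, B and C are ordinary non-global locals, the first pass
   installs both a field and a local replacement decl for A (mask 3,
   since it is writable) and for B, but only a local replacement decl
   for C; the second pass then fixes up the types and DECL_VALUE_EXPRs
   of the remapped decls.  */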
1553 
1554 /* Create a new name for the omp child function.  Returns an identifier.  */
1555 
1556 static GTY(()) unsigned int tmp_ompfn_id_num;
1557 
1558 static tree
1559 create_omp_child_function_name (bool task_copy)
1560 {
1561   return (clone_function_name (current_function_decl,
1562 			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
1563 }
1564 
1565 /* Build a decl for the omp child function.  It'll not contain a body
1566    yet, just the bare decl.  */
1567 
1568 static void
1569 create_omp_child_function (omp_context *ctx, bool task_copy)
1570 {
1571   tree decl, type, name, t;
1572 
1573   name = create_omp_child_function_name (task_copy);
1574   if (task_copy)
1575     type = build_function_type_list (void_type_node, ptr_type_node,
1576 				     ptr_type_node, NULL_TREE);
1577   else
1578     type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1579 
1580   decl = build_decl (gimple_location (ctx->stmt),
1581 		     FUNCTION_DECL, name, type);
1582 
1583   if (!task_copy)
1584     ctx->cb.dst_fn = decl;
1585   else
1586     gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1587 
1588   TREE_STATIC (decl) = 1;
1589   TREE_USED (decl) = 1;
1590   DECL_ARTIFICIAL (decl) = 1;
1591   DECL_IGNORED_P (decl) = 0;
1592   TREE_PUBLIC (decl) = 0;
1593   DECL_UNINLINABLE (decl) = 1;
1594   DECL_EXTERNAL (decl) = 0;
1595   DECL_CONTEXT (decl) = NULL_TREE;
1596   DECL_INITIAL (decl) = make_node (BLOCK);
1597 
1598   t = build_decl (DECL_SOURCE_LOCATION (decl),
1599 		  RESULT_DECL, NULL_TREE, void_type_node);
1600   DECL_ARTIFICIAL (t) = 1;
1601   DECL_IGNORED_P (t) = 1;
1602   DECL_CONTEXT (t) = decl;
1603   DECL_RESULT (decl) = t;
1604 
1605   t = build_decl (DECL_SOURCE_LOCATION (decl),
1606 		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1607   DECL_ARTIFICIAL (t) = 1;
1608   DECL_NAMELESS (t) = 1;
1609   DECL_ARG_TYPE (t) = ptr_type_node;
1610   DECL_CONTEXT (t) = current_function_decl;
1611   TREE_USED (t) = 1;
1612   DECL_ARGUMENTS (decl) = t;
1613   if (!task_copy)
1614     ctx->receiver_decl = t;
1615   else
1616     {
1617       t = build_decl (DECL_SOURCE_LOCATION (decl),
1618 		      PARM_DECL, get_identifier (".omp_data_o"),
1619 		      ptr_type_node);
1620       DECL_ARTIFICIAL (t) = 1;
1621       DECL_NAMELESS (t) = 1;
1622       DECL_ARG_TYPE (t) = ptr_type_node;
1623       DECL_CONTEXT (t) = current_function_decl;
1624       TREE_USED (t) = 1;
1625       TREE_ADDRESSABLE (t) = 1;
1626       DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1627       DECL_ARGUMENTS (decl) = t;
1628     }
1629 
1630   /* Allocate memory for the function structure.  The call to
1631      allocate_struct_function clobbers CFUN, so we need to restore
1632      it afterward.  */
1633   push_struct_function (decl);
1634   cfun->function_end_locus = gimple_location (ctx->stmt);
1635   pop_cfun ();
1636 }
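
/* The decl built above carries no body yet; as a sketch, for a parent
   function FOO the result is roughly

	static void foo._omp_fn.N (void *.omp_data_i);

   and for the task copy function

	static void foo._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i);

   with the numbering supplied by clone_function_name.  */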
1637 
1638 
1639 /* Scan an OpenMP parallel directive.  */
1640 
1641 static void
1642 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1643 {
1644   omp_context *ctx;
1645   tree name;
1646   gimple stmt = gsi_stmt (*gsi);
1647 
1648   /* Ignore parallel directives with empty bodies, unless there
1649      are copyin clauses.  */
1650   if (optimize > 0
1651       && empty_body_p (gimple_omp_body (stmt))
1652       && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1653 			  OMP_CLAUSE_COPYIN) == NULL)
1654     {
1655       gsi_replace (gsi, gimple_build_nop (), false);
1656       return;
1657     }
1658 
1659   ctx = new_omp_context (stmt, outer_ctx);
1660   taskreg_contexts.safe_push (ctx);
1661   if (taskreg_nesting_level > 1)
1662     ctx->is_nested = true;
1663   ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1664   ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1665   ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1666   name = create_tmp_var_name (".omp_data_s");
1667   name = build_decl (gimple_location (stmt),
1668 		     TYPE_DECL, name, ctx->record_type);
1669   DECL_ARTIFICIAL (name) = 1;
1670   DECL_NAMELESS (name) = 1;
1671   TYPE_NAME (ctx->record_type) = name;
1672   create_omp_child_function (ctx, false);
1673   gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1674 
1675   scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1676   scan_omp (gimple_omp_body_ptr (stmt), ctx);
1677 
1678   if (TYPE_FIELDS (ctx->record_type) == NULL)
1679     ctx->record_type = ctx->receiver_decl = NULL;
1680 }
1681 
1682 /* Scan an OpenMP task directive.  */
1683 
1684 static void
1685 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1686 {
1687   omp_context *ctx;
1688   tree name, t;
1689   gimple stmt = gsi_stmt (*gsi);
1690 
1691   /* Ignore task directives with empty bodies.  */
1692   if (optimize > 0
1693       && empty_body_p (gimple_omp_body (stmt)))
1694     {
1695       gsi_replace (gsi, gimple_build_nop (), false);
1696       return;
1697     }
1698 
1699   ctx = new_omp_context (stmt, outer_ctx);
1700   taskreg_contexts.safe_push (ctx);
1701   if (taskreg_nesting_level > 1)
1702     ctx->is_nested = true;
1703   ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1704   ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1705   ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1706   name = create_tmp_var_name (".omp_data_s");
1707   name = build_decl (gimple_location (stmt),
1708 		     TYPE_DECL, name, ctx->record_type);
1709   DECL_ARTIFICIAL (name) = 1;
1710   DECL_NAMELESS (name) = 1;
1711   TYPE_NAME (ctx->record_type) = name;
1712   create_omp_child_function (ctx, false);
1713   gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1714 
1715   scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1716 
1717   if (ctx->srecord_type)
1718     {
1719       name = create_tmp_var_name (".omp_data_a");
1720       name = build_decl (gimple_location (stmt),
1721 			 TYPE_DECL, name, ctx->srecord_type);
1722       DECL_ARTIFICIAL (name) = 1;
1723       DECL_NAMELESS (name) = 1;
1724       TYPE_NAME (ctx->srecord_type) = name;
1725       create_omp_child_function (ctx, true);
1726     }
1727 
1728   scan_omp (gimple_omp_body_ptr (stmt), ctx);
1729 
1730   if (TYPE_FIELDS (ctx->record_type) == NULL)
1731     {
1732       ctx->record_type = ctx->receiver_decl = NULL;
1733       t = build_int_cst (long_integer_type_node, 0);
1734       gimple_omp_task_set_arg_size (stmt, t);
1735       t = build_int_cst (long_integer_type_node, 1);
1736       gimple_omp_task_set_arg_align (stmt, t);
1737     }
1738 }
1739 
1740 
1741 /* If any decls have been made addressable during scan_omp,
1742    adjust their fields if needed, and layout record types
1743    of parallel/task constructs.  */
1744 
1745 static void
1746 finish_taskreg_scan (omp_context *ctx)
1747 {
1748   if (ctx->record_type == NULL_TREE)
1749     return;
1750 
1751   /* If any task_shared_vars were needed, check whether
1752      use_pointer_for_field has changed for any OMP_CLAUSE_SHARED
1753      clause on this GIMPLE_OMP_{PARALLEL,TASK} statement because
1754      of that.  If it did, update the field types now.  */
1755   if (task_shared_vars)
1756     {
1757       tree c;
1758 
1759       for (c = gimple_omp_taskreg_clauses (ctx->stmt);
1760 	   c; c = OMP_CLAUSE_CHAIN (c))
1761 	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
1762 	  {
1763 	    tree decl = OMP_CLAUSE_DECL (c);
1764 
1765 	    /* Global variables don't need to be copied,
1766 	       the receiver side will use them directly.  */
1767 	    if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1768 	      continue;
1769 	    if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
1770 		|| !use_pointer_for_field (decl, ctx))
1771 	      continue;
1772 	    tree field = lookup_field (decl, ctx);
1773 	    if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
1774 		&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
1775 	      continue;
1776 	    TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
1777 	    TREE_THIS_VOLATILE (field) = 0;
1778 	    DECL_USER_ALIGN (field) = 0;
1779 	    DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
1780 	    if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
1781 	      TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
1782 	    if (ctx->srecord_type)
1783 	      {
1784 		tree sfield = lookup_sfield (decl, ctx);
1785 		TREE_TYPE (sfield) = TREE_TYPE (field);
1786 		TREE_THIS_VOLATILE (sfield) = 0;
1787 		DECL_USER_ALIGN (sfield) = 0;
1788 		DECL_ALIGN (sfield) = DECL_ALIGN (field);
1789 		if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
1790 		  TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
1791 	      }
1792 	  }
1793     }
1794 
1795   if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1796     {
1797       layout_type (ctx->record_type);
1798       fixup_child_record_type (ctx);
1799     }
1800   else
1801     {
1802       location_t loc = gimple_location (ctx->stmt);
1803       tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1804       /* Move VLA fields to the end.  */
1805       p = &TYPE_FIELDS (ctx->record_type);
1806       while (*p)
1807 	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1808 	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1809 	  {
1810 	    *q = *p;
1811 	    *p = TREE_CHAIN (*p);
1812 	    TREE_CHAIN (*q) = NULL_TREE;
1813 	    q = &TREE_CHAIN (*q);
1814 	  }
1815 	else
1816 	  p = &DECL_CHAIN (*p);
1817       *p = vla_fields;
1818       layout_type (ctx->record_type);
1819       fixup_child_record_type (ctx);
1820       if (ctx->srecord_type)
1821 	layout_type (ctx->srecord_type);
1822       tree t = fold_convert_loc (loc, long_integer_type_node,
1823 				 TYPE_SIZE_UNIT (ctx->record_type));
1824       gimple_omp_task_set_arg_size (ctx->stmt, t);
1825       t = build_int_cst (long_integer_type_node,
1826 			 TYPE_ALIGN_UNIT (ctx->record_type));
1827       gimple_omp_task_set_arg_align (ctx->stmt, t);
1828     }
1829 }
1830 
1831 
1832 /* Scan an OpenMP loop directive.  */
1833 
1834 static void
1835 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1836 {
1837   omp_context *ctx;
1838   size_t i;
1839 
1840   ctx = new_omp_context (stmt, outer_ctx);
1841 
1842   scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1843 
1844   scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1845   for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1846     {
1847       scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1848       scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1849       scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1850       scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1851     }
1852   scan_omp (gimple_omp_body_ptr (stmt), ctx);
1853 }
1854 
1855 /* Scan an OpenMP sections directive.  */
1856 
1857 static void
1858 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1859 {
1860   omp_context *ctx;
1861 
1862   ctx = new_omp_context (stmt, outer_ctx);
1863   scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1864   scan_omp (gimple_omp_body_ptr (stmt), ctx);
1865 }
1866 
1867 /* Scan an OpenMP single directive.  */
1868 
1869 static void
1870 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1871 {
1872   omp_context *ctx;
1873   tree name;
1874 
1875   ctx = new_omp_context (stmt, outer_ctx);
1876   ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1877   ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1878   name = create_tmp_var_name (".omp_copy_s");
1879   name = build_decl (gimple_location (stmt),
1880 		     TYPE_DECL, name, ctx->record_type);
1881   TYPE_NAME (ctx->record_type) = name;
1882 
1883   scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1884   scan_omp (gimple_omp_body_ptr (stmt), ctx);
1885 
1886   if (TYPE_FIELDS (ctx->record_type) == NULL)
1887     ctx->record_type = NULL;
1888   else
1889     layout_type (ctx->record_type);
1890 }
1891 
1892 
1893 /* Check OpenMP nesting restrictions.  */
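/* For example, a worksharing construct closely nested inside another
   worksharing region is rejected by the checks below:

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++)
	  {
	    #pragma omp single		-> error: work-sharing region may not
	      f (i);			   be closely nested inside of ...
	  }
   */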
1894 static bool
1895 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1896 {
1897   switch (gimple_code (stmt))
1898     {
1899     case GIMPLE_OMP_FOR:
1900     case GIMPLE_OMP_SECTIONS:
1901     case GIMPLE_OMP_SINGLE:
1902     case GIMPLE_CALL:
1903       for (; ctx != NULL; ctx = ctx->outer)
1904 	switch (gimple_code (ctx->stmt))
1905 	  {
1906 	  case GIMPLE_OMP_FOR:
1907 	  case GIMPLE_OMP_SECTIONS:
1908 	  case GIMPLE_OMP_SINGLE:
1909 	  case GIMPLE_OMP_ORDERED:
1910 	  case GIMPLE_OMP_MASTER:
1911 	  case GIMPLE_OMP_TASK:
1912 	    if (is_gimple_call (stmt))
1913 	      {
1914 		error_at (gimple_location (stmt),
1915 			  "barrier region may not be closely nested inside "
1916 			  "of work-sharing, critical, ordered, master or "
1917 			  "explicit task region");
1918 		return false;
1919 	      }
1920 	    error_at (gimple_location (stmt),
1921 		      "work-sharing region may not be closely nested inside "
1922 		      "of work-sharing, critical, ordered, master or explicit "
1923 		      "task region");
1924 	    return false;
1925 	  case GIMPLE_OMP_PARALLEL:
1926 	    return true;
1927 	  default:
1928 	    break;
1929 	  }
1930       break;
1931     case GIMPLE_OMP_MASTER:
1932       for (; ctx != NULL; ctx = ctx->outer)
1933 	switch (gimple_code (ctx->stmt))
1934 	  {
1935 	  case GIMPLE_OMP_FOR:
1936 	  case GIMPLE_OMP_SECTIONS:
1937 	  case GIMPLE_OMP_SINGLE:
1938 	  case GIMPLE_OMP_TASK:
1939 	    error_at (gimple_location (stmt),
1940 		      "master region may not be closely nested inside "
1941 		      "of work-sharing or explicit task region");
1942 	    return false;
1943 	  case GIMPLE_OMP_PARALLEL:
1944 	    return true;
1945 	  default:
1946 	    break;
1947 	  }
1948       break;
1949     case GIMPLE_OMP_ORDERED:
1950       for (; ctx != NULL; ctx = ctx->outer)
1951 	switch (gimple_code (ctx->stmt))
1952 	  {
1953 	  case GIMPLE_OMP_CRITICAL:
1954 	  case GIMPLE_OMP_TASK:
1955 	    error_at (gimple_location (stmt),
1956 		      "ordered region may not be closely nested inside "
1957 		      "of critical or explicit task region");
1958 	    return false;
1959 	  case GIMPLE_OMP_FOR:
1960 	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1961 				 OMP_CLAUSE_ORDERED) == NULL)
1962 	      {
1963 		error_at (gimple_location (stmt),
1964 			  "ordered region must be closely nested inside "
1965 			  "a loop region with an ordered clause");
1966 		return false;
1967 	      }
1968 	    return true;
1969 	  case GIMPLE_OMP_PARALLEL:
1970 	    return true;
1971 	  default:
1972 	    break;
1973 	  }
1974       break;
1975     case GIMPLE_OMP_CRITICAL:
1976       for (; ctx != NULL; ctx = ctx->outer)
1977 	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1978 	    && (gimple_omp_critical_name (stmt)
1979 		== gimple_omp_critical_name (ctx->stmt)))
1980 	  {
1981 	    error_at (gimple_location (stmt),
1982 		      "critical region may not be nested inside a critical "
1983 		      "region with the same name");
1984 	    return false;
1985 	  }
1986       break;
1987     default:
1988       break;
1989     }
1990   return true;
1991 }
1992 
1993 
1994 /* Helper function for scan_omp.
1995 
1996    Callback for walk_tree, or for operands via walk_gimple_stmt, used
1997    to scan for OpenMP directives in TP.  */
1998 
1999 static tree
2000 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2001 {
2002   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2003   omp_context *ctx = (omp_context *) wi->info;
2004   tree t = *tp;
2005 
2006   switch (TREE_CODE (t))
2007     {
2008     case VAR_DECL:
2009     case PARM_DECL:
2010     case LABEL_DECL:
2011     case RESULT_DECL:
2012       if (ctx)
2013 	*tp = remap_decl (t, &ctx->cb);
2014       break;
2015 
2016     default:
2017       if (ctx && TYPE_P (t))
2018 	*tp = remap_type (t, &ctx->cb);
2019       else if (!DECL_P (t))
2020 	{
2021 	  *walk_subtrees = 1;
2022 	  if (ctx)
2023 	    {
2024 	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2025 	      if (tem != TREE_TYPE (t))
2026 		{
2027 		  if (TREE_CODE (t) == INTEGER_CST)
2028 		    *tp = build_int_cst_wide (tem,
2029 					      TREE_INT_CST_LOW (t),
2030 					      TREE_INT_CST_HIGH (t));
2031 		  else
2032 		    TREE_TYPE (t) = tem;
2033 		}
2034 	    }
2035 	}
2036       break;
2037     }
2038 
2039   return NULL_TREE;
2040 }
2041 
2042 
2043 /* Helper function for scan_omp.
2044 
2045    Callback for walk_gimple_stmt used to scan for OpenMP directives in
2046    the current statement in GSI.  */
2047 
2048 static tree
2049 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2050 		 struct walk_stmt_info *wi)
2051 {
2052   gimple stmt = gsi_stmt (*gsi);
2053   omp_context *ctx = (omp_context *) wi->info;
2054 
2055   if (gimple_has_location (stmt))
2056     input_location = gimple_location (stmt);
2057 
2058   /* Check the OpenMP nesting restrictions.  */
2059   if (ctx != NULL)
2060     {
2061       bool remove = false;
2062       if (is_gimple_omp (stmt))
2063 	remove = !check_omp_nesting_restrictions (stmt, ctx);
2064       else if (is_gimple_call (stmt))
2065 	{
2066 	  tree fndecl = gimple_call_fndecl (stmt);
2067 	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2068 	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2069 	    remove = !check_omp_nesting_restrictions (stmt, ctx);
2070 	}
2071       if (remove)
2072 	{
2073 	  stmt = gimple_build_nop ();
2074 	  gsi_replace (gsi, stmt, false);
2075 	}
2076     }
2077 
2078   *handled_ops_p = true;
2079 
2080   switch (gimple_code (stmt))
2081     {
2082     case GIMPLE_OMP_PARALLEL:
2083       taskreg_nesting_level++;
2084       scan_omp_parallel (gsi, ctx);
2085       taskreg_nesting_level--;
2086       break;
2087 
2088     case GIMPLE_OMP_TASK:
2089       taskreg_nesting_level++;
2090       scan_omp_task (gsi, ctx);
2091       taskreg_nesting_level--;
2092       break;
2093 
2094     case GIMPLE_OMP_FOR:
2095       scan_omp_for (stmt, ctx);
2096       break;
2097 
2098     case GIMPLE_OMP_SECTIONS:
2099       scan_omp_sections (stmt, ctx);
2100       break;
2101 
2102     case GIMPLE_OMP_SINGLE:
2103       scan_omp_single (stmt, ctx);
2104       break;
2105 
2106     case GIMPLE_OMP_SECTION:
2107     case GIMPLE_OMP_MASTER:
2108     case GIMPLE_OMP_ORDERED:
2109     case GIMPLE_OMP_CRITICAL:
2110       ctx = new_omp_context (stmt, ctx);
2111       scan_omp (gimple_omp_body_ptr (stmt), ctx);
2112       break;
2113 
2114     case GIMPLE_BIND:
2115       {
2116 	tree var;
2117 
2118 	*handled_ops_p = false;
2119 	if (ctx)
2120 	  for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2121 	    insert_decl_map (&ctx->cb, var, var);
2122       }
2123       break;
2124     default:
2125       *handled_ops_p = false;
2126       break;
2127     }
2128 
2129   return NULL_TREE;
2130 }
2131 
2132 
2133 /* Scan all the statements starting at the current statement.  CTX
2134    contains context information about the OpenMP directives and
2135    clauses found during the scan.  */
2136 
2137 static void
2138 scan_omp (gimple_seq *body_p, omp_context *ctx)
2139 {
2140   location_t saved_location;
2141   struct walk_stmt_info wi;
2142 
2143   memset (&wi, 0, sizeof (wi));
2144   wi.info = ctx;
2145   wi.want_locations = true;
2146 
2147   saved_location = input_location;
2148   walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2149   input_location = saved_location;
2150 }
2151 
2152 /* Re-gimplification and code generation routines.  */
2153 
2154 /* Build a call to GOMP_barrier.  */
2155 
2156 static tree
2157 build_omp_barrier (void)
2158 {
2159   return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2160 }
2161 
2162 /* If a context was created for STMT when it was scanned, return it.  */
2163 
2164 static omp_context *
2165 maybe_lookup_ctx (gimple stmt)
2166 {
2167   splay_tree_node n;
2168   n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2169   return n ? (omp_context *) n->value : NULL;
2170 }
2171 
2172 
2173 /* Find the mapping for DECL in CTX or the immediately enclosing
2174    context that has a mapping for DECL.
2175 
2176    If CTX is a nested parallel directive, we may have to use the decl
2177    mappings created in CTX's parent context.  Suppose that we have the
2178    following parallel nesting (variable UIDs shown for clarity):
2179 
2180 	iD.1562 = 0;
2181      	#omp parallel shared(iD.1562)		-> outer parallel
2182 	  iD.1562 = iD.1562 + 1;
2183 
2184 	  #omp parallel shared (iD.1562)	-> inner parallel
2185 	     iD.1562 = iD.1562 - 1;
2186 
2187    Each parallel structure will create a distinct .omp_data_s structure
2188    for copying iD.1562 in/out of the directive:
2189 
2190   	outer parallel		.omp_data_s.1.i -> iD.1562
2191 	inner parallel		.omp_data_s.2.i -> iD.1562
2192 
2193    A shared variable mapping will produce a copy-out operation before
2194    the parallel directive and a copy-in operation after it.  So, in
2195    this case we would have:
2196 
2197   	iD.1562 = 0;
2198 	.omp_data_o.1.i = iD.1562;
2199 	#omp parallel shared(iD.1562)		-> outer parallel
2200 	  .omp_data_i.1 = &.omp_data_o.1
2201 	  .omp_data_i.1->i = .omp_data_i.1->i + 1;
2202 
2203 	  .omp_data_o.2.i = iD.1562;		-> **
2204 	  #omp parallel shared(iD.1562)		-> inner parallel
2205 	    .omp_data_i.2 = &.omp_data_o.2
2206 	    .omp_data_i.2->i = .omp_data_i.2->i - 1;
2207 
2208 
2209     ** This is a problem.  The symbol iD.1562 cannot be referenced
2210        inside the body of the outer parallel region.  But since we are
2211        emitting this copy operation while expanding the inner parallel
2212        directive, we need to access the CTX structure of the outer
2213        parallel directive to get the correct mapping:
2214 
2215 	  .omp_data_o.2.i = .omp_data_i.1->i
2216 
2217     Since there may be other workshare or parallel directives enclosing
2218     the parallel directive, it may be necessary to walk up the context
2219     parent chain.  This is not a problem in general because nested
2220     parallelism happens only rarely.  */
2221 
2222 static tree
2223 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2224 {
2225   tree t;
2226   omp_context *up;
2227 
2228   for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2229     t = maybe_lookup_decl (decl, up);
2230 
2231   gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2232 
2233   return t ? t : decl;
2234 }
2235 
2236 
2237 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2238    in outer contexts.  */
2239 
2240 static tree
2241 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2242 {
2243   tree t = NULL;
2244   omp_context *up;
2245 
2246   for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2247     t = maybe_lookup_decl (decl, up);
2248 
2249   return t ? t : decl;
2250 }
2251 
2252 
2253 /* Construct the initialization value for reduction CLAUSE.  */
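/* Summarizing the switch below: +, -, |, ^, || and != start from zero;
   *, && and == start from one; & starts from ~0; max starts from the
   smallest value of TYPE (or -Inf when infinities are honored) and min
   from the largest (or +Inf).  */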
2254 
2255 tree
2256 omp_reduction_init (tree clause, tree type)
2257 {
2258   location_t loc = OMP_CLAUSE_LOCATION (clause);
2259   switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2260     {
2261     case PLUS_EXPR:
2262     case MINUS_EXPR:
2263     case BIT_IOR_EXPR:
2264     case BIT_XOR_EXPR:
2265     case TRUTH_OR_EXPR:
2266     case TRUTH_ORIF_EXPR:
2267     case TRUTH_XOR_EXPR:
2268     case NE_EXPR:
2269       return build_zero_cst (type);
2270 
2271     case MULT_EXPR:
2272     case TRUTH_AND_EXPR:
2273     case TRUTH_ANDIF_EXPR:
2274     case EQ_EXPR:
2275       return fold_convert_loc (loc, type, integer_one_node);
2276 
2277     case BIT_AND_EXPR:
2278       return fold_convert_loc (loc, type, integer_minus_one_node);
2279 
2280     case MAX_EXPR:
2281       if (SCALAR_FLOAT_TYPE_P (type))
2282 	{
2283 	  REAL_VALUE_TYPE max, min;
2284 	  if (HONOR_INFINITIES (TYPE_MODE (type)))
2285 	    {
2286 	      real_inf (&max);
2287 	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2288 	    }
2289 	  else
2290 	    real_maxval (&min, 1, TYPE_MODE (type));
2291 	  return build_real (type, min);
2292 	}
2293       else
2294 	{
2295 	  gcc_assert (INTEGRAL_TYPE_P (type));
2296 	  return TYPE_MIN_VALUE (type);
2297 	}
2298 
2299     case MIN_EXPR:
2300       if (SCALAR_FLOAT_TYPE_P (type))
2301 	{
2302 	  REAL_VALUE_TYPE max;
2303 	  if (HONOR_INFINITIES (TYPE_MODE (type)))
2304 	    real_inf (&max);
2305 	  else
2306 	    real_maxval (&max, 0, TYPE_MODE (type));
2307 	  return build_real (type, max);
2308 	}
2309       else
2310 	{
2311 	  gcc_assert (INTEGRAL_TYPE_P (type));
2312 	  return TYPE_MAX_VALUE (type);
2313 	}
2314 
2315     default:
2316       gcc_unreachable ();
2317     }
2318 }
2319 
2320 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2321    from the receiver (aka child) side and initializers for REFERENCE_TYPE
2322    private variables.  Initialization statements go in ILIST, while calls
2323    to destructors go in DLIST.  */
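/* Schematically, for

	#pragma omp parallel firstprivate (x)

   the receiver side ends up with roughly

	x' = .omp_data_i->x;	copy construction, appended to ILIST
	... body uses x' ...
	destroy x';		appended to DLIST when a destructor exists

   where x' stands for the privatized copy.  Variable sized and
   REFERENCE_TYPE privates additionally get their backing storage
   allocated here (alloca or a local temporary).  */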
2324 
2325 static void
2326 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2327 			 omp_context *ctx)
2328 {
2329   tree c, dtor, copyin_seq, x, ptr;
2330   bool copyin_by_ref = false;
2331   bool lastprivate_firstprivate = false;
2332   int pass;
2333 
2334   copyin_seq = NULL;
2335 
2336   /* Do all the fixed sized types in the first pass, and the variable sized
2337      types in the second pass.  This makes sure that the scalar arguments to
2338      the variable sized types are processed before we use them in the
2339      variable sized operations.  */
2340   for (pass = 0; pass < 2; ++pass)
2341     {
2342       for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2343 	{
2344 	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2345 	  tree var, new_var;
2346 	  bool by_ref;
2347 	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2348 
2349 	  switch (c_kind)
2350 	    {
2351 	    case OMP_CLAUSE_PRIVATE:
2352 	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2353 		continue;
2354 	      break;
2355 	    case OMP_CLAUSE_SHARED:
2356 	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2357 		{
2358 		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2359 		  continue;
2360 		}
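	      /* FALLTHRU */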
2361 	    case OMP_CLAUSE_FIRSTPRIVATE:
2362 	    case OMP_CLAUSE_COPYIN:
2363 	    case OMP_CLAUSE_REDUCTION:
2364 	      break;
2365 	    case OMP_CLAUSE_LASTPRIVATE:
2366 	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2367 		{
2368 		  lastprivate_firstprivate = true;
2369 		  if (pass != 0)
2370 		    continue;
2371 		}
2372 	      break;
2373 	    default:
2374 	      continue;
2375 	    }
2376 
2377 	  new_var = var = OMP_CLAUSE_DECL (c);
2378 	  if (c_kind != OMP_CLAUSE_COPYIN)
2379 	    new_var = lookup_decl (var, ctx);
2380 
2381 	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2382 	    {
2383 	      if (pass != 0)
2384 		continue;
2385 	    }
2386 	  else if (is_variable_sized (var))
2387 	    {
2388 	      /* For variable sized types, we need to allocate the
2389 		 actual storage here.  Call alloca and store the
2390 		 result in the pointer decl that we created elsewhere.  */
2391 	      if (pass == 0)
2392 		continue;
2393 
2394 	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2395 		{
2396 		  gimple stmt;
2397 		  tree tmp, atmp;
2398 
2399 		  ptr = DECL_VALUE_EXPR (new_var);
2400 		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2401 		  ptr = TREE_OPERAND (ptr, 0);
2402 		  gcc_assert (DECL_P (ptr));
2403 		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2404 
2405 		  /* void *tmp = __builtin_alloca */
2406 		  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2407 		  stmt = gimple_build_call (atmp, 1, x);
2408 		  tmp = create_tmp_var_raw (ptr_type_node, NULL);
2409 		  gimple_add_tmp_var (tmp);
2410 		  gimple_call_set_lhs (stmt, tmp);
2411 
2412 		  gimple_seq_add_stmt (ilist, stmt);
2413 
2414 		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2415 		  gimplify_assign (ptr, x, ilist);
2416 		}
2417 	    }
2418 	  else if (is_reference (var))
2419 	    {
2420 	      /* For references that are being privatized for Fortran,
2421 		 allocate new backing storage for the new pointer
2422 		 variable.  This allows us to avoid changing all the
2423 		 code that expects a pointer to something that expects
2424 		 a direct variable.  Note that this doesn't apply to
2425 		 C++, since reference types are disallowed in data
2426 		 sharing clauses there, except for NRV optimized
2427 		 return values.  */
2428 	      if (pass == 0)
2429 		continue;
2430 
2431 	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2432 	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2433 		{
2434 		  x = build_receiver_ref (var, false, ctx);
2435 		  x = build_fold_addr_expr_loc (clause_loc, x);
2436 		}
2437 	      else if (TREE_CONSTANT (x))
2438 		{
2439 		  const char *name = NULL;
2440 		  if (DECL_NAME (var))
2441 		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2442 
2443 		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2444 					  name);
2445 		  gimple_add_tmp_var (x);
2446 		  TREE_ADDRESSABLE (x) = 1;
2447 		  x = build_fold_addr_expr_loc (clause_loc, x);
2448 		}
2449 	      else
2450 		{
2451 		  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2452 		  x = build_call_expr_loc (clause_loc, atmp, 1, x);
2453 		}
2454 
2455 	      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2456 	      gimplify_assign (new_var, x, ilist);
2457 
2458 	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2459 	    }
2460 	  else if (c_kind == OMP_CLAUSE_REDUCTION
2461 		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2462 	    {
2463 	      if (pass == 0)
2464 		continue;
2465 	    }
2466 	  else if (pass != 0)
2467 	    continue;
2468 
2469 	  switch (OMP_CLAUSE_CODE (c))
2470 	    {
2471 	    case OMP_CLAUSE_SHARED:
2472 	      /* Shared global vars are just accessed directly.  */
2473 	      if (is_global_var (new_var))
2474 		break;
2475 	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
2476 		 needs to be delayed until after fixup_child_record_type so
2477 		 that we get the correct type during the dereference.  */
2478 	      by_ref = use_pointer_for_field (var, ctx);
2479 	      x = build_receiver_ref (var, by_ref, ctx);
2480 	      SET_DECL_VALUE_EXPR (new_var, x);
2481 	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2482 
2483 	      /* ??? If VAR is not passed by reference, and the variable
2484 		 hasn't been initialized yet, then we'll get a warning for
2485 		 the store into the omp_data_s structure.  Ideally, we'd be
2486 		 able to notice this and not store anything at all, but
2487 		 we're generating code too early.  Suppress the warning.  */
2488 	      if (!by_ref)
2489 		TREE_NO_WARNING (var) = 1;
2490 	      break;
2491 
2492 	    case OMP_CLAUSE_LASTPRIVATE:
2493 	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2494 		break;
2495 	      /* FALLTHRU */
2496 
2497 	    case OMP_CLAUSE_PRIVATE:
2498 	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2499 		x = build_outer_var_ref (var, ctx);
2500 	      else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2501 		{
2502 		  if (is_task_ctx (ctx))
2503 		    x = build_receiver_ref (var, false, ctx);
2504 		  else
2505 		    x = build_outer_var_ref (var, ctx);
2506 		}
2507 	      else
2508 		x = NULL;
2509 	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2510 	      if (x)
2511 		gimplify_and_add (x, ilist);
2512 	      /* FALLTHRU */
2513 
2514 	    do_dtor:
2515 	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2516 	      if (x)
2517 		{
2518 		  gimple_seq tseq = NULL;
2519 
2520 		  dtor = x;
2521 		  gimplify_stmt (&dtor, &tseq);
2522 		  gimple_seq_add_seq (dlist, tseq);
2523 		}
2524 	      break;
2525 
2526 	    case OMP_CLAUSE_FIRSTPRIVATE:
2527 	      if (is_task_ctx (ctx))
2528 		{
2529 		  if (is_reference (var) || is_variable_sized (var))
2530 		    goto do_dtor;
2531 		  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2532 									  ctx))
2533 			   || use_pointer_for_field (var, NULL))
2534 		    {
2535 		      x = build_receiver_ref (var, false, ctx);
2536 		      SET_DECL_VALUE_EXPR (new_var, x);
2537 		      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2538 		      goto do_dtor;
2539 		    }
2540 		}
2541 	      x = build_outer_var_ref (var, ctx);
2542 	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2543 	      gimplify_and_add (x, ilist);
2544 	      goto do_dtor;
2545 	      break;
2546 
2547 	    case OMP_CLAUSE_COPYIN:
2548 	      by_ref = use_pointer_for_field (var, NULL);
2549 	      x = build_receiver_ref (var, by_ref, ctx);
2550 	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2551 	      append_to_statement_list (x, &copyin_seq);
2552 	      copyin_by_ref |= by_ref;
2553 	      break;
2554 
2555 	    case OMP_CLAUSE_REDUCTION:
2556 	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2557 		{
2558 		  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2559 		  x = build_outer_var_ref (var, ctx);
2560 
2561 		  if (is_reference (var))
2562 		    x = build_fold_addr_expr_loc (clause_loc, x);
2563 		  SET_DECL_VALUE_EXPR (placeholder, x);
2564 		  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2565 		  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2566 		  gimple_seq_add_seq (ilist,
2567 				      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2568 		  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2569 		  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2570 		}
2571 	      else
2572 		{
2573 		  x = omp_reduction_init (c, TREE_TYPE (new_var));
2574 		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2575 		  gimplify_assign (new_var, x, ilist);
2576 		}
2577 	      break;
2578 
2579 	    default:
2580 	      gcc_unreachable ();
2581 	    }
2582 	}
2583     }
2584 
2585   /* The copyin sequence is not to be executed by the main thread, since
2586      that would result in self-copies.  Perhaps not visible to scalars,
2587      but it certainly is to C++ operator=.  */
2588   if (copyin_seq)
2589     {
2590       x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2591 			   0);
2592       x = build2 (NE_EXPR, boolean_type_node, x,
2593 		  build_int_cst (TREE_TYPE (x), 0));
2594       x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2595       gimplify_and_add (x, ilist);
2596     }
2597 
2598   /* If any copyin variable is passed by reference, we must ensure the
2599      master thread doesn't modify it before it is copied over in all
2600      threads.  Similarly for variables in both firstprivate and
2601      lastprivate clauses we need to ensure the lastprivate copying
2602      happens after firstprivate copying in all threads.  */
2603   if (copyin_by_ref || lastprivate_firstprivate)
2604     gimplify_and_add (build_omp_barrier (), ilist);
2605 }
2606 
2607 
2608 /* Generate code to implement the LASTPRIVATE clauses.  This is used for
2609    both parallel and workshare constructs.  PREDICATE may be NULL if it's
2610    always true.   */
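/* Roughly, when PREDICATE is non-NULL the emitted sequence is

	if (PREDICATE) goto label_true; else goto label;
	label_true:
	  outer_var = private_var;	one copy-out per lastprivate clause
	label:

   so only the thread that executed the sequentially last iteration
   (or the last section) writes its value back.  */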
2611 
2612 static void
2613 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2614 			    omp_context *ctx)
2615 {
2616   tree x, c, label = NULL;
2617   bool par_clauses = false;
2618 
2619   /* Early exit if there are no lastprivate clauses.  */
2620   clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2621   if (clauses == NULL)
2622     {
2623       /* If this was a workshare clause, see if it had been combined
2624 	 with its parallel.  In that case, look for the clauses on the
2625 	 parallel statement itself.  */
2626       if (is_parallel_ctx (ctx))
2627 	return;
2628 
2629       ctx = ctx->outer;
2630       if (ctx == NULL || !is_parallel_ctx (ctx))
2631 	return;
2632 
2633       clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2634 				 OMP_CLAUSE_LASTPRIVATE);
2635       if (clauses == NULL)
2636 	return;
2637       par_clauses = true;
2638     }
2639 
2640   if (predicate)
2641     {
2642       gimple stmt;
2643       tree label_true, arm1, arm2;
2644 
2645       label = create_artificial_label (UNKNOWN_LOCATION);
2646       label_true = create_artificial_label (UNKNOWN_LOCATION);
2647       arm1 = TREE_OPERAND (predicate, 0);
2648       arm2 = TREE_OPERAND (predicate, 1);
2649       gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2650       gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2651       stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2652 				label_true, label);
2653       gimple_seq_add_stmt (stmt_list, stmt);
2654       gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2655     }
2656 
2657   for (c = clauses; c ;)
2658     {
2659       tree var, new_var;
2660       location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2661 
2662       if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2663 	{
2664 	  var = OMP_CLAUSE_DECL (c);
2665 	  new_var = lookup_decl (var, ctx);
2666 
2667 	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2668 	    {
2669 	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2670 	      gimple_seq_add_seq (stmt_list,
2671 				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2672 	    }
2673 	  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2674 
2675 	  x = build_outer_var_ref (var, ctx);
2676 	  if (is_reference (var))
2677 	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2678 	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2679 	  gimplify_and_add (x, stmt_list);
2680 	}
2681       c = OMP_CLAUSE_CHAIN (c);
2682       if (c == NULL && !par_clauses)
2683 	{
2684 	  /* If this was a workshare clause, see if it had been combined
2685 	     with its parallel.  In that case, continue looking for the
2686 	     clauses also on the parallel statement itself.  */
2687 	  if (is_parallel_ctx (ctx))
2688 	    break;
2689 
2690 	  ctx = ctx->outer;
2691 	  if (ctx == NULL || !is_parallel_ctx (ctx))
2692 	    break;
2693 
2694 	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2695 			       OMP_CLAUSE_LASTPRIVATE);
2696 	  par_clauses = true;
2697 	}
2698     }
2699 
2700   if (label)
2701     gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2702 }
2703 
2704 
2705 /* Generate code to implement the REDUCTION clauses.  */
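/* With exactly one scalar reduction clause an atomic update is emitted,
   roughly

	#pragma omp atomic
	outer_var = outer_var OP private_var;

   otherwise all the merges are placed between GOMP_atomic_start ()
   and GOMP_atomic_end () calls.  */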
2706 
2707 static void
2708 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2709 {
2710   gimple_seq sub_seq = NULL;
2711   gimple stmt;
2712   tree x, c;
2713   int count = 0;
2714 
2715   /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
2716      update in that case, otherwise use a lock.  */
2717   for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2718     if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2719       {
2720 	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2721 	  {
2722 	    /* Never use OMP_ATOMIC for array reductions.  */
2723 	    count = -1;
2724 	    break;
2725 	  }
2726 	count++;
2727       }
2728 
2729   if (count == 0)
2730     return;
2731 
2732   for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2733     {
2734       tree var, ref, new_var;
2735       enum tree_code code;
2736       location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2737 
2738       if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2739 	continue;
2740 
2741       var = OMP_CLAUSE_DECL (c);
2742       new_var = lookup_decl (var, ctx);
2743       if (is_reference (var))
2744 	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2745       ref = build_outer_var_ref (var, ctx);
2746       code = OMP_CLAUSE_REDUCTION_CODE (c);
2747 
2748       /* reduction(-:var) sums up the partial results, so it acts
2749 	 identically to reduction(+:var).  */
2750       if (code == MINUS_EXPR)
2751         code = PLUS_EXPR;
2752 
2753       if (count == 1)
2754 	{
2755 	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2756 
2757 	  addr = save_expr (addr);
2758 	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2759 	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2760 	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2761 	  gimplify_and_add (x, stmt_seqp);
2762 	  return;
2763 	}
2764 
2765       if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2766 	{
2767 	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2768 
2769 	  if (is_reference (var))
2770 	    ref = build_fold_addr_expr_loc (clause_loc, ref);
2771 	  SET_DECL_VALUE_EXPR (placeholder, ref);
2772 	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2773 	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2774 	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2775 	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2776 	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2777 	}
2778       else
2779 	{
2780 	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
2781 	  ref = build_outer_var_ref (var, ctx);
2782 	  gimplify_assign (ref, x, &sub_seq);
2783 	}
2784     }
2785 
2786   stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2787 			    0);
2788   gimple_seq_add_stmt (stmt_seqp, stmt);
2789 
2790   gimple_seq_add_seq (stmt_seqp, sub_seq);
2791 
2792   stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2793 			    0);
2794   gimple_seq_add_stmt (stmt_seqp, stmt);
2795 }
2796 
2797 
2798 /* Generate code to implement the COPYPRIVATE clauses.  */
2799 
2800 static void
2801 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2802 			    omp_context *ctx)
2803 {
2804   tree c;
2805 
2806   for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2807     {
2808       tree var, new_var, ref, x;
2809       bool by_ref;
2810       location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2811 
2812       if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2813 	continue;
2814 
2815       var = OMP_CLAUSE_DECL (c);
2816       by_ref = use_pointer_for_field (var, NULL);
2817 
2818       ref = build_sender_ref (var, ctx);
2819       x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2820       if (by_ref)
2821 	{
2822 	  x = build_fold_addr_expr_loc (clause_loc, new_var);
2823 	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2824 	}
2825       gimplify_assign (ref, x, slist);
2826 
2827       ref = build_receiver_ref (var, false, ctx);
2828       if (by_ref)
2829 	{
2830 	  ref = fold_convert_loc (clause_loc,
2831 				  build_pointer_type (TREE_TYPE (new_var)),
2832 				  ref);
2833 	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
2834 	}
2835       if (is_reference (var))
2836 	{
2837 	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2838 	  ref = build_simple_mem_ref_loc (clause_loc, ref);
2839 	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2840 	}
2841       x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2842       gimplify_and_add (x, rlist);
2843     }
2844 }
2845 
2846 
2847 /* Generate code to implement the clauses FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2848    and REDUCTION from the sender (aka parent) side.  */
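/* E.g. for firstprivate (x) the parent stores the value (or its
   address, when the field is a pointer) into the communication record
   before the region starts, roughly

	.omp_data_o.x = x;

   and for a scalar lastprivate or reduction it reads the result back
   out afterwards:

	x = .omp_data_o.x;  */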
2849 
2850 static void
2851 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2852     		    omp_context *ctx)
2853 {
2854   tree c;
2855 
2856   for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2857     {
2858       tree val, ref, x, var;
2859       bool by_ref, do_in = false, do_out = false;
2860       location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2861 
2862       switch (OMP_CLAUSE_CODE (c))
2863 	{
2864 	case OMP_CLAUSE_PRIVATE:
2865 	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2866 	    break;
2867 	  continue;
2868 	case OMP_CLAUSE_FIRSTPRIVATE:
2869 	case OMP_CLAUSE_COPYIN:
2870 	case OMP_CLAUSE_LASTPRIVATE:
2871 	case OMP_CLAUSE_REDUCTION:
2872 	  break;
2873 	default:
2874 	  continue;
2875 	}
2876 
2877       val = OMP_CLAUSE_DECL (c);
2878       var = lookup_decl_in_outer_ctx (val, ctx);
2879 
2880       if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2881 	  && is_global_var (var))
2882 	continue;
2883       if (is_variable_sized (val))
2884 	continue;
2885       by_ref = use_pointer_for_field (val, NULL);
2886 
2887       switch (OMP_CLAUSE_CODE (c))
2888 	{
2889 	case OMP_CLAUSE_PRIVATE:
2890 	case OMP_CLAUSE_FIRSTPRIVATE:
2891 	case OMP_CLAUSE_COPYIN:
2892 	  do_in = true;
2893 	  break;
2894 
2895 	case OMP_CLAUSE_LASTPRIVATE:
2896 	  if (by_ref || is_reference (val))
2897 	    {
2898 	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2899 		continue;
2900 	      do_in = true;
2901 	    }
2902 	  else
2903 	    {
2904 	      do_out = true;
2905 	      if (lang_hooks.decls.omp_private_outer_ref (val))
2906 		do_in = true;
2907 	    }
2908 	  break;
2909 
2910 	case OMP_CLAUSE_REDUCTION:
2911 	  do_in = true;
2912 	  do_out = !(by_ref || is_reference (val));
2913 	  break;
2914 
2915 	default:
2916 	  gcc_unreachable ();
2917 	}
2918 
2919       if (do_in)
2920 	{
2921 	  ref = build_sender_ref (val, ctx);
2922 	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2923 	  gimplify_assign (ref, x, ilist);
2924 	  if (is_task_ctx (ctx))
2925 	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2926 	}
2927 
2928       if (do_out)
2929 	{
2930 	  ref = build_sender_ref (val, ctx);
2931 	  gimplify_assign (var, ref, olist);
2932 	}
2933     }
2934 }
2935 
2936 /* Generate code to implement SHARED from the sender (aka parent)
2937    side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2938    list things that got automatically shared.  */
2939 
2940 static void
2941 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2942 {
2943   tree var, ovar, nvar, f, x, record_type;
2944 
2945   if (ctx->record_type == NULL)
2946     return;
2947 
2948   record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2949   for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2950     {
2951       ovar = DECL_ABSTRACT_ORIGIN (f);
2952       nvar = maybe_lookup_decl (ovar, ctx);
2953       if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2954 	continue;
2955 
2956       /* If CTX is a nested parallel directive, find the immediately
2957 	 enclosing parallel or workshare construct that contains a
2958 	 mapping for OVAR.  */
2959       var = lookup_decl_in_outer_ctx (ovar, ctx);
2960 
2961       if (use_pointer_for_field (ovar, ctx))
2962 	{
2963 	  x = build_sender_ref (ovar, ctx);
2964 	  var = build_fold_addr_expr (var);
2965 	  gimplify_assign (x, var, ilist);
2966 	}
2967       else
2968 	{
2969 	  x = build_sender_ref (ovar, ctx);
2970 	  gimplify_assign (x, var, ilist);
2971 
2972 	  if (!TREE_READONLY (var)
2973 	      /* We don't need to receive a new reference to a result
2974 	         or parm decl.  In fact we may not store to it as we will
2975 		 invalidate any pending RSO and generate wrong gimple
2976 		 during inlining.  */
2977 	      && !((TREE_CODE (var) == RESULT_DECL
2978 		    || TREE_CODE (var) == PARM_DECL)
2979 		   && DECL_BY_REFERENCE (var)))
2980 	    {
2981 	      x = build_sender_ref (ovar, ctx);
2982 	      gimplify_assign (var, x, olist);
2983 	    }
2984 	}
2985     }
2986 }
2987 
2988 
2989 /* A convenience function to build an empty GIMPLE_COND with just the
2990    condition.  */
2991 
2992 static gimple
2993 gimple_build_cond_empty (tree cond)
2994 {
2995   enum tree_code pred_code;
2996   tree lhs, rhs;
2997 
2998   gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2999   return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
3000 }
3001 
3002 
3003 /* Build the function calls to GOMP_parallel_start etc to actually
3004    generate the parallel operation.  REGION is the parallel region
3005    being expanded.  BB is the block where the code is to be inserted.  WS_ARGS
3006    will be set if this is a call to a combined parallel+workshare
3007    construct, it contains the list of additional arguments needed by
3008    the workshare construct.  */
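/* The generated sequence is roughly

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   with GOMP_parallel_start replaced by one of the combined
   parallel+workshare entry points when REGION is combined.  */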
3009 
3010 static void
3011 expand_parallel_call (struct omp_region *region, basic_block bb,
3012 		      gimple entry_stmt, vec<tree, va_gc> *ws_args)
3013 {
3014   tree t, t1, t2, val, cond, c, clauses;
3015   gimple_stmt_iterator gsi;
3016   gimple stmt;
3017   enum built_in_function start_ix;
3018   int start_ix2;
3019   location_t clause_loc;
3020   vec<tree, va_gc> *args;
3021 
3022   clauses = gimple_omp_parallel_clauses (entry_stmt);
3023 
3024   /* Determine what flavor of GOMP_parallel_start we will be
3025      emitting.  */
3026   start_ix = BUILT_IN_GOMP_PARALLEL_START;
3027   if (is_combined_parallel (region))
3028     {
3029       switch (region->inner->type)
3030 	{
3031 	case GIMPLE_OMP_FOR:
3032 	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
3033 	  start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3034 		       + (region->inner->sched_kind
3035 			  == OMP_CLAUSE_SCHEDULE_RUNTIME
3036 			  ? 3 : region->inner->sched_kind));
3037 	  start_ix = (enum built_in_function)start_ix2;
3038 	  break;
3039 	case GIMPLE_OMP_SECTIONS:
3040 	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
3041 	  break;
3042 	default:
3043 	  gcc_unreachable ();
3044 	}
3045     }
3046 
3047   /* By default, the value of NUM_THREADS is zero (selected at run time)
3048      and there is no conditional.  */
3049   cond = NULL_TREE;
3050   val = build_int_cst (unsigned_type_node, 0);
3051 
3052   c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3053   if (c)
3054     cond = OMP_CLAUSE_IF_EXPR (c);
3055 
3056   c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
3057   if (c)
3058     {
3059       val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3060       clause_loc = OMP_CLAUSE_LOCATION (c);
3061     }
3062   else
3063     clause_loc = gimple_location (entry_stmt);
3064 
3065   /* Ensure 'val' is of the correct type.  */
3066   val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3067 
3068   /* If we found the clause 'if (cond)', build either
3069      (cond != 0) or (cond ? val : 1u).  */
3070   if (cond)
3071     {
3072       gimple_stmt_iterator gsi;
3073 
3074       cond = gimple_boolify (cond);
3075 
3076       if (integer_zerop (val))
3077 	val = fold_build2_loc (clause_loc,
3078 			   EQ_EXPR, unsigned_type_node, cond,
3079 			   build_int_cst (TREE_TYPE (cond), 0));
3080       else
3081 	{
3082 	  basic_block cond_bb, then_bb, else_bb;
3083 	  edge e, e_then, e_else;
3084 	  tree tmp_then, tmp_else, tmp_join, tmp_var;
3085 
3086 	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3087 	  if (gimple_in_ssa_p (cfun))
3088 	    {
3089 	      tmp_then = make_ssa_name (tmp_var, NULL);
3090 	      tmp_else = make_ssa_name (tmp_var, NULL);
3091 	      tmp_join = make_ssa_name (tmp_var, NULL);
3092 	    }
3093 	  else
3094 	    {
3095 	      tmp_then = tmp_var;
3096 	      tmp_else = tmp_var;
3097 	      tmp_join = tmp_var;
3098 	    }
3099 
3100 	  e = split_block (bb, NULL);
3101 	  cond_bb = e->src;
3102 	  bb = e->dest;
3103 	  remove_edge (e);
3104 
3105 	  then_bb = create_empty_bb (cond_bb);
3106 	  else_bb = create_empty_bb (then_bb);
3107 	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3108 	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3109 
3110 	  stmt = gimple_build_cond_empty (cond);
3111 	  gsi = gsi_start_bb (cond_bb);
3112 	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3113 
3114 	  gsi = gsi_start_bb (then_bb);
3115 	  stmt = gimple_build_assign (tmp_then, val);
3116 	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3117 
3118 	  gsi = gsi_start_bb (else_bb);
3119 	  stmt = gimple_build_assign
3120 	    	   (tmp_else, build_int_cst (unsigned_type_node, 1));
3121 	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3122 
3123 	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3124 	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3125 	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3126 	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3127 
3128 	  if (gimple_in_ssa_p (cfun))
3129 	    {
3130 	      gimple phi = create_phi_node (tmp_join, bb);
3131 	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3132 	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3133 	    }
3134 
3135 	  val = tmp_join;
3136 	}
3137 
3138       gsi = gsi_start_bb (bb);
3139       val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3140 				      false, GSI_CONTINUE_LINKING);
3141     }
3142 
3143   gsi = gsi_last_bb (bb);
3144   t = gimple_omp_parallel_data_arg (entry_stmt);
3145   if (t == NULL)
3146     t1 = null_pointer_node;
3147   else
3148     t1 = build_fold_addr_expr (t);
3149   t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3150 
3151   vec_alloc (args, 3 + vec_safe_length (ws_args));
3152   args->quick_push (t2);
3153   args->quick_push (t1);
3154   args->quick_push (val);
3155   if (ws_args)
3156     args->splice (*ws_args);
3157 
3158   t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3159 			       builtin_decl_explicit (start_ix), args);
3160 
3161   force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3162 			    false, GSI_CONTINUE_LINKING);
3163 
3164   t = gimple_omp_parallel_data_arg (entry_stmt);
3165   if (t == NULL)
3166     t = null_pointer_node;
3167   else
3168     t = build_fold_addr_expr (t);
3169   t = build_call_expr_loc (gimple_location (entry_stmt),
3170 			   gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3171   force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3172 			    false, GSI_CONTINUE_LINKING);
3173 
3174   t = build_call_expr_loc (gimple_location (entry_stmt),
3175 			   builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3176 			   0);
3177   force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3178 			    false, GSI_CONTINUE_LINKING);
3179 }
3180 
3181 
3182 /* Build the function call to GOMP_task to actually
3183    generate the task operation.  BB is the block where the code is to be inserted.  */
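/* The generated call is roughly

	GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
		   if_clause, flags);

   where FLAGS has bit 0 set for untied, bit 1 for final and bit 2
   for mergeable tasks.  */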
3184 
3185 static void
3186 expand_task_call (basic_block bb, gimple entry_stmt)
3187 {
3188   tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3189   gimple_stmt_iterator gsi;
3190   location_t loc = gimple_location (entry_stmt);
3191 
3192   clauses = gimple_omp_task_clauses (entry_stmt);
3193 
3194   c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3195   if (c)
3196     cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3197   else
3198     cond = boolean_true_node;
3199 
3200   c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3201   c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3202   flags = build_int_cst (unsigned_type_node,
3203 			 (c ? 1 : 0) + (c2 ? 4 : 0));
3204 
3205   c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3206   if (c)
3207     {
3208       c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3209       c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3210 			   build_int_cst (unsigned_type_node, 2),
3211 			   build_int_cst (unsigned_type_node, 0));
3212       flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3213     }
3214 
3215   gsi = gsi_last_bb (bb);
3216   t = gimple_omp_task_data_arg (entry_stmt);
3217   if (t == NULL)
3218     t2 = null_pointer_node;
3219   else
3220     t2 = build_fold_addr_expr_loc (loc, t);
3221   t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3222   t = gimple_omp_task_copy_fn (entry_stmt);
3223   if (t == NULL)
3224     t3 = null_pointer_node;
3225   else
3226     t3 = build_fold_addr_expr_loc (loc, t);
3227 
3228   t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3229 		       7, t1, t2, t3,
3230 		       gimple_omp_task_arg_size (entry_stmt),
3231 		       gimple_omp_task_arg_align (entry_stmt), cond, flags);
3232 
3233   force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3234 			    false, GSI_CONTINUE_LINKING);
3235 }
3236 
3237 
3238 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3239    catch handler and return it.  This prevents programs from violating the
3240    structured block semantics with throws.  */
3241 
3242 static gimple_seq
3243 maybe_catch_exception (gimple_seq body)
3244 {
3245   gimple g;
3246   tree decl;
3247 
3248   if (!flag_exceptions)
3249     return body;
3250 
3251   if (lang_hooks.eh_protect_cleanup_actions != NULL)
3252     decl = lang_hooks.eh_protect_cleanup_actions ();
3253   else
3254     decl = builtin_decl_explicit (BUILT_IN_TRAP);
3255 
3256   g = gimple_build_eh_must_not_throw (decl);
3257   g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3258       			GIMPLE_TRY_CATCH);
3259 
3260  return gimple_seq_alloc_with_stmt (g);
3261 }
3262 
3263 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields.  */
3264 
3265 static tree
3266 vec2chain (vec<tree, va_gc> *v)
3267 {
3268   tree chain = NULL_TREE, t;
3269   unsigned ix;
3270 
3271   FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3272     {
3273       DECL_CHAIN (t) = chain;
3274       chain = t;
3275     }
3276 
3277   return chain;
3278 }
3279 
3280 
3281 /* Remove barriers in REGION->EXIT's block.  Note that this is only
3282    valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
3283    is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3284    left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3285    removed.  */
3286 
3287 static void
3288 remove_exit_barrier (struct omp_region *region)
3289 {
3290   gimple_stmt_iterator gsi;
3291   basic_block exit_bb;
3292   edge_iterator ei;
3293   edge e;
3294   gimple stmt;
3295   int any_addressable_vars = -1;
3296 
3297   exit_bb = region->exit;
3298 
3299   /* If the parallel region doesn't return, we don't have REGION->EXIT
3300      block at all.  */
3301   if (! exit_bb)
3302     return;
3303 
3304   /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
3305      workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
3306      statements that can appear in between are extremely limited -- no
3307      memory operations at all.  Here, we allow nothing at all, so the
3308      only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
3309   gsi = gsi_last_bb (exit_bb);
3310   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3311   gsi_prev (&gsi);
3312   if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3313     return;
3314 
3315   FOR_EACH_EDGE (e, ei, exit_bb->preds)
3316     {
3317       gsi = gsi_last_bb (e->src);
3318       if (gsi_end_p (gsi))
3319 	continue;
3320       stmt = gsi_stmt (gsi);
3321       if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3322 	  && !gimple_omp_return_nowait_p (stmt))
3323 	{
3324 	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
3325 	     in many cases.  If there could be tasks queued, the barrier
3326 	     might be needed to let the tasks run before some local
3327 	     variable of the parallel that the task uses as shared
3328 	     runs out of scope.  The task can be spawned either
3329 	     from within the current function (this would be easy to check)
3330 	     or from some function it calls that gets passed the address
3331 	     of such a variable.  */
3332 	  if (any_addressable_vars < 0)
3333 	    {
3334 	      gimple parallel_stmt = last_stmt (region->entry);
3335 	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3336 	      tree local_decls, block, decl;
3337 	      unsigned ix;
3338 
3339 	      any_addressable_vars = 0;
3340 	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3341 		if (TREE_ADDRESSABLE (decl))
3342 		  {
3343 		    any_addressable_vars = 1;
3344 		    break;
3345 		  }
3346 	      for (block = gimple_block (stmt);
3347 		   !any_addressable_vars
3348 		   && block
3349 		   && TREE_CODE (block) == BLOCK;
3350 		   block = BLOCK_SUPERCONTEXT (block))
3351 		{
3352 		  for (local_decls = BLOCK_VARS (block);
3353 		       local_decls;
3354 		       local_decls = DECL_CHAIN (local_decls))
3355 		    if (TREE_ADDRESSABLE (local_decls))
3356 		      {
3357 			any_addressable_vars = 1;
3358 			break;
3359 		      }
3360 		  if (block == gimple_block (parallel_stmt))
3361 		    break;
3362 		}
3363 	    }
3364 	  if (!any_addressable_vars)
3365 	    gimple_omp_return_set_nowait (stmt);
3366 	}
3367     }
3368 }
3369 
3370 static void
3371 remove_exit_barriers (struct omp_region *region)
3372 {
3373   if (region->type == GIMPLE_OMP_PARALLEL)
3374     remove_exit_barrier (region);
3375 
3376   if (region->inner)
3377     {
3378       region = region->inner;
3379       remove_exit_barriers (region);
3380       while (region->next)
3381 	{
3382 	  region = region->next;
3383 	  remove_exit_barriers (region);
3384 	}
3385     }
3386 }
3387 
3388 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3389    calls.  These can't be declared as const functions, but
3390    within one parallel body they are constant, so they can be
3391    transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3392    which are declared const.  Similarly for the task body, except
3393    that in an untied task omp_get_thread_num () can change at any
3394    task scheduling point.  */
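/* I.e. a call such as

	n = omp_get_num_threads ();

   inside a parallel body is redirected to the equivalent const
   builtin, so later passes can treat repeated calls as equal.  */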
3395 
3396 static void
3397 optimize_omp_library_calls (gimple entry_stmt)
3398 {
3399   basic_block bb;
3400   gimple_stmt_iterator gsi;
3401   tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3402   tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3403   tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3404   tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3405   bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3406 		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3407 					  OMP_CLAUSE_UNTIED) != NULL);
3408 
3409   FOR_EACH_BB (bb)
3410     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3411       {
3412 	gimple call = gsi_stmt (gsi);
3413 	tree decl;
3414 
3415 	if (is_gimple_call (call)
3416 	    && (decl = gimple_call_fndecl (call))
3417 	    && DECL_EXTERNAL (decl)
3418 	    && TREE_PUBLIC (decl)
3419 	    && DECL_INITIAL (decl) == NULL)
3420 	  {
3421 	    tree built_in;
3422 
3423 	    if (DECL_NAME (decl) == thr_num_id)
3424 	      {
3425 		/* In #pragma omp task untied omp_get_thread_num () can change
3426 		   during the execution of the task region.  */
3427 		if (untied_task)
3428 		  continue;
3429 		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3430 	      }
3431 	    else if (DECL_NAME (decl) == num_thr_id)
3432 	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3433 	    else
3434 	      continue;
3435 
3436 	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3437 		|| gimple_call_num_args (call) != 0)
3438 	      continue;
3439 
3440 	    if (flag_exceptions && !TREE_NOTHROW (decl))
3441 	      continue;
3442 
3443 	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3444 		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3445 					TREE_TYPE (TREE_TYPE (built_in))))
3446 	      continue;
3447 
3448 	    gimple_call_set_fndecl (call, built_in);
3449 	  }
3450       }
3451 }
3452 
3453 /* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
3454    regimplified.  */
3455 
3456 static tree
3457 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3458 {
3459   tree t = *tp;
3460 
3461   /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
3462   if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3463     return t;
3464 
3465   if (TREE_CODE (t) == ADDR_EXPR)
3466     recompute_tree_invariant_for_addr_expr (t);
3467 
3468   *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3469   return NULL_TREE;
3470 }
3471 
3472 /* Expand the OpenMP parallel or task directive starting at REGION.  */
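/* In outline (a summary of the code below): the construct body has
   already been outlined into CHILD_FN during lowering; here the
   region's blocks are moved into CHILD_FN's CFG, the receiver copy of
   the data-sharing record is rewired to the child's incoming argument,
   the new function is registered with the callgraph, and a call into
   libgomp is emitted in the parent via expand_parallel_call or
   expand_task_call.  */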
3473 
3474 static void
3475 expand_omp_taskreg (struct omp_region *region)
3476 {
3477   basic_block entry_bb, exit_bb, new_bb;
3478   struct function *child_cfun;
3479   tree child_fn, block, t;
3480   gimple_stmt_iterator gsi;
3481   gimple entry_stmt, stmt;
3482   edge e;
3483   vec<tree, va_gc> *ws_args;
3484 
3485   entry_stmt = last_stmt (region->entry);
3486   child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3487   child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3488 
3489   entry_bb = region->entry;
3490   if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
3491     exit_bb = region->cont;
3492   else
3493     exit_bb = region->exit;
3494 
3495   if (is_combined_parallel (region))
3496     ws_args = region->ws_args;
3497   else
3498     ws_args = NULL;
3499 
3500   if (child_cfun->cfg)
3501     {
3502       /* Due to inlining, it may happen that we have already outlined
3503 	 the region, in which case all we need to do is make the
3504 	 sub-graph unreachable and emit the parallel call.  */
3505       edge entry_succ_e, exit_succ_e;
3506       gimple_stmt_iterator gsi;
3507 
3508       entry_succ_e = single_succ_edge (entry_bb);
3509 
3510       gsi = gsi_last_bb (entry_bb);
3511       gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3512 		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3513       gsi_remove (&gsi, true);
3514 
3515       new_bb = entry_bb;
3516       if (exit_bb)
3517 	{
3518 	  exit_succ_e = single_succ_edge (exit_bb);
3519 	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3520 	}
3521       remove_edge_and_dominated_blocks (entry_succ_e);
3522     }
3523   else
3524     {
3525       unsigned srcidx, dstidx, num;
3526 
3527       /* If the parallel region needs data sent from the parent
3528 	 function, then the very first statement (except for possible
3529 	 tree profile counter updates) of the parallel body
3530 	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
3531 	 &.OMP_DATA_O is passed as an argument to the child function,
3532 	 we need to replace it with the argument as seen by the child
3533 	 function.
3534 
3535 	 In most cases, this will end up being the identity assignment
3536 	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
3537 	 a function call that has been inlined, the original PARM_DECL
3538 	 .OMP_DATA_I may have been converted into a different local
3539 	 variable, in which case we need to keep the assignment.  */
3540       if (gimple_omp_taskreg_data_arg (entry_stmt))
3541 	{
3542 	  basic_block entry_succ_bb
3543 	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
3544 				       : FALLTHRU_EDGE (entry_bb)->dest;
3545 	  gimple_stmt_iterator gsi;
3546 	  tree arg, narg;
3547 	  gimple parcopy_stmt = NULL;
3548 
3549 	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3550 	    {
3551 	      gimple stmt;
3552 
3553 	      gcc_assert (!gsi_end_p (gsi));
3554 	      stmt = gsi_stmt (gsi);
3555 	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
3556 		continue;
3557 
3558 	      if (gimple_num_ops (stmt) == 2)
3559 		{
3560 		  tree arg = gimple_assign_rhs1 (stmt);
3561 
3562 		  /* We're ignoring the subcode because we're
3563 		     effectively doing a STRIP_NOPS.  */
3564 
3565 		  if (TREE_CODE (arg) == ADDR_EXPR
3566 		      && TREE_OPERAND (arg, 0)
3567 		        == gimple_omp_taskreg_data_arg (entry_stmt))
3568 		    {
3569 		      parcopy_stmt = stmt;
3570 		      break;
3571 		    }
3572 		}
3573 	    }
3574 
3575 	  gcc_assert (parcopy_stmt != NULL);
3576 	  arg = DECL_ARGUMENTS (child_fn);
3577 
3578 	  if (!gimple_in_ssa_p (cfun))
3579 	    {
3580 	      if (gimple_assign_lhs (parcopy_stmt) == arg)
3581 		gsi_remove (&gsi, true);
3582 	      else
3583 		{
3584 	          /* ?? Is setting the subcode really necessary ??  */
3585 		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3586 		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
3587 		}
3588 	    }
3589 	  else
3590 	    {
3591 	      /* If we are in ssa form, we must load the value from the default
3592 		 definition of the argument.  That should not be defined now,
3593 		 since the argument is not used uninitialized.  */
3594 	      gcc_assert (ssa_default_def (cfun, arg) == NULL);
3595 	      narg = make_ssa_name (arg, gimple_build_nop ());
3596 	      set_ssa_default_def (cfun, arg, narg);
3597 	      /* ?? Is setting the subcode really necessary ??  */
3598 	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3599 	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
3600 	      update_stmt (parcopy_stmt);
3601 	    }
3602 	}
3603 
3604       /* Declare local variables needed in CHILD_CFUN.  */
3605       block = DECL_INITIAL (child_fn);
3606       BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3607       /* The gimplifier could record temporaries in parallel/task block
3608 	 rather than in the containing function's local_decls chain,
3609 	 which would mean cgraph missed finalizing them.  Do it now.  */
3610       for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3611 	if (TREE_CODE (t) == VAR_DECL
3612 	    && TREE_STATIC (t)
3613 	    && !DECL_EXTERNAL (t))
3614 	  varpool_finalize_decl (t);
3615       DECL_SAVED_TREE (child_fn) = NULL;
3616       /* We'll create a CFG for child_fn, so no gimple body is needed.  */
3617       gimple_set_body (child_fn, NULL);
3618       TREE_USED (block) = 1;
3619 
3620       /* Reset DECL_CONTEXT on function arguments.  */
3621       for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3622 	DECL_CONTEXT (t) = child_fn;
3623 
3624       /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3625 	 so that it can be moved to the child function.  */
3626       gsi = gsi_last_bb (entry_bb);
3627       stmt = gsi_stmt (gsi);
3628       gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3629 			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
3630       gsi_remove (&gsi, true);
3631       e = split_block (entry_bb, stmt);
3632       entry_bb = e->dest;
3633       edge e2 = NULL;
3634       if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3635 	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3636       else
3637 	{
3638 	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
3639 	  gcc_assert (e2->dest == region->exit);
3640 	  remove_edge (BRANCH_EDGE (entry_bb));
3641 	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
3642 	  gsi = gsi_last_bb (region->exit);
3643 	  gcc_assert (!gsi_end_p (gsi)
3644 		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3645 	  gsi_remove (&gsi, true);
3646 	}
3647 
3648       /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
3649       if (exit_bb)
3650 	{
3651 	  gsi = gsi_last_bb (exit_bb);
3652 	  gcc_assert (!gsi_end_p (gsi)
3653 		      && (gimple_code (gsi_stmt (gsi))
3654 			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
3655 	  stmt = gimple_build_return (NULL);
3656 	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3657 	  gsi_remove (&gsi, true);
3658 	}
3659 
3660       /* Move the parallel region into CHILD_CFUN.  */
3661 
3662       if (gimple_in_ssa_p (cfun))
3663 	{
3664 	  init_tree_ssa (child_cfun);
3665 	  init_ssa_operands (child_cfun);
3666 	  child_cfun->gimple_df->in_ssa_p = true;
3667 	  block = NULL_TREE;
3668 	}
3669       else
3670 	block = gimple_block (entry_stmt);
3671 
3672       new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3673       if (exit_bb)
3674 	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3675       if (e2)
3676 	{
3677 	  basic_block dest_bb = e2->dest;
3678 	  if (!exit_bb)
3679 	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
3680 	  remove_edge (e2);
3681 	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
3682 	}
3683 
3684       /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
3685       num = vec_safe_length (child_cfun->local_decls);
3686       for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3687 	{
3688 	  t = (*child_cfun->local_decls)[srcidx];
3689 	  if (DECL_CONTEXT (t) == cfun->decl)
3690 	    continue;
3691 	  if (srcidx != dstidx)
3692 	    (*child_cfun->local_decls)[dstidx] = t;
3693 	  dstidx++;
3694 	}
3695       if (dstidx != num)
3696 	vec_safe_truncate (child_cfun->local_decls, dstidx);
3697 
3698       /* Inform the callgraph about the new function.  */
3699       DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3700 	= cfun->curr_properties & ~PROP_loops;
3701       cgraph_add_new_function (child_fn, true);
3702 
3703       /* Fix the callgraph edges for child_cfun.  Those for cfun will be
3704 	 fixed in a following pass.  */
3705       push_cfun (child_cfun);
3706       if (optimize)
3707 	optimize_omp_library_calls (entry_stmt);
3708       rebuild_cgraph_edges ();
3709 
3710       /* Some EH regions might become dead, see PR34608.  If
3711 	 pass_cleanup_cfg isn't the first pass to happen with the
3712 	 new child, these dead EH edges might cause problems.
3713 	 Clean them up now.  */
3714       if (flag_exceptions)
3715 	{
3716 	  basic_block bb;
3717 	  bool changed = false;
3718 
3719 	  FOR_EACH_BB (bb)
3720 	    changed |= gimple_purge_dead_eh_edges (bb);
3721 	  if (changed)
3722 	    cleanup_tree_cfg ();
3723 	}
3724       if (gimple_in_ssa_p (cfun))
3725 	update_ssa (TODO_update_ssa);
3726       pop_cfun ();
3727     }
3728 
3729   /* Emit a library call to launch the children threads.  */
3730   if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3731     expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3732   else
3733     expand_task_call (new_bb, entry_stmt);
3734   if (gimple_in_ssa_p (cfun))
3735     update_ssa (TODO_update_ssa_only_virtuals);
3736 }
3737 
3738 
3739 /* A subroutine of expand_omp_for.  Generate code for a parallel
3740    loop with any schedule.  Given parameters:
3741 
3742 	for (V = N1; V cond N2; V += STEP) BODY;
3743 
3744    where COND is "<" or ">", we generate pseudocode
3745 
3746 	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3747 	if (more) goto L0; else goto L3;
3748     L0:
3749 	V = istart0;
3750 	iend = iend0;
3751     L1:
3752 	BODY;
3753 	V += STEP;
3754 	if (V cond iend) goto L1; else goto L2;
3755     L2:
3756 	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3757     L3:
3758 
3759     If this is a combined omp parallel loop, instead of the call to
3760     GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3761 
3762     For collapsed loops, given parameters:
3763       collapse(3)
3764       for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3765 	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3766 	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3767 	    BODY;
3768 
3769     we generate pseudocode
3770 
3771 	if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
3772 	if (cond3 is <)
3773 	  adj = STEP3 - 1;
3774 	else
3775 	  adj = STEP3 + 1;
3776 	count3 = (adj + N32 - N31) / STEP3;
3777 	if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
3778 	if (cond2 is <)
3779 	  adj = STEP2 - 1;
3780 	else
3781 	  adj = STEP2 + 1;
3782 	count2 = (adj + N22 - N21) / STEP2;
3783 	if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
3784 	if (cond1 is <)
3785 	  adj = STEP1 - 1;
3786 	else
3787 	  adj = STEP1 + 1;
3788 	count1 = (adj + N12 - N11) / STEP1;
3789 	count = count1 * count2 * count3;
3790 	goto Z1;
3791     Z0:
3792 	count = 0;
3793     Z1:
3794 	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3795 	if (more) goto L0; else goto L3;
3796     L0:
3797 	V = istart0;
3798 	T = V;
3799 	V3 = N31 + (T % count3) * STEP3;
3800 	T = T / count3;
3801 	V2 = N21 + (T % count2) * STEP2;
3802 	T = T / count2;
3803 	V1 = N11 + T * STEP1;
3804 	iend = iend0;
3805     L1:
3806 	BODY;
3807 	V += 1;
3808 	if (V < iend) goto L10; else goto L2;
3809     L10:
3810 	V3 += STEP3;
3811 	if (V3 cond3 N32) goto L1; else goto L11;
3812     L11:
3813 	V3 = N31;
3814 	V2 += STEP2;
3815 	if (V2 cond2 N22) goto L1; else goto L12;
3816     L12:
3817 	V2 = N21;
3818 	V1 += STEP1;
3819 	goto L1;
3820     L2:
3821 	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3822     L3:
3823 
3824       */
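/* As a concrete (purely illustrative) mapping of the GOMP_loop_foo_*
   names above, a user loop such as

	#pragma omp for schedule(dynamic, 4)
	for (i = 0; i < n; i++)
	  body (i);

   resolves START_FN/NEXT_FN to GOMP_loop_dynamic_start and
   GOMP_loop_dynamic_next; schedule(runtime) resolves to
   GOMP_loop_runtime_start/_next, and the _ull_ variants are chosen
   when the iteration type is unsigned long long (see the
   start_ix/next_ix computation in expand_omp_for below).  */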
3825 
3826 static void
3827 expand_omp_for_generic (struct omp_region *region,
3828 			struct omp_for_data *fd,
3829 			enum built_in_function start_fn,
3830 			enum built_in_function next_fn)
3831 {
3832   tree type, istart0, iend0, iend;
3833   tree t, vmain, vback, bias = NULL_TREE;
3834   basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3835   basic_block l2_bb = NULL, l3_bb = NULL;
3836   gimple_stmt_iterator gsi;
3837   gimple stmt;
3838   bool in_combined_parallel = is_combined_parallel (region);
3839   bool broken_loop = region->cont == NULL;
3840   edge e, ne;
3841   tree *counts = NULL;
3842   int i;
3843 
3844   gcc_assert (!broken_loop || !in_combined_parallel);
3845   gcc_assert (fd->iter_type == long_integer_type_node
3846 	      || !in_combined_parallel);
3847 
3848   type = TREE_TYPE (fd->loop.v);
3849   istart0 = create_tmp_var (fd->iter_type, ".istart0");
3850   iend0 = create_tmp_var (fd->iter_type, ".iend0");
3851   TREE_ADDRESSABLE (istart0) = 1;
3852   TREE_ADDRESSABLE (iend0) = 1;
3853 
3854   /* See if we need to bias by LLONG_MIN.  The GOMP_loop_ull_*
     runtime functions compare iteration bounds as unsigned values, so
     when the loop variable has a signed type whose bounds may straddle
     zero we bias everything by the most negative value of that type;
     the bias is subtracted again below when V and IEND are computed
     from istart0 and iend0.  */
3855   if (fd->iter_type == long_long_unsigned_type_node
3856       && TREE_CODE (type) == INTEGER_TYPE
3857       && !TYPE_UNSIGNED (type))
3858     {
3859       tree n1, n2;
3860 
3861       if (fd->loop.cond_code == LT_EXPR)
3862 	{
3863 	  n1 = fd->loop.n1;
3864 	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3865 	}
3866       else
3867 	{
3868 	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3869 	  n2 = fd->loop.n1;
3870 	}
3871       if (TREE_CODE (n1) != INTEGER_CST
3872 	  || TREE_CODE (n2) != INTEGER_CST
3873 	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3874 	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3875     }
3876 
3877   entry_bb = region->entry;
3878   cont_bb = region->cont;
3879   collapse_bb = NULL;
3880   gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3881   gcc_assert (broken_loop
3882 	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3883   l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3884   l1_bb = single_succ (l0_bb);
3885   if (!broken_loop)
3886     {
3887       l2_bb = create_empty_bb (cont_bb);
3888       gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3889       gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3890     }
3891   else
3892     l2_bb = NULL;
3893   l3_bb = BRANCH_EDGE (entry_bb)->dest;
3894   exit_bb = region->exit;
3895 
3896   gsi = gsi_last_bb (entry_bb);
3897 
3898   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3899   if (fd->collapse > 1)
3900     {
3901       basic_block zero_iter_bb = NULL;
3902       int first_zero_iter = -1;
3903 
3904 	      /* Collapsed loops need work for expansion in SSA form.  */
3905       gcc_assert (!gimple_in_ssa_p (cfun));
3906       counts = (tree *) alloca (fd->collapse * sizeof (tree));
3907       for (i = 0; i < fd->collapse; i++)
3908 	{
3909 	  tree itype = TREE_TYPE (fd->loops[i].v);
3910 
3911 	  if (SSA_VAR_P (fd->loop.n2)
3912 	      && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
3913 				    fold_convert (itype, fd->loops[i].n1),
3914 				    fold_convert (itype, fd->loops[i].n2)))
3915 		  == NULL_TREE || !integer_onep (t)))
3916 	    {
3917 	      tree n1, n2;
3918 	      n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
3919 	      n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
3920 					     true, GSI_SAME_STMT);
3921 	      n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
3922 	      n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
3923 					     true, GSI_SAME_STMT);
3924 	      stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
3925 					NULL_TREE, NULL_TREE);
3926 	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3927 	      if (walk_tree (gimple_cond_lhs_ptr (stmt),
3928 			     expand_omp_regimplify_p, NULL, NULL)
3929 		  || walk_tree (gimple_cond_rhs_ptr (stmt),
3930 				expand_omp_regimplify_p, NULL, NULL))
3931 		{
3932 		  gsi = gsi_for_stmt (stmt);
3933 		  gimple_regimplify_operands (stmt, &gsi);
3934 		}
3935 	      e = split_block (entry_bb, stmt);
3936 	      if (zero_iter_bb == NULL)
3937 		{
3938 		  first_zero_iter = i;
3939 		  zero_iter_bb = create_empty_bb (entry_bb);
3940 		  if (current_loops)
3941 		    add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
3942 		  gsi = gsi_after_labels (zero_iter_bb);
3943 		  stmt = gimple_build_assign (fd->loop.n2,
3944 					      build_zero_cst (type));
3945 		  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3946 		  set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
3947 					   entry_bb);
3948 		}
3949 	      ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
3950 	      ne->probability = REG_BR_PROB_BASE / 2000 - 1;
3951 	      e->flags = EDGE_TRUE_VALUE;
3952 	      e->probability = REG_BR_PROB_BASE - ne->probability;
3953 	      entry_bb = e->dest;
3954 	      gsi = gsi_last_bb (entry_bb);
3955 	    }
3956 	  if (POINTER_TYPE_P (itype))
3957 	    itype = signed_type_for (itype);
3958 	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3959 				     ? -1 : 1));
3960 	  t = fold_build2 (PLUS_EXPR, itype,
3961 			   fold_convert (itype, fd->loops[i].step), t);
3962 	  t = fold_build2 (PLUS_EXPR, itype, t,
3963 			   fold_convert (itype, fd->loops[i].n2));
3964 	  t = fold_build2 (MINUS_EXPR, itype, t,
3965 			   fold_convert (itype, fd->loops[i].n1));
3966 	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3967 	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
3968 			     fold_build1 (NEGATE_EXPR, itype, t),
3969 			     fold_build1 (NEGATE_EXPR, itype,
3970 					  fold_convert (itype,
3971 							fd->loops[i].step)));
3972 	  else
3973 	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3974 			     fold_convert (itype, fd->loops[i].step));
3975 	  t = fold_convert (type, t);
3976 	  if (TREE_CODE (t) == INTEGER_CST)
3977 	    counts[i] = t;
3978 	  else
3979 	    {
3980 	      counts[i] = create_tmp_reg (type, ".count");
3981 	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3982 					    true, GSI_SAME_STMT);
3983 	      stmt = gimple_build_assign (counts[i], t);
3984 	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3985 	    }
3986 	  if (SSA_VAR_P (fd->loop.n2))
3987 	    {
3988 	      if (i == 0)
3989 		t = counts[0];
3990 	      else
3991 		{
3992 		  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3993 		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3994 						true, GSI_SAME_STMT);
3995 		}
3996 	      stmt = gimple_build_assign (fd->loop.n2, t);
3997 	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3998 	    }
3999 	}
4000       if (zero_iter_bb)
4001 	{
4002 	  /* Some counts[i] vars might be uninitialized if
4003 	     some loop has zero iterations.  But the body shouldn't
4004 	     be executed in that case, so just avoid uninit warnings.  */
4005 	  for (i = first_zero_iter; i < fd->collapse; i++)
4006 	    if (SSA_VAR_P (counts[i]))
4007 	      TREE_NO_WARNING (counts[i]) = 1;
4008 	  gsi_prev (&gsi);
4009 	  e = split_block (entry_bb, gsi_stmt (gsi));
4010 	  entry_bb = e->dest;
4011 	  make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4012 	  gsi = gsi_last_bb (entry_bb);
4013 	  set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4014 				   get_immediate_dominator (CDI_DOMINATORS,
4015 							    zero_iter_bb));
4016 	}
4017     }
4018   if (in_combined_parallel)
4019     {
4020       /* In a combined parallel loop, emit a call to
4021 	 GOMP_loop_foo_next.  */
4022       t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4023 			   build_fold_addr_expr (istart0),
4024 			   build_fold_addr_expr (iend0));
4025     }
4026   else
4027     {
4028       tree t0, t1, t2, t3, t4;
4029       /* If this is not a combined parallel loop, emit a call to
4030 	 GOMP_loop_foo_start in ENTRY_BB.  */
4031       t4 = build_fold_addr_expr (iend0);
4032       t3 = build_fold_addr_expr (istart0);
4033       t2 = fold_convert (fd->iter_type, fd->loop.step);
4034       if (POINTER_TYPE_P (type)
4035 	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
4036 	{
4037 	  /* Avoid casting pointers to integer of a different size.  */
4038 	  tree itype = signed_type_for (type);
4039 	  t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
4040 	  t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
4041 	}
4042       else
4043 	{
4044 	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
4045 	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
4046 	}
4047       if (bias)
4048 	{
4049 	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4050 	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4051 	}
4052       if (fd->iter_type == long_integer_type_node)
4053 	{
4054 	  if (fd->chunk_size)
4055 	    {
4056 	      t = fold_convert (fd->iter_type, fd->chunk_size);
4057 	      t = build_call_expr (builtin_decl_explicit (start_fn),
4058 				   6, t0, t1, t2, t, t3, t4);
4059 	    }
4060 	  else
4061 	    t = build_call_expr (builtin_decl_explicit (start_fn),
4062 				 5, t0, t1, t2, t3, t4);
4063 	}
4064       else
4065 	{
4066 	  tree t5;
4067 	  tree c_bool_type;
4068 	  tree bfn_decl;
4069 
4070 	  /* The GOMP_loop_ull_*start functions have an additional boolean
4071 	     argument, true for < loops and false for > loops.
4072 	     In Fortran, the C bool type can be different from
4073 	     boolean_type_node.  */
4074 	  bfn_decl = builtin_decl_explicit (start_fn);
4075 	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
4076 	  t5 = build_int_cst (c_bool_type,
4077 			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
4078 	  if (fd->chunk_size)
4079 	    {
4080 	      tree bfn_decl = builtin_decl_explicit (start_fn);
4081 	      t = fold_convert (fd->iter_type, fd->chunk_size);
4082 	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
4083 	    }
4084 	  else
4085 	    t = build_call_expr (builtin_decl_explicit (start_fn),
4086 				 6, t5, t0, t1, t2, t3, t4);
4087 	}
4088     }
4089   if (TREE_TYPE (t) != boolean_type_node)
4090     t = fold_build2 (NE_EXPR, boolean_type_node,
4091 		     t, build_int_cst (TREE_TYPE (t), 0));
4092   t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4093 			       	true, GSI_SAME_STMT);
4094   gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4095 
4096   /* Remove the GIMPLE_OMP_FOR statement.  */
4097   gsi_remove (&gsi, true);
4098 
4099   /* Iteration setup for the sequential loop goes in L0_BB.  */
4100   gsi = gsi_start_bb (l0_bb);
4101   t = istart0;
4102   if (bias)
4103     t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4104   if (POINTER_TYPE_P (type))
4105     t = fold_convert (signed_type_for (type), t);
4106   t = fold_convert (type, t);
4107   t = force_gimple_operand_gsi (&gsi, t,
4108 				DECL_P (fd->loop.v)
4109 				&& TREE_ADDRESSABLE (fd->loop.v),
4110 				NULL_TREE, false, GSI_CONTINUE_LINKING);
4111   stmt = gimple_build_assign (fd->loop.v, t);
4112   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4113 
4114   t = iend0;
4115   if (bias)
4116     t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4117   if (POINTER_TYPE_P (type))
4118     t = fold_convert (signed_type_for (type), t);
4119   t = fold_convert (type, t);
4120   iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4121 				   false, GSI_CONTINUE_LINKING);
4122   if (fd->collapse > 1)
4123     {
4124       tree tem = create_tmp_reg (type, ".tem");
4125       stmt = gimple_build_assign (tem, fd->loop.v);
4126       gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4127       for (i = fd->collapse - 1; i >= 0; i--)
4128 	{
4129 	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
4130 	  itype = vtype;
4131 	  if (POINTER_TYPE_P (vtype))
4132 	    itype = signed_type_for (vtype);
4133 	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4134 	  t = fold_convert (itype, t);
4135 	  t = fold_build2 (MULT_EXPR, itype, t,
4136 			   fold_convert (itype, fd->loops[i].step));
4137 	  if (POINTER_TYPE_P (vtype))
4138 	    t = fold_build_pointer_plus (fd->loops[i].n1, t);
4139 	  else
4140 	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4141 	  t = force_gimple_operand_gsi (&gsi, t,
4142 					DECL_P (fd->loops[i].v)
4143 					&& TREE_ADDRESSABLE (fd->loops[i].v),
4144 					NULL_TREE, false,
4145 					GSI_CONTINUE_LINKING);
4146 	  stmt = gimple_build_assign (fd->loops[i].v, t);
4147 	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4148 	  if (i != 0)
4149 	    {
4150 	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4151 	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4152 					    false, GSI_CONTINUE_LINKING);
4153 	      stmt = gimple_build_assign (tem, t);
4154 	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4155 	    }
4156 	}
4157     }
4158 
4159   if (!broken_loop)
4160     {
4161       /* Code to control the increment and predicate for the sequential
4162 	 loop goes in the CONT_BB.  */
4163       gsi = gsi_last_bb (cont_bb);
4164       stmt = gsi_stmt (gsi);
4165       gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4166       vmain = gimple_omp_continue_control_use (stmt);
4167       vback = gimple_omp_continue_control_def (stmt);
4168 
4169       if (POINTER_TYPE_P (type))
4170 	t = fold_build_pointer_plus (vmain, fd->loop.step);
4171       else
4172 	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4173       t = force_gimple_operand_gsi (&gsi, t,
4174 				    DECL_P (vback) && TREE_ADDRESSABLE (vback),
4175 				    NULL_TREE, true, GSI_SAME_STMT);
4176       stmt = gimple_build_assign (vback, t);
4177       gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4178 
4179       t = build2 (fd->loop.cond_code, boolean_type_node,
4180 		  DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4181 		  iend);
4182       stmt = gimple_build_cond_empty (t);
4183       gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4184 
4185       /* Remove GIMPLE_OMP_CONTINUE.  */
4186       gsi_remove (&gsi, true);
4187 
4188       if (fd->collapse > 1)
4189 	{
4190 	  basic_block last_bb, bb;
4191 
4192 	  last_bb = cont_bb;
4193 	  for (i = fd->collapse - 1; i >= 0; i--)
4194 	    {
4195 	      tree vtype = TREE_TYPE (fd->loops[i].v);
4196 
4197 	      bb = create_empty_bb (last_bb);
4198 	      gsi = gsi_start_bb (bb);
4199 
4200 	      if (i < fd->collapse - 1)
4201 		{
4202 		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4203 		  e->probability = REG_BR_PROB_BASE / 8;
4204 
4205 		  t = fd->loops[i + 1].n1;
4206 		  t = force_gimple_operand_gsi (&gsi, t,
4207 						DECL_P (fd->loops[i + 1].v)
4208 						&& TREE_ADDRESSABLE
4209 							(fd->loops[i + 1].v),
4210 						NULL_TREE, false,
4211 						GSI_CONTINUE_LINKING);
4212 		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4213 		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4214 		}
4215 	      else
4216 		collapse_bb = bb;
4217 
4218 	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4219 
4220 	      if (POINTER_TYPE_P (vtype))
4221 		t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4222 	      else
4223 		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4224 				 fd->loops[i].step);
4225 	      t = force_gimple_operand_gsi (&gsi, t,
4226 					    DECL_P (fd->loops[i].v)
4227 					    && TREE_ADDRESSABLE (fd->loops[i].v),
4228 					    NULL_TREE, false,
4229 					    GSI_CONTINUE_LINKING);
4230 	      stmt = gimple_build_assign (fd->loops[i].v, t);
4231 	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4232 
4233 	      if (i > 0)
4234 		{
4235 		  t = fd->loops[i].n2;
4236 		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4237 						false, GSI_CONTINUE_LINKING);
4238 		  tree v = fd->loops[i].v;
4239 		  if (DECL_P (v) && TREE_ADDRESSABLE (v))
4240 		    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4241 						  false, GSI_CONTINUE_LINKING);
4242 		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4243 				   v, t);
4244 		  stmt = gimple_build_cond_empty (t);
4245 		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4246 		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4247 		  e->probability = REG_BR_PROB_BASE * 7 / 8;
4248 		}
4249 	      else
4250 		make_edge (bb, l1_bb, EDGE_FALLTHRU);
4251 	      last_bb = bb;
4252 	    }
4253 	}
4254 
4255       /* Emit code to get the next parallel iteration in L2_BB.  */
4256       gsi = gsi_start_bb (l2_bb);
4257 
4258       t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4259 			   build_fold_addr_expr (istart0),
4260 			   build_fold_addr_expr (iend0));
4261       t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4262 				    false, GSI_CONTINUE_LINKING);
4263       if (TREE_TYPE (t) != boolean_type_node)
4264 	t = fold_build2 (NE_EXPR, boolean_type_node,
4265 			 t, build_int_cst (TREE_TYPE (t), 0));
4266       stmt = gimple_build_cond_empty (t);
4267       gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4268     }
4269 
4270   /* Add the loop cleanup function.  */
4271   gsi = gsi_last_bb (exit_bb);
4272   if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4273     t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4274   else
4275     t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4276   stmt = gimple_build_call (t, 0);
4277   gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4278   gsi_remove (&gsi, true);
4279 
4280   /* Connect the new blocks.  */
4281   find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4282   find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4283 
4284   if (!broken_loop)
4285     {
4286       gimple_seq phis;
4287 
4288       e = find_edge (cont_bb, l3_bb);
4289       ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4290 
4291       phis = phi_nodes (l3_bb);
4292       for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4293 	{
4294 	  gimple phi = gsi_stmt (gsi);
4295 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4296 		   PHI_ARG_DEF_FROM_EDGE (phi, e));
4297 	}
4298       remove_edge (e);
4299 
4300       make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4301       if (fd->collapse > 1)
4302 	{
4303 	  e = find_edge (cont_bb, l1_bb);
4304 	  remove_edge (e);
4305 	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4306 	}
4307       else
4308 	{
4309 	  e = find_edge (cont_bb, l1_bb);
4310 	  e->flags = EDGE_TRUE_VALUE;
4311 	}
4312       e->probability = REG_BR_PROB_BASE * 7 / 8;
4313       find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4314       make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4315 
4316       set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4317 			       recompute_dominator (CDI_DOMINATORS, l2_bb));
4318       set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4319 			       recompute_dominator (CDI_DOMINATORS, l3_bb));
4320       set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4321 			       recompute_dominator (CDI_DOMINATORS, l0_bb));
4322       set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4323 			       recompute_dominator (CDI_DOMINATORS, l1_bb));
4324     }
4325 }
4326 
4327 
4328 /* A subroutine of expand_omp_for.  Generate code for a parallel
4329    loop with static schedule and no specified chunk size.  Given
4330    parameters:
4331 
4332 	for (V = N1; V cond N2; V += STEP) BODY;
4333 
4334    where COND is "<" or ">", we generate pseudocode
4335 
4336 	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4337 	if (cond is <)
4338 	  adj = STEP - 1;
4339 	else
4340 	  adj = STEP + 1;
4341 	if ((__typeof (V)) -1 > 0 && cond is >)
4342 	  n = -(adj + N2 - N1) / -STEP;
4343 	else
4344 	  n = (adj + N2 - N1) / STEP;
4345 	q = n / nthreads;
4346 	tt = n % nthreads;
4347 	if (threadid < tt) goto L3; else goto L4;
4348     L3:
4349 	tt = 0;
4350 	q = q + 1;
4351     L4:
4352 	s0 = q * threadid + tt;
4353 	e0 = s0 + q;
4354 	V = s0 * STEP + N1;
4355 	if (s0 >= e0) goto L2; else goto L0;
4356     L0:
4357 	e = e0 * STEP + N1;
4358     L1:
4359 	BODY;
4360 	V += STEP;
4361 	if (V cond e) goto L1;
4362     L2:
4363 */
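/* Worked example (illustrative only): with n = 10 and nthreads = 4,
   q = 2 and tt = 2, so threads 0 and 1 each take q + 1 = 3 iterations
   ([0,3) and [3,6)) while threads 2 and 3 take 2 each ([6,8) and
   [8,10)); every iteration is assigned to exactly one thread.  */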
4364 
4365 static void
4366 expand_omp_for_static_nochunk (struct omp_region *region,
4367 			       struct omp_for_data *fd)
4368 {
4369   tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4370   tree type, itype, vmain, vback;
4371   basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4372   basic_block body_bb, cont_bb;
4373   basic_block fin_bb;
4374   gimple_stmt_iterator gsi;
4375   gimple stmt;
4376   edge ep;
4377 
4378   itype = type = TREE_TYPE (fd->loop.v);
4379   if (POINTER_TYPE_P (type))
4380     itype = signed_type_for (type);
4381 
4382   entry_bb = region->entry;
4383   cont_bb = region->cont;
4384   gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4385   gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4386   seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4387   body_bb = single_succ (seq_start_bb);
4388   gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4389   gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4390   fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4391   exit_bb = region->exit;
4392 
4393   /* Iteration space partitioning goes in ENTRY_BB.  */
4394   gsi = gsi_last_bb (entry_bb);
4395   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4396 
4397   t = fold_binary (fd->loop.cond_code, boolean_type_node,
4398 		   fold_convert (type, fd->loop.n1),
4399 		   fold_convert (type, fd->loop.n2));
4400   if (TYPE_UNSIGNED (type)
4401       && (t == NULL_TREE || !integer_onep (t)))
4402     {
4403       tree n1, n2;
4404       n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4405       n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4406 				     true, GSI_SAME_STMT);
4407       n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4408       n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4409 				     true, GSI_SAME_STMT);
4410       stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4411 				NULL_TREE, NULL_TREE);
4412       gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4413       if (walk_tree (gimple_cond_lhs_ptr (stmt),
4414 		     expand_omp_regimplify_p, NULL, NULL)
4415 	  || walk_tree (gimple_cond_rhs_ptr (stmt),
4416 			expand_omp_regimplify_p, NULL, NULL))
4417 	{
4418 	  gsi = gsi_for_stmt (stmt);
4419 	  gimple_regimplify_operands (stmt, &gsi);
4420 	}
4421       ep = split_block (entry_bb, stmt);
4422       ep->flags = EDGE_TRUE_VALUE;
4423       entry_bb = ep->dest;
4424       ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4425       ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4426       ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4427       if (gimple_in_ssa_p (cfun))
4428 	{
4429 	  int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4430 	  for (gsi = gsi_start_phis (fin_bb);
4431 	       !gsi_end_p (gsi); gsi_next (&gsi))
4432 	    {
4433 	      gimple phi = gsi_stmt (gsi);
4434 	      add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4435 			   ep, UNKNOWN_LOCATION);
4436 	    }
4437 	}
4438       gsi = gsi_last_bb (entry_bb);
4439     }
4440 
4441   t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4442   t = fold_convert (itype, t);
4443   nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4444 				       true, GSI_SAME_STMT);
4445 
4446   t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4447   t = fold_convert (itype, t);
4448   threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4449 				       true, GSI_SAME_STMT);
4450 
4451   fd->loop.n1
4452     = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4453 				true, NULL_TREE, true, GSI_SAME_STMT);
4454   fd->loop.n2
4455     = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4456 				true, NULL_TREE, true, GSI_SAME_STMT);
4457   fd->loop.step
4458     = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4459 				true, NULL_TREE, true, GSI_SAME_STMT);
4460 
4461   t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4462   t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4463   t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4464   t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4465   if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4466     t = fold_build2 (TRUNC_DIV_EXPR, itype,
4467 		     fold_build1 (NEGATE_EXPR, itype, t),
4468 		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4469   else
4470     t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4471   t = fold_convert (itype, t);
4472   n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4473 
4474   q = create_tmp_reg (itype, "q");
4475   t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4476   t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4477   gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4478 
4479   tt = create_tmp_reg (itype, "tt");
4480   t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4481   t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4482   gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4483 
4484   t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4485   stmt = gimple_build_cond_empty (t);
4486   gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4487 
4488   second_bb = split_block (entry_bb, stmt)->dest;
4489   gsi = gsi_last_bb (second_bb);
4490   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4491 
4492   gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4493 		     GSI_SAME_STMT);
4494   stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4495 				       build_int_cst (itype, 1));
4496   gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4497 
4498   third_bb = split_block (second_bb, stmt)->dest;
4499   gsi = gsi_last_bb (third_bb);
4500   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4501 
4502   t = build2 (MULT_EXPR, itype, q, threadid);
4503   t = build2 (PLUS_EXPR, itype, t, tt);
4504   s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4505 
4506   t = fold_build2 (PLUS_EXPR, itype, s0, q);
4507   e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4508 
4509   t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4510   gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4511 
4512   /* Remove the GIMPLE_OMP_FOR statement.  */
4513   gsi_remove (&gsi, true);
4514 
4515   /* Setup code for sequential iteration goes in SEQ_START_BB.  */
4516   gsi = gsi_start_bb (seq_start_bb);
4517 
4518   t = fold_convert (itype, s0);
4519   t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4520   if (POINTER_TYPE_P (type))
4521     t = fold_build_pointer_plus (fd->loop.n1, t);
4522   else
4523     t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4524   t = force_gimple_operand_gsi (&gsi, t,
4525 				DECL_P (fd->loop.v)
4526 				&& TREE_ADDRESSABLE (fd->loop.v),
4527 				NULL_TREE, false, GSI_CONTINUE_LINKING);
4528   stmt = gimple_build_assign (fd->loop.v, t);
4529   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4530 
4531   t = fold_convert (itype, e0);
4532   t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4533   if (POINTER_TYPE_P (type))
4534     t = fold_build_pointer_plus (fd->loop.n1, t);
4535   else
4536     t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4537   e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4538 				false, GSI_CONTINUE_LINKING);
4539 
4540   /* The code controlling the sequential loop replaces the
4541      GIMPLE_OMP_CONTINUE.  */
4542   gsi = gsi_last_bb (cont_bb);
4543   stmt = gsi_stmt (gsi);
4544   gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4545   vmain = gimple_omp_continue_control_use (stmt);
4546   vback = gimple_omp_continue_control_def (stmt);
4547 
4548   if (POINTER_TYPE_P (type))
4549     t = fold_build_pointer_plus (vmain, fd->loop.step);
4550   else
4551     t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4552   t = force_gimple_operand_gsi (&gsi, t,
4553 				DECL_P (vback) && TREE_ADDRESSABLE (vback),
4554 				NULL_TREE, true, GSI_SAME_STMT);
4555   stmt = gimple_build_assign (vback, t);
4556   gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4557 
4558   t = build2 (fd->loop.cond_code, boolean_type_node,
4559 	      DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
4560   gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4561 
4562   /* Remove the GIMPLE_OMP_CONTINUE statement.  */
4563   gsi_remove (&gsi, true);
4564 
4565   /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
4566   gsi = gsi_last_bb (exit_bb);
4567   if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4568     force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4569 			      false, GSI_SAME_STMT);
4570   gsi_remove (&gsi, true);
4571 
4572   /* Connect all the blocks.  */
4573   ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4574   ep->probability = REG_BR_PROB_BASE / 4 * 3;
4575   ep = find_edge (entry_bb, second_bb);
4576   ep->flags = EDGE_TRUE_VALUE;
4577   ep->probability = REG_BR_PROB_BASE / 4;
4578   find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4579   find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4580 
4581   find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4582   find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4583 
4584   set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4585   set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4586   set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4587   set_immediate_dominator (CDI_DOMINATORS, body_bb,
4588 			   recompute_dominator (CDI_DOMINATORS, body_bb));
4589   set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4590 			   recompute_dominator (CDI_DOMINATORS, fin_bb));
4591 }
4592 
4593 
4594 /* A subroutine of expand_omp_for.  Generate code for a parallel
4595    loop with static schedule and a specified chunk size.  Given
4596    parameters:
4597 
4598 	for (V = N1; V cond N2; V += STEP) BODY;
4599 
4600    where COND is "<" or ">", we generate pseudocode
4601 
4602 	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4603 	if (cond is <)
4604 	  adj = STEP - 1;
4605 	else
4606 	  adj = STEP + 1;
4607 	if ((__typeof (V)) -1 > 0 && cond is >)
4608 	  n = -(adj + N2 - N1) / -STEP;
4609 	else
4610 	  n = (adj + N2 - N1) / STEP;
4611 	trip = 0;
4612 	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
4613 					      here so that V is defined
4614 					      if the loop is not entered
4615     L0:
4616 	s0 = (trip * nthreads + threadid) * CHUNK;
4617 	e0 = min(s0 + CHUNK, n);
4618 	if (s0 < n) goto L1; else goto L4;
4619     L1:
4620 	V = s0 * STEP + N1;
4621 	e = e0 * STEP + N1;
4622     L2:
4623 	BODY;
4624 	V += STEP;
4625 	if (V cond e) goto L2; else goto L3;
4626     L3:
4627 	trip += 1;
4628 	goto L0;
4629     L4:
4630 */
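/* Worked example (illustrative only): with n = 10, nthreads = 4 and
   CHUNK = 2, trip 0 gives thread T the range s0 = 2*T,
   e0 = min (2*T + 2, 10), i.e. [0,2), [2,4), [4,6) and [6,8); on
   trip 1 thread 0 gets [8,10) while the other threads see s0 >= n
   and branch to L4.  */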
4631 
4632 static void
4633 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4634 {
4635   tree n, s0, e0, e, t;
4636   tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4637   tree type, itype, v_main, v_back, v_extra;
4638   basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4639   basic_block trip_update_bb, cont_bb, fin_bb;
4640   gimple_stmt_iterator si;
4641   gimple stmt;
4642   edge se;
4643 
4644   itype = type = TREE_TYPE (fd->loop.v);
4645   if (POINTER_TYPE_P (type))
4646     itype = signed_type_for (type);
4647 
4648   entry_bb = region->entry;
4649   se = split_block (entry_bb, last_stmt (entry_bb));
4650   entry_bb = se->src;
4651   iter_part_bb = se->dest;
4652   cont_bb = region->cont;
4653   gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4654   gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4655 	      == FALLTHRU_EDGE (cont_bb)->dest);
4656   seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4657   body_bb = single_succ (seq_start_bb);
4658   gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4659   gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4660   fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4661   trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4662   exit_bb = region->exit;
4663 
4664   /* Trip and adjustment setup goes in ENTRY_BB.  */
4665   si = gsi_last_bb (entry_bb);
4666   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4667 
4668   t = fold_binary (fd->loop.cond_code, boolean_type_node,
4669 		   fold_convert (type, fd->loop.n1),
4670 		   fold_convert (type, fd->loop.n2));
4671   if (TYPE_UNSIGNED (type)
4672       && (t == NULL_TREE || !integer_onep (t)))
4673     {
4674       tree n1, n2;
4675       n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4676       n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
4677 				     true, GSI_SAME_STMT);
4678       n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4679       n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
4680 				     true, GSI_SAME_STMT);
4681       stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4682 				NULL_TREE, NULL_TREE);
4683       gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4684       if (walk_tree (gimple_cond_lhs_ptr (stmt),
4685 		     expand_omp_regimplify_p, NULL, NULL)
4686 	  || walk_tree (gimple_cond_rhs_ptr (stmt),
4687 			expand_omp_regimplify_p, NULL, NULL))
4688 	{
4689 	  si = gsi_for_stmt (stmt);
4690 	  gimple_regimplify_operands (stmt, &si);
4691 	}
4692       se = split_block (entry_bb, stmt);
4693       se->flags = EDGE_TRUE_VALUE;
4694       entry_bb = se->dest;
4695       se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4696       se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
4697       se->probability = REG_BR_PROB_BASE / 2000 - 1;
4698       if (gimple_in_ssa_p (cfun))
4699 	{
4700 	  int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4701 	  for (si = gsi_start_phis (fin_bb);
4702 	       !gsi_end_p (si); gsi_next (&si))
4703 	    {
4704 	      gimple phi = gsi_stmt (si);
4705 	      add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4706 			   se, UNKNOWN_LOCATION);
4707 	    }
4708 	}
4709       si = gsi_last_bb (entry_bb);
4710     }
4711 
4712   t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4713   t = fold_convert (itype, t);
4714   nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4715 				       true, GSI_SAME_STMT);
4716 
4717   t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4718   t = fold_convert (itype, t);
4719   threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4720 				       true, GSI_SAME_STMT);
4721 
4722   fd->loop.n1
4723     = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4724 				true, NULL_TREE, true, GSI_SAME_STMT);
4725   fd->loop.n2
4726     = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4727 				true, NULL_TREE, true, GSI_SAME_STMT);
4728   fd->loop.step
4729     = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4730 				true, NULL_TREE, true, GSI_SAME_STMT);
4731   fd->chunk_size
4732     = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4733 				true, NULL_TREE, true, GSI_SAME_STMT);
4734 
4735   t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4736   t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4737   t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4738   t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4739   if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4740     t = fold_build2 (TRUNC_DIV_EXPR, itype,
4741 		     fold_build1 (NEGATE_EXPR, itype, t),
4742 		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4743   else
4744     t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4745   t = fold_convert (itype, t);
4746   n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4747 				true, GSI_SAME_STMT);
4748 
4749   trip_var = create_tmp_reg (itype, ".trip");
4750   if (gimple_in_ssa_p (cfun))
4751     {
4752       trip_init = make_ssa_name (trip_var, NULL);
4753       trip_main = make_ssa_name (trip_var, NULL);
4754       trip_back = make_ssa_name (trip_var, NULL);
4755     }
4756   else
4757     {
4758       trip_init = trip_var;
4759       trip_main = trip_var;
4760       trip_back = trip_var;
4761     }
4762 
4763   stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4764   gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4765 
4766   t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4767   t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4768   if (POINTER_TYPE_P (type))
4769     t = fold_build_pointer_plus (fd->loop.n1, t);
4770   else
4771     t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4772   v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4773 				      true, GSI_SAME_STMT);
4774 
4775   /* Remove the GIMPLE_OMP_FOR.  */
4776   gsi_remove (&si, true);
4777 
4778   /* Iteration space partitioning goes in ITER_PART_BB.  */
4779   si = gsi_last_bb (iter_part_bb);
4780 
4781   t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4782   t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4783   t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4784   s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4785 				 false, GSI_CONTINUE_LINKING);
4786 
4787   t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4788   t = fold_build2 (MIN_EXPR, itype, t, n);
4789   e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4790 				 false, GSI_CONTINUE_LINKING);
4791 
4792   t = build2 (LT_EXPR, boolean_type_node, s0, n);
4793   gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4794 
4795   /* Setup code for sequential iteration goes in SEQ_START_BB.  */
4796   si = gsi_start_bb (seq_start_bb);
4797 
4798   t = fold_convert (itype, s0);
4799   t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4800   if (POINTER_TYPE_P (type))
4801     t = fold_build_pointer_plus (fd->loop.n1, t);
4802   else
4803     t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4804   t = force_gimple_operand_gsi (&si, t,
4805 				DECL_P (fd->loop.v)
4806 				&& TREE_ADDRESSABLE (fd->loop.v),
4807 				NULL_TREE, false, GSI_CONTINUE_LINKING);
4808   stmt = gimple_build_assign (fd->loop.v, t);
4809   gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4810 
4811   t = fold_convert (itype, e0);
4812   t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4813   if (POINTER_TYPE_P (type))
4814     t = fold_build_pointer_plus (fd->loop.n1, t);
4815   else
4816     t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4817   e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4818 				false, GSI_CONTINUE_LINKING);
4819 
4820   /* The code controlling the sequential loop goes in CONT_BB,
4821      replacing the GIMPLE_OMP_CONTINUE.  */
4822   si = gsi_last_bb (cont_bb);
4823   stmt = gsi_stmt (si);
4824   gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4825   v_main = gimple_omp_continue_control_use (stmt);
4826   v_back = gimple_omp_continue_control_def (stmt);
4827 
4828   if (POINTER_TYPE_P (type))
4829     t = fold_build_pointer_plus (v_main, fd->loop.step);
4830   else
4831     t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4832   if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
4833     t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4834 				  true, GSI_SAME_STMT);
4835   stmt = gimple_build_assign (v_back, t);
4836   gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4837 
4838   t = build2 (fd->loop.cond_code, boolean_type_node,
4839 	      DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
4840 	      ? t : v_back, e);
4841   gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4842 
4843   /* Remove GIMPLE_OMP_CONTINUE.  */
4844   gsi_remove (&si, true);
4845 
4846   /* Trip update code goes into TRIP_UPDATE_BB.  */
4847   si = gsi_start_bb (trip_update_bb);
4848 
4849   t = build_int_cst (itype, 1);
4850   t = build2 (PLUS_EXPR, itype, trip_main, t);
4851   stmt = gimple_build_assign (trip_back, t);
4852   gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4853 
4854   /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
4855   si = gsi_last_bb (exit_bb);
4856   if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4857     force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4858 			      false, GSI_SAME_STMT);
4859   gsi_remove (&si, true);
4860 
4861   /* Connect the new blocks.  */
4862   find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4863   find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4864 
4865   find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4866   find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4867 
4868   redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4869 
4870   if (gimple_in_ssa_p (cfun))
4871     {
4872       gimple_stmt_iterator psi;
4873       gimple phi;
4874       edge re, ene;
4875       edge_var_map_vector *head;
4876       edge_var_map *vm;
4877       size_t i;
4878 
4879       /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4880 	 remove arguments of the phi nodes in fin_bb.  We need to create
4881 	 appropriate phi nodes in iter_part_bb instead.  */
4882       se = single_pred_edge (fin_bb);
4883       re = single_succ_edge (trip_update_bb);
4884       head = redirect_edge_var_map_vector (re);
4885       ene = single_succ_edge (entry_bb);
4886 
4887       psi = gsi_start_phis (fin_bb);
4888       for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4889 	   gsi_next (&psi), ++i)
4890 	{
4891 	  gimple nphi;
4892 	  source_location locus;
4893 
4894 	  phi = gsi_stmt (psi);
4895 	  t = gimple_phi_result (phi);
4896 	  gcc_assert (t == redirect_edge_var_map_result (vm));
4897 	  nphi = create_phi_node (t, iter_part_bb);
4898 
4899 	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4900 	  locus = gimple_phi_arg_location_from_edge (phi, se);
4901 
4902 	  /* A special case -- fd->loop.v is not yet computed in
4903 	     iter_part_bb, so we need to use v_extra instead.  */
4904 	  if (t == fd->loop.v)
4905 	    t = v_extra;
4906 	  add_phi_arg (nphi, t, ene, locus);
4907 	  locus = redirect_edge_var_map_location (vm);
4908 	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4909 	}
4910       gcc_assert (!gsi_end_p (psi) && i == head->length ());
4911       redirect_edge_var_map_clear (re);
4912       while (1)
4913 	{
4914 	  psi = gsi_start_phis (fin_bb);
4915 	  if (gsi_end_p (psi))
4916 	    break;
4917 	  remove_phi_node (&psi, false);
4918 	}
4919 
4920       /* Make phi node for trip.  */
4921       phi = create_phi_node (trip_main, iter_part_bb);
4922       add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4923 		   UNKNOWN_LOCATION);
4924       add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4925 		   UNKNOWN_LOCATION);
4926     }
4927 
4928   set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4929   set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4930 			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4931   set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4932 			   recompute_dominator (CDI_DOMINATORS, fin_bb));
4933   set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4934 			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4935   set_immediate_dominator (CDI_DOMINATORS, body_bb,
4936 			   recompute_dominator (CDI_DOMINATORS, body_bb));
4937 }
4938 
4939 
4940 /* Expand the OpenMP loop defined by REGION.  */
4941 
4942 static void
4943 expand_omp_for (struct omp_region *region)
4944 {
4945   struct omp_for_data fd;
4946   struct omp_for_data_loop *loops;
4947 
4948   loops
4949     = (struct omp_for_data_loop *)
4950       alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4951 	      * sizeof (struct omp_for_data_loop));
4952   extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4953   region->sched_kind = fd.sched_kind;
4954 
4955   gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4956   BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4957   FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4958   if (region->cont)
4959     {
4960       gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4961       BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4962       FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4963     }
4964 
4965   if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4966       && !fd.have_ordered
4967       && fd.collapse == 1
4968       && region->cont != NULL)
4969     {
4970       if (fd.chunk_size == NULL)
4971 	expand_omp_for_static_nochunk (region, &fd);
4972       else
4973 	expand_omp_for_static_chunk (region, &fd);
4974     }
4975   else
4976     {
4977       int fn_index, start_ix, next_ix;
4978 
4979       if (fd.chunk_size == NULL
4980 	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4981 	fd.chunk_size = integer_zero_node;
4982       gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4983       fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4984 		  ? 3 : fd.sched_kind;
4985       fn_index += fd.have_ordered * 4;
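      /* An illustrative note (not part of the original source): assuming the
	 usual ordering of the GOMP_loop_* builtins, schedule(dynamic) with no
	 ordered clause gives fn_index == 1, so the indices computed below
	 select GOMP_loop_dynamic_start and GOMP_loop_dynamic_next, while an
	 ordered clause shifts both to the *_ordered_* entry points.  */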
4986       start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4987       next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4988       if (fd.iter_type == long_long_unsigned_type_node)
4989 	{
4990 	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4991 			- (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4992 	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4993 		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4994 	}
4995       expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4996 			      (enum built_in_function) next_ix);
4997     }
4998 
4999   if (gimple_in_ssa_p (cfun))
5000     update_ssa (TODO_update_ssa_only_virtuals);
5001 }
5002 
5003 
5004 /* Expand code for an OpenMP sections directive.  In pseudo code, we generate
5005 
5006 	v = GOMP_sections_start (n);
5007     L0:
5008 	switch (v)
5009 	  {
5010 	  case 0:
5011 	    goto L2;
5012 	  case 1:
5013 	    section 1;
5014 	    goto L1;
5015 	  case 2:
5016 	    ...
5017 	  case n:
5018 	    ...
5019 	  default:
5020 	    abort ();
5021 	  }
5022     L1:
5023 	v = GOMP_sections_next ();
5024 	goto L0;
5025     L2:
5026 	reduction;
5027 
5028     If this is a combined parallel sections region, replace the call to
5029     GOMP_sections_start with a call to GOMP_sections_next.  */
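
/* As a hedged illustration (not part of the original comment): given a
   user-level

	#pragma omp sections
	{
	  #pragma omp section
	    foo ();
	  #pragma omp section
	    bar ();
	}

   the expansion below emits roughly v = GOMP_sections_start (2), with
   case 1 of the switch running foo (), case 2 running bar (), and
   case 0 branching past the construct.  */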
5030 
5031 static void
5032 expand_omp_sections (struct omp_region *region)
5033 {
5034   tree t, u, vin = NULL, vmain, vnext, l2;
5035   vec<tree> label_vec;
5036   unsigned len;
5037   basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
5038   gimple_stmt_iterator si, switch_si;
5039   gimple sections_stmt, stmt, cont;
5040   edge_iterator ei;
5041   edge e;
5042   struct omp_region *inner;
5043   unsigned i, casei;
5044   bool exit_reachable = region->cont != NULL;
5045 
5046   gcc_assert (region->exit != NULL);
5047   entry_bb = region->entry;
5048   l0_bb = single_succ (entry_bb);
5049   l1_bb = region->cont;
5050   l2_bb = region->exit;
5051   if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5052     l2 = gimple_block_label (l2_bb);
5053   else
5054     {
5055       /* This can happen if there are reductions.  */
5056       len = EDGE_COUNT (l0_bb->succs);
5057       gcc_assert (len > 0);
5058       e = EDGE_SUCC (l0_bb, len - 1);
5059       si = gsi_last_bb (e->dest);
5060       l2 = NULL_TREE;
5061       if (gsi_end_p (si)
5062           || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5063 	l2 = gimple_block_label (e->dest);
5064       else
5065 	FOR_EACH_EDGE (e, ei, l0_bb->succs)
5066 	  {
5067 	    si = gsi_last_bb (e->dest);
5068 	    if (gsi_end_p (si)
5069 		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5070 	      {
5071 		l2 = gimple_block_label (e->dest);
5072 		break;
5073 	      }
5074 	  }
5075     }
5076   if (exit_reachable)
5077     default_bb = create_empty_bb (l1_bb->prev_bb);
5078   else
5079     default_bb = create_empty_bb (l0_bb);
5080 
5081   /* We will build a switch() with enough cases for all the
5082      GIMPLE_OMP_SECTION regions, a '0' case to indicate that there is
5083      no more work, and a default case to abort if something goes wrong.  */
5084   len = EDGE_COUNT (l0_bb->succs);
5085 
5086   /* Use vec::quick_push on label_vec throughout, since we know the size
5087      in advance.  */
5088   label_vec.create (len);
5089 
5090   /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5091      GIMPLE_OMP_SECTIONS statement.  */
5092   si = gsi_last_bb (entry_bb);
5093   sections_stmt = gsi_stmt (si);
5094   gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5095   vin = gimple_omp_sections_control (sections_stmt);
5096   if (!is_combined_parallel (region))
5097     {
5098       /* If we are not inside a combined parallel+sections region,
5099 	 call GOMP_sections_start.  */
5100       t = build_int_cst (unsigned_type_node, len - 1);
5101       u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5102       stmt = gimple_build_call (u, 1, t);
5103     }
5104   else
5105     {
5106       /* Otherwise, call GOMP_sections_next.  */
5107       u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5108       stmt = gimple_build_call (u, 0);
5109     }
5110   gimple_call_set_lhs (stmt, vin);
5111   gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5112   gsi_remove (&si, true);
5113 
5114   /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5115      L0_BB.  */
5116   switch_si = gsi_last_bb (l0_bb);
5117   gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5118   if (exit_reachable)
5119     {
5120       cont = last_stmt (l1_bb);
5121       gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5122       vmain = gimple_omp_continue_control_use (cont);
5123       vnext = gimple_omp_continue_control_def (cont);
5124     }
5125   else
5126     {
5127       vmain = vin;
5128       vnext = NULL_TREE;
5129     }
5130 
5131   t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5132   label_vec.quick_push (t);
5133   i = 1;
5134 
5135   /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
5136   for (inner = region->inner, casei = 1;
5137        inner;
5138        inner = inner->next, i++, casei++)
5139     {
5140       basic_block s_entry_bb, s_exit_bb;
5141 
5142       /* Skip optional reduction region.  */
5143       if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5144 	{
5145 	  --i;
5146 	  --casei;
5147 	  continue;
5148 	}
5149 
5150       s_entry_bb = inner->entry;
5151       s_exit_bb = inner->exit;
5152 
5153       t = gimple_block_label (s_entry_bb);
5154       u = build_int_cst (unsigned_type_node, casei);
5155       u = build_case_label (u, NULL, t);
5156       label_vec.quick_push (u);
5157 
5158       si = gsi_last_bb (s_entry_bb);
5159       gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5160       gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5161       gsi_remove (&si, true);
5162       single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5163 
5164       if (s_exit_bb == NULL)
5165 	continue;
5166 
5167       si = gsi_last_bb (s_exit_bb);
5168       gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5169       gsi_remove (&si, true);
5170 
5171       single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5172     }
5173 
5174   /* Error handling code goes in DEFAULT_BB.  */
5175   t = gimple_block_label (default_bb);
5176   u = build_case_label (NULL, NULL, t);
5177   make_edge (l0_bb, default_bb, 0);
5178 
5179   stmt = gimple_build_switch (vmain, u, label_vec);
5180   gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5181   gsi_remove (&switch_si, true);
5182   label_vec.release ();
5183 
5184   si = gsi_start_bb (default_bb);
5185   stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5186   gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5187 
5188   if (exit_reachable)
5189     {
5190       tree bfn_decl;
5191 
5192       /* Code to get the next section goes in L1_BB.  */
5193       si = gsi_last_bb (l1_bb);
5194       gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5195 
5196       bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5197       stmt = gimple_build_call (bfn_decl, 0);
5198       gimple_call_set_lhs (stmt, vnext);
5199       gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5200       gsi_remove (&si, true);
5201 
5202       single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5203     }
5204 
5205   /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
5206   si = gsi_last_bb (l2_bb);
5207   if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5208     t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5209   else
5210     t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5211   stmt = gimple_build_call (t, 0);
5212   gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5213   gsi_remove (&si, true);
5214 
5215   set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5216 }
5217 
5218 
5219 /* Expand code for an OpenMP single directive.  We've already expanded
5220    much of the code; here we simply place the GOMP_barrier call.  */
5221 
5222 static void
5223 expand_omp_single (struct omp_region *region)
5224 {
5225   basic_block entry_bb, exit_bb;
5226   gimple_stmt_iterator si;
5227   bool need_barrier = false;
5228 
5229   entry_bb = region->entry;
5230   exit_bb = region->exit;
5231 
5232   si = gsi_last_bb (entry_bb);
5233   /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
5234      be removed.  We need to ensure that the thread that entered the single
5235      does not exit before the data is copied out by the other threads.  */
5236   if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
5237 		       OMP_CLAUSE_COPYPRIVATE))
5238     need_barrier = true;
5239   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
5240   gsi_remove (&si, true);
5241   single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5242 
5243   si = gsi_last_bb (exit_bb);
5244   if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
5245     force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5246 			      false, GSI_SAME_STMT);
5247   gsi_remove (&si, true);
5248   single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5249 }
5250 
5251 
5252 /* Generic expansion for OpenMP synchronization directives: master,
5253    ordered and critical.  All we need to do here is remove the entry
5254    and exit markers for REGION.  */
5255 
5256 static void
5257 expand_omp_synch (struct omp_region *region)
5258 {
5259   basic_block entry_bb, exit_bb;
5260   gimple_stmt_iterator si;
5261 
5262   entry_bb = region->entry;
5263   exit_bb = region->exit;
5264 
5265   si = gsi_last_bb (entry_bb);
5266   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
5267 	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
5268 	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
5269 	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
5270   gsi_remove (&si, true);
5271   single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5272 
5273   if (exit_bb)
5274     {
5275       si = gsi_last_bb (exit_bb);
5276       gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5277       gsi_remove (&si, true);
5278       single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5279     }
5280 }
5281 
5282 /* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
5283    operation as a normal volatile load.  */
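
/* For illustration only (a sketch, not from the original comments): for a
   4-byte integer X, an atomic read such as

	#pragma omp atomic read
	  v = x;

   is implemented here roughly as

	v = __atomic_load_4 (&x, MEMMODEL_RELAXED);  */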
5284 
5285 static bool
5286 expand_omp_atomic_load (basic_block load_bb, tree addr,
5287 			tree loaded_val, int index)
5288 {
5289   enum built_in_function tmpbase;
5290   gimple_stmt_iterator gsi;
5291   basic_block store_bb;
5292   location_t loc;
5293   gimple stmt;
5294   tree decl, call, type, itype;
5295 
5296   gsi = gsi_last_bb (load_bb);
5297   stmt = gsi_stmt (gsi);
5298   gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5299   loc = gimple_location (stmt);
5300 
5301   /* ??? If the target does not implement atomic_load_optab[mode], and mode
5302      is smaller than word size, then expand_atomic_load assumes that the load
5303      is atomic.  We could avoid the builtin entirely in this case.  */
5304 
5305   tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5306   decl = builtin_decl_explicit (tmpbase);
5307   if (decl == NULL_TREE)
5308     return false;
5309 
5310   type = TREE_TYPE (loaded_val);
5311   itype = TREE_TYPE (TREE_TYPE (decl));
5312 
5313   call = build_call_expr_loc (loc, decl, 2, addr,
5314 			      build_int_cst (NULL, MEMMODEL_RELAXED));
5315   if (!useless_type_conversion_p (type, itype))
5316     call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5317   call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5318 
5319   force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5320   gsi_remove (&gsi, true);
5321 
5322   store_bb = single_succ (load_bb);
5323   gsi = gsi_last_bb (store_bb);
5324   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5325   gsi_remove (&gsi, true);
5326 
5327   if (gimple_in_ssa_p (cfun))
5328     update_ssa (TODO_update_ssa_no_phi);
5329 
5330   return true;
5331 }
5332 
5333 /* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
5334    operation as a normal volatile store.  */
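
/* A hedged sketch (not from the original comments): an atomic write such as

	#pragma omp atomic write
	  x = expr;

   on a 4-byte X becomes roughly

	__atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   while a capture that also needs the old value uses the corresponding
   __atomic_exchange_N builtin instead.  */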
5335 
5336 static bool
5337 expand_omp_atomic_store (basic_block load_bb, tree addr,
5338 			 tree loaded_val, tree stored_val, int index)
5339 {
5340   enum built_in_function tmpbase;
5341   gimple_stmt_iterator gsi;
5342   basic_block store_bb = single_succ (load_bb);
5343   location_t loc;
5344   gimple stmt;
5345   tree decl, call, type, itype;
5346   enum machine_mode imode;
5347   bool exchange;
5348 
5349   gsi = gsi_last_bb (load_bb);
5350   stmt = gsi_stmt (gsi);
5351   gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5352 
5353   /* If the load value is needed, then this isn't a store but an exchange.  */
5354   exchange = gimple_omp_atomic_need_value_p (stmt);
5355 
5356   gsi = gsi_last_bb (store_bb);
5357   stmt = gsi_stmt (gsi);
5358   gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5359   loc = gimple_location (stmt);
5360 
5361   /* ??? If the target does not implement atomic_store_optab[mode], and mode
5362      is smaller than word size, then expand_atomic_store assumes that the store
5363      is atomic.  We could avoid the builtin entirely in this case.  */
5364 
5365   tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5366   tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5367   decl = builtin_decl_explicit (tmpbase);
5368   if (decl == NULL_TREE)
5369     return false;
5370 
5371   type = TREE_TYPE (stored_val);
5372 
5373   /* Dig out the type of the function's second argument.  */
5374   itype = TREE_TYPE (decl);
5375   itype = TYPE_ARG_TYPES (itype);
5376   itype = TREE_CHAIN (itype);
5377   itype = TREE_VALUE (itype);
5378   imode = TYPE_MODE (itype);
5379 
5380   if (exchange && !can_atomic_exchange_p (imode, true))
5381     return false;
5382 
5383   if (!useless_type_conversion_p (itype, type))
5384     stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5385   call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5386 			      build_int_cst (NULL, MEMMODEL_RELAXED));
5387   if (exchange)
5388     {
5389       if (!useless_type_conversion_p (type, itype))
5390 	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5391       call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5392     }
5393 
5394   force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5395   gsi_remove (&gsi, true);
5396 
5397   /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
5398   gsi = gsi_last_bb (load_bb);
5399   gsi_remove (&gsi, true);
5400 
5401   if (gimple_in_ssa_p (cfun))
5402     update_ssa (TODO_update_ssa_no_phi);
5403 
5404   return true;
5405 }
5406 
5407 /* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
5408    operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
5409    size of the data type, and thus usable to find the index of the builtin
5410    decl.  Returns false if the expression is not of the proper form.  */
5411 
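
/* For example (an illustrative sketch, not part of the original text):

	#pragma omp atomic
	  x += n;

   with a 4-byte X, and with neither the old nor the new value needed,
   maps to roughly

	__atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);  */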
5412 static bool
5413 expand_omp_atomic_fetch_op (basic_block load_bb,
5414 			    tree addr, tree loaded_val,
5415 			    tree stored_val, int index)
5416 {
5417   enum built_in_function oldbase, newbase, tmpbase;
5418   tree decl, itype, call;
5419   tree lhs, rhs;
5420   basic_block store_bb = single_succ (load_bb);
5421   gimple_stmt_iterator gsi;
5422   gimple stmt;
5423   location_t loc;
5424   enum tree_code code;
5425   bool need_old, need_new;
5426   enum machine_mode imode;
5427 
5428   /* We expect to find the following sequences:
5429 
5430    load_bb:
5431        GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5432 
5433    store_bb:
5434        val = tmp OP something; (or: something OP tmp)
5435        GIMPLE_OMP_ATOMIC_STORE (val)
5436 
5437   ???FIXME: Allow a more flexible sequence.
5438   Perhaps use data flow to pick the statements.
5439 
5440   */
5441 
5442   gsi = gsi_after_labels (store_bb);
5443   stmt = gsi_stmt (gsi);
5444   loc = gimple_location (stmt);
5445   if (!is_gimple_assign (stmt))
5446     return false;
5447   gsi_next (&gsi);
5448   if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5449     return false;
5450   need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5451   need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5452   gcc_checking_assert (!need_old || !need_new);
5453 
5454   if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5455     return false;
5456 
5457   /* Check for one of the supported fetch-op operations.  */
5458   code = gimple_assign_rhs_code (stmt);
5459   switch (code)
5460     {
5461     case PLUS_EXPR:
5462     case POINTER_PLUS_EXPR:
5463       oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5464       newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5465       break;
5466     case MINUS_EXPR:
5467       oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5468       newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5469       break;
5470     case BIT_AND_EXPR:
5471       oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5472       newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5473       break;
5474     case BIT_IOR_EXPR:
5475       oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5476       newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5477       break;
5478     case BIT_XOR_EXPR:
5479       oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5480       newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5481       break;
5482     default:
5483       return false;
5484     }
5485 
5486   /* Make sure the expression is of the proper form.  */
5487   if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5488     rhs = gimple_assign_rhs2 (stmt);
5489   else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5490 	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5491     rhs = gimple_assign_rhs1 (stmt);
5492   else
5493     return false;
5494 
5495   tmpbase = ((enum built_in_function)
5496 	     ((need_new ? newbase : oldbase) + index + 1));
5497   decl = builtin_decl_explicit (tmpbase);
5498   if (decl == NULL_TREE)
5499     return false;
5500   itype = TREE_TYPE (TREE_TYPE (decl));
5501   imode = TYPE_MODE (itype);
5502 
5503   /* We could test all of the various optabs involved, but the fact of the
5504      matter is that (with the exception of i486 vs i586 and xadd) all targets
5505      that support any atomic operation optab also implement compare-and-swap.
5506      Let optabs.c take care of expanding any compare-and-swap loop.  */
5507   if (!can_compare_and_swap_p (imode, true))
5508     return false;
5509 
5510   gsi = gsi_last_bb (load_bb);
5511   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5512 
5513   /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5514      It only requires that the operation happen atomically.  Thus we can
5515      use the RELAXED memory model.  */
5516   call = build_call_expr_loc (loc, decl, 3, addr,
5517 			      fold_convert_loc (loc, itype, rhs),
5518 			      build_int_cst (NULL, MEMMODEL_RELAXED));
5519 
5520   if (need_old || need_new)
5521     {
5522       lhs = need_old ? loaded_val : stored_val;
5523       call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5524       call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5525     }
5526   else
5527     call = fold_convert_loc (loc, void_type_node, call);
5528   force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5529   gsi_remove (&gsi, true);
5530 
5531   gsi = gsi_last_bb (store_bb);
5532   gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5533   gsi_remove (&gsi, true);
5534   gsi = gsi_last_bb (store_bb);
5535   gsi_remove (&gsi, true);
5536 
5537   if (gimple_in_ssa_p (cfun))
5538     update_ssa (TODO_update_ssa_no_phi);
5539 
5540   return true;
5541 }
5542 
5543 /* A subroutine of expand_omp_atomic.  Implement the atomic operation as:
5544 
5545       oldval = *addr;
5546       repeat:
5547         newval = rhs;	 // with oldval replacing *addr in rhs
5548 	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5549 	if (oldval != newval)
5550 	  goto repeat;
5551 
5552    INDEX is log2 of the size of the data type, and thus usable to find the
5553    index of the builtin decl.  */
5554 
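
/* An illustrative sketch (not part of the original comment): for a 4-byte
   float X, INDEX is 2, so CMPXCHG is __sync_val_compare_and_swap_4; ADDR is
   reinterpreted as a pointer to a 4-byte integer type, the loaded and stored
   values are integer view-converts of the float values, and the loop exit
   test is therefore a plain integer comparison.  */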
5555 static bool
5556 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5557 			    tree addr, tree loaded_val, tree stored_val,
5558 			    int index)
5559 {
5560   tree loadedi, storedi, initial, new_storedi, old_vali;
5561   tree type, itype, cmpxchg, iaddr;
5562   gimple_stmt_iterator si;
5563   basic_block loop_header = single_succ (load_bb);
5564   gimple phi, stmt;
5565   edge e;
5566   enum built_in_function fncode;
5567 
5568   /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5569      order to use the RELAXED memory model effectively.  */
5570   fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5571 				    + index + 1);
5572   cmpxchg = builtin_decl_explicit (fncode);
5573   if (cmpxchg == NULL_TREE)
5574     return false;
5575   type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5576   itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5577 
5578   if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5579     return false;
5580 
5581   /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
5582   si = gsi_last_bb (load_bb);
5583   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5584 
5585   /* For floating-point values, we'll need to view-convert them to integers
5586      so that we can perform the atomic compare and swap.  Simplify the
5587      following code by always setting up the "i"ntegral variables.  */
5588   if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5589     {
5590       tree iaddr_val;
5591 
5592       iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5593 							   true), NULL);
5594       iaddr_val
5595 	= force_gimple_operand_gsi (&si,
5596 				    fold_convert (TREE_TYPE (iaddr), addr),
5597 				    false, NULL_TREE, true, GSI_SAME_STMT);
5598       stmt = gimple_build_assign (iaddr, iaddr_val);
5599       gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5600       loadedi = create_tmp_var (itype, NULL);
5601       if (gimple_in_ssa_p (cfun))
5602 	loadedi = make_ssa_name (loadedi, NULL);
5603     }
5604   else
5605     {
5606       iaddr = addr;
5607       loadedi = loaded_val;
5608     }
5609 
5610   initial
5611     = force_gimple_operand_gsi (&si,
5612 				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5613 					iaddr,
5614 					build_int_cst (TREE_TYPE (iaddr), 0)),
5615 				true, NULL_TREE, true, GSI_SAME_STMT);
5616 
5617   /* Move the value to the LOADEDI temporary.  */
5618   if (gimple_in_ssa_p (cfun))
5619     {
5620       gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5621       phi = create_phi_node (loadedi, loop_header);
5622       SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5623 	       initial);
5624     }
5625   else
5626     gsi_insert_before (&si,
5627 		       gimple_build_assign (loadedi, initial),
5628 		       GSI_SAME_STMT);
5629   if (loadedi != loaded_val)
5630     {
5631       gimple_stmt_iterator gsi2;
5632       tree x;
5633 
5634       x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5635       gsi2 = gsi_start_bb (loop_header);
5636       if (gimple_in_ssa_p (cfun))
5637 	{
5638 	  gimple stmt;
5639 	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5640 					true, GSI_SAME_STMT);
5641 	  stmt = gimple_build_assign (loaded_val, x);
5642 	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5643 	}
5644       else
5645 	{
5646 	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5647 	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5648 				    true, GSI_SAME_STMT);
5649 	}
5650     }
5651   gsi_remove (&si, true);
5652 
5653   si = gsi_last_bb (store_bb);
5654   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5655 
5656   if (iaddr == addr)
5657     storedi = stored_val;
5658   else
5659     storedi =
5660       force_gimple_operand_gsi (&si,
5661 				build1 (VIEW_CONVERT_EXPR, itype,
5662 					stored_val), true, NULL_TREE, true,
5663 				GSI_SAME_STMT);
5664 
5665   /* Build the compare&swap statement.  */
5666   new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5667   new_storedi = force_gimple_operand_gsi (&si,
5668 					  fold_convert (TREE_TYPE (loadedi),
5669 							new_storedi),
5670 					  true, NULL_TREE,
5671 					  true, GSI_SAME_STMT);
5672 
5673   if (gimple_in_ssa_p (cfun))
5674     old_vali = loadedi;
5675   else
5676     {
5677       old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5678       stmt = gimple_build_assign (old_vali, loadedi);
5679       gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5680 
5681       stmt = gimple_build_assign (loadedi, new_storedi);
5682       gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5683     }
5684 
5685   /* Note that we always perform the comparison as an integer, even for
5686      floating point.  This allows the atomic operation to properly
5687      succeed even with NaNs and -0.0.  */
5688   stmt = gimple_build_cond_empty
5689            (build2 (NE_EXPR, boolean_type_node,
5690 		    new_storedi, old_vali));
5691   gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5692 
5693   /* Update cfg.  */
5694   e = single_succ_edge (store_bb);
5695   e->flags &= ~EDGE_FALLTHRU;
5696   e->flags |= EDGE_FALSE_VALUE;
5697 
5698   e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5699 
5700   /* Copy the new value to loadedi (we already did that before the condition
5701      if we are not in SSA).  */
5702   if (gimple_in_ssa_p (cfun))
5703     {
5704       phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5705       SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5706     }
5707 
5708   /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
5709   gsi_remove (&si, true);
5710 
5711   if (gimple_in_ssa_p (cfun))
5712     update_ssa (TODO_update_ssa_no_phi);
5713 
5714   return true;
5715 }
5716 
5717 /* A subroutine of expand_omp_atomic.  Implement the atomic operation as:
5718 
5719 		 		  GOMP_atomic_start ();
5720 		 		  *addr = rhs;
5721 		 		  GOMP_atomic_end ();
5722 
5723    The result is not globally atomic, but works so long as all parallel
5724    references are within #pragma omp atomic directives.  According to
5725    responses received from omp@openmp.org, appears to be within spec.
5726    responses received from omp@openmp.org, this appears to be within spec,
5727    which makes sense, since that's how several other compilers handle
5728    this situation as well.
5729    expanding.  STORED_VAL is the operand of the matching
5730    GIMPLE_OMP_ATOMIC_STORE.
5731 
5732    We replace
5733    GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5734    loaded_val = *addr;
5735 
5736    and replace
5737    GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
5738    *addr = stored_val;
5739 */
5740 
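
/* As a hedged example (not part of the original comment): for

	#pragma omp atomic
	  x = x * y;

   when none of the specialized expansions apply, the region ends up as
   approximately

	GOMP_atomic_start ();
	loaded_val = *addr;
	stored_val = loaded_val * y;
	*addr = stored_val;
	GOMP_atomic_end ();

   where the middle statement is the untouched update already present
   between the load and the store.  */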
5741 static bool
5742 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5743 			 tree addr, tree loaded_val, tree stored_val)
5744 {
5745   gimple_stmt_iterator si;
5746   gimple stmt;
5747   tree t;
5748 
5749   si = gsi_last_bb (load_bb);
5750   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5751 
5752   t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5753   t = build_call_expr (t, 0);
5754   force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5755 
5756   stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5757   gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5758   gsi_remove (&si, true);
5759 
5760   si = gsi_last_bb (store_bb);
5761   gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5762 
5763   stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5764 			      stored_val);
5765   gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5766 
5767   t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5768   t = build_call_expr (t, 0);
5769   force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5770   gsi_remove (&si, true);
5771 
5772   if (gimple_in_ssa_p (cfun))
5773     update_ssa (TODO_update_ssa_no_phi);
5774   return true;
5775 }
5776 
5777 /* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
5778    using expand_omp_atomic_fetch_op.  If that fails, we try to
5779    call expand_omp_atomic_pipeline, and if that fails too, the
5780    ultimate fallback is wrapping the operation in a mutex
5781    (expand_omp_atomic_mutex).  REGION is the atomic region built
5782    by build_omp_regions_1().  */
5783 
5784 static void
5785 expand_omp_atomic (struct omp_region *region)
5786 {
5787   basic_block load_bb = region->entry, store_bb = region->exit;
5788   gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5789   tree loaded_val = gimple_omp_atomic_load_lhs (load);
5790   tree addr = gimple_omp_atomic_load_rhs (load);
5791   tree stored_val = gimple_omp_atomic_store_val (store);
5792   tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5793   HOST_WIDE_INT index;
5794 
5795   /* Make sure the type is one of the supported sizes.  */
5796   index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5797   index = exact_log2 (index);
5798   if (index >= 0 && index <= 4)
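  /* Illustrative note (not part of the original source): a 4-byte type
     yields index == 2, which the helper routines above use to select the
     corresponding *_4 builtin variants.  */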
5799     {
5800       unsigned int align = TYPE_ALIGN_UNIT (type);
5801 
5802       /* __sync builtins require strict data alignment.  */
5803       if (exact_log2 (align) >= index)
5804 	{
5805 	  /* Atomic load.  */
5806 	  if (loaded_val == stored_val
5807 	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5808 		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5809 	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5810 	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5811 	    return;
5812 
5813 	  /* Atomic store.  */
5814 	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5815 	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5816 	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5817 	      && store_bb == single_succ (load_bb)
5818 	      && first_stmt (store_bb) == store
5819 	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
5820 					  stored_val, index))
5821 	    return;
5822 
5823 	  /* When possible, use specialized atomic update functions.  */
5824 	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5825 	      && store_bb == single_succ (load_bb)
5826 	      && expand_omp_atomic_fetch_op (load_bb, addr,
5827 					     loaded_val, stored_val, index))
5828 	    return;
5829 
5830 	  /* If we don't have specialized __sync builtins, try to implement
5831 	     the operation as a compare-and-swap loop.  */
5832 	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5833 					  loaded_val, stored_val, index))
5834 	    return;
5835 	}
5836     }
5837 
5838   /* The ultimate fallback is wrapping the operation in a mutex.  */
5839   expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5840 }
5841 
5842 
5843 /* Expand the parallel region tree rooted at REGION.  Expansion
5844    proceeds in depth-first order.  Innermost regions are expanded
5845    first.  This way, parallel regions that require a new function to
5846    be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5847    internal dependencies in their body.  */
5848 
5849 static void
5850 expand_omp (struct omp_region *region)
5851 {
5852   while (region)
5853     {
5854       location_t saved_location;
5855 
5856       /* First, determine whether this is a combined parallel+workshare
5857        	 region.  */
5858       if (region->type == GIMPLE_OMP_PARALLEL)
5859 	determine_parallel_type (region);
5860 
5861       if (region->inner)
5862 	expand_omp (region->inner);
5863 
5864       saved_location = input_location;
5865       if (gimple_has_location (last_stmt (region->entry)))
5866 	input_location = gimple_location (last_stmt (region->entry));
5867 
5868       switch (region->type)
5869 	{
5870 	case GIMPLE_OMP_PARALLEL:
5871 	case GIMPLE_OMP_TASK:
5872 	  expand_omp_taskreg (region);
5873 	  break;
5874 
5875 	case GIMPLE_OMP_FOR:
5876 	  expand_omp_for (region);
5877 	  break;
5878 
5879 	case GIMPLE_OMP_SECTIONS:
5880 	  expand_omp_sections (region);
5881 	  break;
5882 
5883 	case GIMPLE_OMP_SECTION:
5884 	  /* Individual omp sections are handled together with their
5885 	     parent GIMPLE_OMP_SECTIONS region.  */
5886 	  break;
5887 
5888 	case GIMPLE_OMP_SINGLE:
5889 	  expand_omp_single (region);
5890 	  break;
5891 
5892 	case GIMPLE_OMP_MASTER:
5893 	case GIMPLE_OMP_ORDERED:
5894 	case GIMPLE_OMP_CRITICAL:
5895 	  expand_omp_synch (region);
5896 	  break;
5897 
5898 	case GIMPLE_OMP_ATOMIC_LOAD:
5899 	  expand_omp_atomic (region);
5900 	  break;
5901 
5902 	default:
5903 	  gcc_unreachable ();
5904 	}
5905 
5906       input_location = saved_location;
5907       region = region->next;
5908     }
5909 }
5910 
5911 
5912 /* Helper for build_omp_regions.  Scan the dominator tree starting at
5913    block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
5914    true, the function ends once a single tree is built (otherwise, whole
5915    true, the function ends once a single tree is built (otherwise, the
5916    whole forest of OMP constructs may be built).  */
5917 static void
5918 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5919 		     bool single_tree)
5920 {
5921   gimple_stmt_iterator gsi;
5922   gimple stmt;
5923   basic_block son;
5924 
5925   gsi = gsi_last_bb (bb);
5926   if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5927     {
5928       struct omp_region *region;
5929       enum gimple_code code;
5930 
5931       stmt = gsi_stmt (gsi);
5932       code = gimple_code (stmt);
5933       if (code == GIMPLE_OMP_RETURN)
5934 	{
5935 	  /* STMT is the return point out of region PARENT.  Mark it
5936 	     as the exit point and make PARENT the immediately
5937 	     enclosing region.  */
5938 	  gcc_assert (parent);
5939 	  region = parent;
5940 	  region->exit = bb;
5941 	  parent = parent->outer;
5942 	}
5943       else if (code == GIMPLE_OMP_ATOMIC_STORE)
5944 	{
5945 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5946 	     GIMPLE_OMP_RETURN, but matches with
5947 	     GIMPLE_OMP_ATOMIC_LOAD.  */
5948 	  gcc_assert (parent);
5949 	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5950 	  region = parent;
5951 	  region->exit = bb;
5952 	  parent = parent->outer;
5953 	}
5954 
5955       else if (code == GIMPLE_OMP_CONTINUE)
5956 	{
5957 	  gcc_assert (parent);
5958 	  parent->cont = bb;
5959 	}
5960       else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5961 	{
5962 	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5963 	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
5964 	  ;
5965 	}
5966       else
5967 	{
5968 	  /* Otherwise, this directive becomes the parent for a new
5969 	     region.  */
5970 	  region = new_omp_region (bb, code, parent);
5971 	  parent = region;
5972 	}
5973     }
5974 
5975   if (single_tree && !parent)
5976     return;
5977 
5978   for (son = first_dom_son (CDI_DOMINATORS, bb);
5979        son;
5980        son = next_dom_son (CDI_DOMINATORS, son))
5981     build_omp_regions_1 (son, parent, single_tree);
5982 }
5983 
5984 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5985    root_omp_region.  */
5986 
5987 static void
5988 build_omp_regions_root (basic_block root)
5989 {
5990   gcc_assert (root_omp_region == NULL);
5991   build_omp_regions_1 (root, NULL, true);
5992   gcc_assert (root_omp_region != NULL);
5993 }
5994 
5995 /* Expands the OMP construct (and its subconstructs) starting in HEAD.  */
5996 
5997 void
5998 omp_expand_local (basic_block head)
5999 {
6000   build_omp_regions_root (head);
6001   if (dump_file && (dump_flags & TDF_DETAILS))
6002     {
6003       fprintf (dump_file, "\nOMP region tree\n\n");
6004       dump_omp_region (dump_file, root_omp_region, 0);
6005       fprintf (dump_file, "\n");
6006     }
6007 
6008   remove_exit_barriers (root_omp_region);
6009   expand_omp (root_omp_region);
6010 
6011   free_omp_regions ();
6012 }
6013 
6014 /* Scan the CFG and build a tree of OMP regions.  The root of the
6015    region tree is stored in root_omp_region.  */
6016 
6017 static void
6018 build_omp_regions (void)
6019 {
6020   gcc_assert (root_omp_region == NULL);
6021   calculate_dominance_info (CDI_DOMINATORS);
6022   build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
6023 }
6024 
6025 /* Main entry point for expanding OMP-GIMPLE into runtime calls.  */
6026 
6027 static unsigned int
6028 execute_expand_omp (void)
6029 {
6030   build_omp_regions ();
6031 
6032   if (!root_omp_region)
6033     return 0;
6034 
6035   if (dump_file)
6036     {
6037       fprintf (dump_file, "\nOMP region tree\n\n");
6038       dump_omp_region (dump_file, root_omp_region, 0);
6039       fprintf (dump_file, "\n");
6040     }
6041 
6042   remove_exit_barriers (root_omp_region);
6043 
6044   expand_omp (root_omp_region);
6045 
6046   cleanup_tree_cfg ();
6047 
6048   free_omp_regions ();
6049 
6050   return 0;
6051 }
6052 
6053 /* OMP expansion -- the default pass, run before creation of SSA form.  */
6054 
6055 static bool
6056 gate_expand_omp (void)
6057 {
6058   return (flag_openmp != 0 && !seen_error ());
6059 }
6060 
6061 struct gimple_opt_pass pass_expand_omp =
6062 {
6063  {
6064   GIMPLE_PASS,
6065   "ompexp",				/* name */
6066   OPTGROUP_NONE,                        /* optinfo_flags */
6067   gate_expand_omp,			/* gate */
6068   execute_expand_omp,			/* execute */
6069   NULL,					/* sub */
6070   NULL,					/* next */
6071   0,					/* static_pass_number */
6072   TV_NONE,				/* tv_id */
6073   PROP_gimple_any,			/* properties_required */
6074   0,					/* properties_provided */
6075   0,					/* properties_destroyed */
6076   0,					/* todo_flags_start */
6077   0                      		/* todo_flags_finish */
6078  }
6079 };
6080 
6081 /* Routines to lower OpenMP directives into OMP-GIMPLE.  */
6082 
6083 /* Lower the OpenMP sections directive in the current statement in GSI_P.
6084    CTX is the enclosing OMP context for the current statement.  */
6085 
6086 static void
6087 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6088 {
6089   tree block, control;
6090   gimple_stmt_iterator tgsi;
6091   gimple stmt, new_stmt, bind, t;
6092   gimple_seq ilist, dlist, olist, new_body;
6093   struct gimplify_ctx gctx;
6094 
6095   stmt = gsi_stmt (*gsi_p);
6096 
6097   push_gimplify_context (&gctx);
6098 
6099   dlist = NULL;
6100   ilist = NULL;
6101   lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6102       			   &ilist, &dlist, ctx);
6103 
6104   new_body = gimple_omp_body (stmt);
6105   gimple_omp_set_body (stmt, NULL);
6106   tgsi = gsi_start (new_body);
6107   for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
6108     {
6109       omp_context *sctx;
6110       gimple sec_start;
6111 
6112       sec_start = gsi_stmt (tgsi);
6113       sctx = maybe_lookup_ctx (sec_start);
6114       gcc_assert (sctx);
6115 
6116       lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6117       gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6118 			    GSI_CONTINUE_LINKING);
6119       gimple_omp_set_body (sec_start, NULL);
6120 
6121       if (gsi_one_before_end_p (tgsi))
6122 	{
6123 	  gimple_seq l = NULL;
6124 	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
6125 				     &l, ctx);
6126 	  gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
6127 	  gimple_omp_section_set_last (sec_start);
6128 	}
6129 
6130       gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6131 			GSI_CONTINUE_LINKING);
6132     }
6133 
6134   block = make_node (BLOCK);
6135   bind = gimple_build_bind (NULL, new_body, block);
6136 
6137   olist = NULL;
6138   lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
6139 
6140   block = make_node (BLOCK);
6141   new_stmt = gimple_build_bind (NULL, NULL, block);
6142   gsi_replace (gsi_p, new_stmt, true);
6143 
6144   pop_gimplify_context (new_stmt);
6145   gimple_bind_append_vars (new_stmt, ctx->block_vars);
6146   BLOCK_VARS (block) = gimple_bind_vars (bind);
6147   if (BLOCK_VARS (block))
6148     TREE_USED (block) = 1;
6149 
6150   new_body = NULL;
6151   gimple_seq_add_seq (&new_body, ilist);
6152   gimple_seq_add_stmt (&new_body, stmt);
6153   gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6154   gimple_seq_add_stmt (&new_body, bind);
6155 
6156   control = create_tmp_var (unsigned_type_node, ".section");
6157   t = gimple_build_omp_continue (control, control);
6158   gimple_omp_sections_set_control (stmt, control);
6159   gimple_seq_add_stmt (&new_body, t);
6160 
6161   gimple_seq_add_seq (&new_body, olist);
6162   gimple_seq_add_seq (&new_body, dlist);
6163 
6164   new_body = maybe_catch_exception (new_body);
6165 
6166   t = gimple_build_omp_return
6167         (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6168 			    OMP_CLAUSE_NOWAIT));
6169   gimple_seq_add_stmt (&new_body, t);
6170 
6171   gimple_bind_set_body (new_stmt, new_body);
6172 }
6173 
6174 
6175 /* A subroutine of lower_omp_single.  Expand the simple form of
6176    a GIMPLE_OMP_SINGLE, without a copyprivate clause:
6177 
6178      	if (GOMP_single_start ())
6179 	  BODY;
6180 	[ GOMP_barrier (); ]	-> unless 'nowait' is present.
6181 
6182   FIXME.  It may be better to delay expanding the logic of this until
6183   pass_expand_omp.  The expanded logic may make the job more difficult
6184   to a synchronization analysis pass.  */
6185   for a synchronization analysis pass.  */
6186 static void
6187 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
6188 {
6189   location_t loc = gimple_location (single_stmt);
6190   tree tlabel = create_artificial_label (loc);
6191   tree flabel = create_artificial_label (loc);
6192   gimple call, cond;
6193   tree lhs, decl;
6194 
6195   decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
6196   lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6197   call = gimple_build_call (decl, 0);
6198   gimple_call_set_lhs (call, lhs);
6199   gimple_seq_add_stmt (pre_p, call);
6200 
6201   cond = gimple_build_cond (EQ_EXPR, lhs,
6202 			    fold_convert_loc (loc, TREE_TYPE (lhs),
6203 					      boolean_true_node),
6204 			    tlabel, flabel);
6205   gimple_seq_add_stmt (pre_p, cond);
6206   gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
6207   gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
6208   gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
6209 }
6210 
6211 
6212 /* A subroutine of lower_omp_single.  Expand the simple form of
6213    a GIMPLE_OMP_SINGLE, with a copyprivate clause:
6214 
6215 	#pragma omp single copyprivate (a, b, c)
6216 
6217    Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
6218 
6219       {
6220 	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
6221 	  {
6222 	    BODY;
6223 	    copyout.a = a;
6224 	    copyout.b = b;
6225 	    copyout.c = c;
6226 	    GOMP_single_copy_end (&copyout);
6227 	  }
6228 	else
6229 	  {
6230 	    a = copyout_p->a;
6231 	    b = copyout_p->b;
6232 	    c = copyout_p->c;
6233 	  }
6234 	GOMP_barrier ();
6235       }
6236 
6237   FIXME.  It may be better to delay expanding the logic of this until
6238   pass_expand_omp.  The expanded logic may make the job more difficult
6239   for a synchronization analysis pass.  */
6240 
6241 static void
6242 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
6243 {
6244   tree ptr_type, t, l0, l1, l2, bfn_decl;
6245   gimple_seq copyin_seq;
6246   location_t loc = gimple_location (single_stmt);
6247 
6248   ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
6249 
6250   ptr_type = build_pointer_type (ctx->record_type);
6251   ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
6252 
6253   l0 = create_artificial_label (loc);
6254   l1 = create_artificial_label (loc);
6255   l2 = create_artificial_label (loc);
6256 
6257   bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
6258   t = build_call_expr_loc (loc, bfn_decl, 0);
6259   t = fold_convert_loc (loc, ptr_type, t);
6260   gimplify_assign (ctx->receiver_decl, t, pre_p);
6261 
6262   t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
6263 	      build_int_cst (ptr_type, 0));
6264   t = build3 (COND_EXPR, void_type_node, t,
6265 	      build_and_jump (&l0), build_and_jump (&l1));
6266   gimplify_and_add (t, pre_p);
6267 
6268   gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
6269 
6270   gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
6271 
6272   copyin_seq = NULL;
6273   lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
6274 			      &copyin_seq, ctx);
6275 
6276   t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6277   bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
6278   t = build_call_expr_loc (loc, bfn_decl, 1, t);
6279   gimplify_and_add (t, pre_p);
6280 
6281   t = build_and_jump (&l2);
6282   gimplify_and_add (t, pre_p);
6283 
6284   gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
6285 
6286   gimple_seq_add_seq (pre_p, copyin_seq);
6287 
6288   gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
6289 }
6290 
6291 
6292 /* Lower code for an OpenMP single directive.  */
6293 
6294 static void
6295 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6296 {
6297   tree block;
6298   gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6299   gimple_seq bind_body, dlist;
6300   struct gimplify_ctx gctx;
6301 
6302   push_gimplify_context (&gctx);
6303 
6304   block = make_node (BLOCK);
6305   bind = gimple_build_bind (NULL, NULL, block);
6306   gsi_replace (gsi_p, bind, true);
6307   bind_body = NULL;
6308   dlist = NULL;
6309   lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6310 			   &bind_body, &dlist, ctx);
6311   lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6312 
6313   gimple_seq_add_stmt (&bind_body, single_stmt);
6314 
6315   if (ctx->record_type)
6316     lower_omp_single_copy (single_stmt, &bind_body, ctx);
6317   else
6318     lower_omp_single_simple (single_stmt, &bind_body);
6319 
6320   gimple_omp_set_body (single_stmt, NULL);
6321 
6322   gimple_seq_add_seq (&bind_body, dlist);
6323 
6324   bind_body = maybe_catch_exception (bind_body);
6325 
6326   t = gimple_build_omp_return
6327         (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6328 			    OMP_CLAUSE_NOWAIT));
6329   gimple_seq_add_stmt (&bind_body, t);
6330   gimple_bind_set_body (bind, bind_body);
6331 
6332   pop_gimplify_context (bind);
6333 
6334   gimple_bind_append_vars (bind, ctx->block_vars);
6335   BLOCK_VARS (block) = ctx->block_vars;
6336   if (BLOCK_VARS (block))
6337     TREE_USED (block) = 1;
6338 }
6339 
6340 
6341 /* Lower code for an OpenMP master directive.  */
6342 
6343 static void
6344 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6345 {
6346   tree block, lab = NULL, x, bfn_decl;
6347   gimple stmt = gsi_stmt (*gsi_p), bind;
6348   location_t loc = gimple_location (stmt);
6349   gimple_seq tseq;
6350   struct gimplify_ctx gctx;
6351 
6352   push_gimplify_context (&gctx);
6353 
6354   block = make_node (BLOCK);
6355   bind = gimple_build_bind (NULL, NULL, block);
6356   gsi_replace (gsi_p, bind, true);
6357   gimple_bind_add_stmt (bind, stmt);
6358 
6359   bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6360   x = build_call_expr_loc (loc, bfn_decl, 0);
6361   x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6362   x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6363   tseq = NULL;
6364   gimplify_and_add (x, &tseq);
6365   gimple_bind_add_seq (bind, tseq);
6366 
6367   lower_omp (gimple_omp_body_ptr (stmt), ctx);
6368   gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6369   gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6370   gimple_omp_set_body (stmt, NULL);
6371 
6372   gimple_bind_add_stmt (bind, gimple_build_label (lab));
6373 
6374   gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6375 
6376   pop_gimplify_context (bind);
6377 
6378   gimple_bind_append_vars (bind, ctx->block_vars);
6379   BLOCK_VARS (block) = ctx->block_vars;
6380 }
6381 
6382 
6383 /* Lower code for an OpenMP ordered directive.  */
6384 
6385 static void
6386 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6387 {
6388   tree block;
6389   gimple stmt = gsi_stmt (*gsi_p), bind, x;
6390   struct gimplify_ctx gctx;
6391 
6392   push_gimplify_context (&gctx);
6393 
6394   block = make_node (BLOCK);
6395   bind = gimple_build_bind (NULL, NULL, block);
6396   gsi_replace (gsi_p, bind, true);
6397   gimple_bind_add_stmt (bind, stmt);
6398 
6399   x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6400 			 0);
6401   gimple_bind_add_stmt (bind, x);
6402 
6403   lower_omp (gimple_omp_body_ptr (stmt), ctx);
6404   gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6405   gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6406   gimple_omp_set_body (stmt, NULL);
6407 
6408   x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6409   gimple_bind_add_stmt (bind, x);
6410 
6411   gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6412 
6413   pop_gimplify_context (bind);
6414 
6415   gimple_bind_append_vars (bind, ctx->block_vars);
6416   BLOCK_VARS (block) = gimple_bind_vars (bind);
6417 }
6418 
6419 
6420 /* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
6421    substitution of a couple of function calls.  But in the NAMED case,
6422    it requires that languages coordinate a symbol name.  It is therefore
6423    best put here in common code.  */
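
/* For instance (an illustrative sketch, not from the original comment),

	#pragma omp critical (lock1)
	  body;

   is lowered to roughly

	GOMP_critical_name_start (&.gomp_critical_user_lock1);
	body;
	GOMP_critical_name_end (&.gomp_critical_user_lock1);

   where .gomp_critical_user_lock1 is the common symbol created below and
   the name "lock1" is purely hypothetical.  */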
6424 
6425 static GTY((param1_is (tree), param2_is (tree)))
6426   splay_tree critical_name_mutexes;
6427 
6428 static void
6429 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6430 {
6431   tree block;
6432   tree name, lock, unlock;
6433   gimple stmt = gsi_stmt (*gsi_p), bind;
6434   location_t loc = gimple_location (stmt);
6435   gimple_seq tbody;
6436   struct gimplify_ctx gctx;
6437 
6438   name = gimple_omp_critical_name (stmt);
6439   if (name)
6440     {
6441       tree decl;
6442       splay_tree_node n;
6443 
6444       if (!critical_name_mutexes)
6445 	critical_name_mutexes
6446 	  = splay_tree_new_ggc (splay_tree_compare_pointers,
6447 				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6448 				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6449 
6450       n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6451       if (n == NULL)
6452 	{
6453 	  char *new_str;
6454 
6455 	  decl = create_tmp_var_raw (ptr_type_node, NULL);
6456 
6457 	  new_str = ACONCAT ((".gomp_critical_user_",
6458 			      IDENTIFIER_POINTER (name), NULL));
6459 	  DECL_NAME (decl) = get_identifier (new_str);
6460 	  TREE_PUBLIC (decl) = 1;
6461 	  TREE_STATIC (decl) = 1;
6462 	  DECL_COMMON (decl) = 1;
6463 	  DECL_ARTIFICIAL (decl) = 1;
6464 	  DECL_IGNORED_P (decl) = 1;
6465 	  varpool_finalize_decl (decl);
6466 
6467 	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6468 			     (splay_tree_value) decl);
6469 	}
6470       else
6471 	decl = (tree) n->value;
6472 
6473       lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6474       lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6475 
6476       unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6477       unlock = build_call_expr_loc (loc, unlock, 1,
6478 				build_fold_addr_expr_loc (loc, decl));
6479     }
6480   else
6481     {
6482       lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6483       lock = build_call_expr_loc (loc, lock, 0);
6484 
6485       unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6486       unlock = build_call_expr_loc (loc, unlock, 0);
6487     }
6488 
6489   push_gimplify_context (&gctx);
6490 
6491   block = make_node (BLOCK);
6492   bind = gimple_build_bind (NULL, NULL, block);
6493   gsi_replace (gsi_p, bind, true);
6494   gimple_bind_add_stmt (bind, stmt);
6495 
6496   tbody = gimple_bind_body (bind);
6497   gimplify_and_add (lock, &tbody);
6498   gimple_bind_set_body (bind, tbody);
6499 
6500   lower_omp (gimple_omp_body_ptr (stmt), ctx);
6501   gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6502   gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6503   gimple_omp_set_body (stmt, NULL);
6504 
6505   tbody = gimple_bind_body (bind);
6506   gimplify_and_add (unlock, &tbody);
6507   gimple_bind_set_body (bind, tbody);
6508 
6509   gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6510 
6511   pop_gimplify_context (bind);
6512   gimple_bind_append_vars (bind, ctx->block_vars);
6513   BLOCK_VARS (block) = gimple_bind_vars (bind);
6514 }
6515 
6516 
6517 /* A subroutine of lower_omp_for.  Generate code to emit the predicate
6518    for a lastprivate clause.  Given a loop control predicate of (V
6519    cond N2), we gate the clause on (!(V cond N2)).  The lowered form
6520    is appended to *DLIST, iterator initialization is appended to
6521    *BODY_P.  */
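
/* As an example (a sketch, not part of the original comment): for

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = f (i);

   the copy-out of the private X is guarded by roughly "if (i >= n)", and
   with a step of 1 the guard is strengthened to "i == n".  The names i, n,
   x and f are purely illustrative.  */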
6522 
6523 static void
6524 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6525 			   gimple_seq *dlist, struct omp_context *ctx)
6526 {
6527   tree clauses, cond, vinit;
6528   enum tree_code cond_code;
6529   gimple_seq stmts;
6530 
6531   cond_code = fd->loop.cond_code;
6532   cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6533 
6534   /* When possible, use a strict equality expression.  This can let
6535      VRP-style optimizations deduce the value and remove a copy.  */
6536   if (host_integerp (fd->loop.step, 0))
6537     {
6538       HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6539       if (step == 1 || step == -1)
6540 	cond_code = EQ_EXPR;
6541     }
6542 
6543   cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6544 
6545   clauses = gimple_omp_for_clauses (fd->for_stmt);
6546   stmts = NULL;
6547   lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6548   if (!gimple_seq_empty_p (stmts))
6549     {
6550       gimple_seq_add_seq (&stmts, *dlist);
6551       *dlist = stmts;
6552 
6553       /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
6554       vinit = fd->loop.n1;
6555       if (cond_code == EQ_EXPR
6556 	  && host_integerp (fd->loop.n2, 0)
6557 	  && ! integer_zerop (fd->loop.n2))
6558 	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6559 
6560       /* Initialize the iterator variable, so that threads that don't execute
6561 	 any iterations don't execute the lastprivate clauses by accident.  */
6562       gimplify_assign (fd->loop.v, vinit, body_p);
6563     }
6564 }
6565 
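/* An illustrative sketch of lower_omp_for_lastprivate above (hypothetical
   loop, not taken from a testcase): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = a[i];

   the control predicate is (i < n), so the copy-out emitted by
   lower_lastprivate_clauses is gated on its negation; with a step of 1
   that test is narrowed to the strict (i == n), conceptually

       if (i == n)
         <original x> = <private copy of x>;

   and i is assigned up front in *BODY_P (to fd->loop.n1, or to 0 where that
   is cheaper and safe) so threads that execute no iterations cannot satisfy
   the guard by accident.  */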
6566 
6567 /* Lower code for an OpenMP loop directive.  */
6568 
6569 static void
6570 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6571 {
6572   tree *rhs_p, block;
6573   struct omp_for_data fd;
6574   gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6575   gimple_seq omp_for_body, body, dlist;
6576   size_t i;
6577   struct gimplify_ctx gctx;
6578 
6579   push_gimplify_context (&gctx);
6580 
6581   lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6582   lower_omp (gimple_omp_body_ptr (stmt), ctx);
6583 
6584   block = make_node (BLOCK);
6585   new_stmt = gimple_build_bind (NULL, NULL, block);
6586   /* Replace at gsi right away, so that 'stmt' is no longer a member
6587      of a sequence, as we're going to add it to a different
6588      one below.  */
6589   gsi_replace (gsi_p, new_stmt, true);
6590 
6591   /* Move declaration of temporaries in the loop body before we make
6592      it go away.  */
6593   omp_for_body = gimple_omp_body (stmt);
6594   if (!gimple_seq_empty_p (omp_for_body)
6595       && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6596     {
6597       tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6598       gimple_bind_append_vars (new_stmt, vars);
6599     }
6600 
6601   /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
6602   dlist = NULL;
6603   body = NULL;
6604   lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6605   gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6606 
6607   /* Lower the header expressions.  At this point, we can assume that
6608      the header is of the form:
6609 
6610      	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6611 
6612      We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6613      using the .omp_data_s mapping, if needed.  */
6614   for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6615     {
6616       rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6617       if (!is_gimple_min_invariant (*rhs_p))
6618 	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
6619 
6620       rhs_p = gimple_omp_for_final_ptr (stmt, i);
6621       if (!is_gimple_min_invariant (*rhs_p))
6622 	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
6623 
6624       rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6625       if (!is_gimple_min_invariant (*rhs_p))
6626 	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
6627     }
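
  /* For instance (a sketch; the names are invented), a final value written
     as the non-invariant expression q->len has at this point been pulled
     into a formal temporary, so the header reads

         #pragma omp for (i = 0; i < D.1234; i = i + 1)

     with D.1234 = q->len gimplified into BODY ahead of the directive.  */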
6628 
6629   /* Once lowered, extract the bounds and clauses.  */
6630   extract_omp_for_data (stmt, &fd, NULL);
6631 
6632   lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6633 
6634   gimple_seq_add_stmt (&body, stmt);
6635   gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6636 
6637   gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6638 							 fd.loop.v));
6639 
6640   /* After the loop, add exit clauses.  */
6641   lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6642   gimple_seq_add_seq (&body, dlist);
6643 
6644   body = maybe_catch_exception (body);
6645 
6646   /* Region exit marker goes at the end of the loop body.  */
6647   gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6648 
6649   pop_gimplify_context (new_stmt);
6650 
6651   gimple_bind_append_vars (new_stmt, ctx->block_vars);
6652   BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6653   if (BLOCK_VARS (block))
6654     TREE_USED (block) = 1;
6655 
6656   gimple_bind_set_body (new_stmt, body);
6657   gimple_omp_set_body (stmt, NULL);
6658   gimple_omp_for_set_pre_body (stmt, NULL);
6659 }
6660 
6661 /* Callback for walk_stmts.  Check if the current statement only contains
6662    GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
6663 
6664 static tree
6665 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6666     			 bool *handled_ops_p,
6667     			 struct walk_stmt_info *wi)
6668 {
6669   int *info = (int *) wi->info;
6670   gimple stmt = gsi_stmt (*gsi_p);
6671 
6672   *handled_ops_p = true;
6673   switch (gimple_code (stmt))
6674     {
6675     WALK_SUBSTMTS;
6676 
6677     case GIMPLE_OMP_FOR:
6678     case GIMPLE_OMP_SECTIONS:
6679       *info = *info == 0 ? 1 : -1;
6680       break;
6681     default:
6682       *info = -1;
6683       break;
6684     }
6685   return NULL;
6686 }
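
/* For example (illustrative only), in

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           a[i] = 0;
       }

   this walk leaves *INFO at 1 and lower_omp_taskreg below then marks the
   parallel as combined; any additional statement in the body (beyond the
   binds and similar wrappers handled by WALK_SUBSTMTS) forces *INFO to -1
   and the parallel stays uncombined.  */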
6687 
6688 struct omp_taskcopy_context
6689 {
6690   /* This field must be at the beginning, as we do "inheritance": Some
6691      callback functions for tree-inline.c (e.g., omp_copy_decl)
6692      receive a copy_body_data pointer that is up-casted to an
6693      omp_context pointer.  */
6694   copy_body_data cb;
6695   omp_context *ctx;
6696 };
6697 
6698 static tree
6699 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6700 {
6701   struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6702 
6703   if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6704     return create_tmp_var (TREE_TYPE (var), NULL);
6705 
6706   return var;
6707 }
6708 
6709 static tree
6710 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6711 {
6712   tree name, new_fields = NULL, type, f;
6713 
6714   type = lang_hooks.types.make_type (RECORD_TYPE);
6715   name = DECL_NAME (TYPE_NAME (orig_type));
6716   name = build_decl (gimple_location (tcctx->ctx->stmt),
6717 		     TYPE_DECL, name, type);
6718   TYPE_NAME (type) = name;
6719 
6720   for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6721     {
6722       tree new_f = copy_node (f);
6723       DECL_CONTEXT (new_f) = type;
6724       TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6725       TREE_CHAIN (new_f) = new_fields;
6726       walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6727       walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6728       walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6729 		 &tcctx->cb, NULL);
6730       new_fields = new_f;
6731       *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6732     }
6733   TYPE_FIELDS (type) = nreverse (new_fields);
6734   layout_type (type);
6735   return type;
6736 }
6737 
6738 /* Create task copyfn.  */
6739 
6740 static void
6741 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6742 {
6743   struct function *child_cfun;
6744   tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6745   tree record_type, srecord_type, bind, list;
6746   bool record_needs_remap = false, srecord_needs_remap = false;
6747   splay_tree_node n;
6748   struct omp_taskcopy_context tcctx;
6749   struct gimplify_ctx gctx;
6750   location_t loc = gimple_location (task_stmt);
6751 
6752   child_fn = gimple_omp_task_copy_fn (task_stmt);
6753   child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6754   gcc_assert (child_cfun->cfg == NULL);
6755   DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6756 
6757   /* Reset DECL_CONTEXT on function arguments.  */
6758   for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6759     DECL_CONTEXT (t) = child_fn;
6760 
6761   /* Populate the function.  */
6762   push_gimplify_context (&gctx);
6763   push_cfun (child_cfun);
6764 
6765   bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6766   TREE_SIDE_EFFECTS (bind) = 1;
6767   list = NULL;
6768   DECL_SAVED_TREE (child_fn) = bind;
6769   DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6770 
6771   /* Remap src and dst argument types if needed.  */
6772   record_type = ctx->record_type;
6773   srecord_type = ctx->srecord_type;
6774   for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6775     if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6776       {
6777 	record_needs_remap = true;
6778 	break;
6779       }
6780   for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6781     if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6782       {
6783 	srecord_needs_remap = true;
6784 	break;
6785       }
6786 
6787   if (record_needs_remap || srecord_needs_remap)
6788     {
6789       memset (&tcctx, '\0', sizeof (tcctx));
6790       tcctx.cb.src_fn = ctx->cb.src_fn;
6791       tcctx.cb.dst_fn = child_fn;
6792       tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6793       gcc_checking_assert (tcctx.cb.src_node);
6794       tcctx.cb.dst_node = tcctx.cb.src_node;
6795       tcctx.cb.src_cfun = ctx->cb.src_cfun;
6796       tcctx.cb.copy_decl = task_copyfn_copy_decl;
6797       tcctx.cb.eh_lp_nr = 0;
6798       tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6799       tcctx.cb.decl_map = pointer_map_create ();
6800       tcctx.ctx = ctx;
6801 
6802       if (record_needs_remap)
6803 	record_type = task_copyfn_remap_type (&tcctx, record_type);
6804       if (srecord_needs_remap)
6805 	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6806     }
6807   else
6808     tcctx.cb.decl_map = NULL;
6809 
6810   arg = DECL_ARGUMENTS (child_fn);
6811   TREE_TYPE (arg) = build_pointer_type (record_type);
6812   sarg = DECL_CHAIN (arg);
6813   TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6814 
6815   /* First pass: initialize temporaries used in record_type and srecord_type
6816      sizes and field offsets.  */
6817   if (tcctx.cb.decl_map)
6818     for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6819       if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6820 	{
6821 	  tree *p;
6822 
6823 	  decl = OMP_CLAUSE_DECL (c);
6824 	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6825 	  if (p == NULL)
6826 	    continue;
6827 	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6828 	  sf = (tree) n->value;
6829 	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6830 	  src = build_simple_mem_ref_loc (loc, sarg);
6831 	  src = omp_build_component_ref (src, sf);
6832 	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6833 	  append_to_statement_list (t, &list);
6834 	}
6835 
6836   /* Second pass: copy shared var pointers and copy construct non-VLA
6837      firstprivate vars.  */
6838   for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6839     switch (OMP_CLAUSE_CODE (c))
6840       {
6841       case OMP_CLAUSE_SHARED:
6842 	decl = OMP_CLAUSE_DECL (c);
6843 	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6844 	if (n == NULL)
6845 	  break;
6846 	f = (tree) n->value;
6847 	if (tcctx.cb.decl_map)
6848 	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6849 	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6850 	sf = (tree) n->value;
6851 	if (tcctx.cb.decl_map)
6852 	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6853 	src = build_simple_mem_ref_loc (loc, sarg);
6854 	src = omp_build_component_ref (src, sf);
6855 	dst = build_simple_mem_ref_loc (loc, arg);
6856 	dst = omp_build_component_ref (dst, f);
6857 	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6858 	append_to_statement_list (t, &list);
6859 	break;
6860       case OMP_CLAUSE_FIRSTPRIVATE:
6861 	decl = OMP_CLAUSE_DECL (c);
6862 	if (is_variable_sized (decl))
6863 	  break;
6864 	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6865 	if (n == NULL)
6866 	  break;
6867 	f = (tree) n->value;
6868 	if (tcctx.cb.decl_map)
6869 	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6870 	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6871 	if (n != NULL)
6872 	  {
6873 	    sf = (tree) n->value;
6874 	    if (tcctx.cb.decl_map)
6875 	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6876 	    src = build_simple_mem_ref_loc (loc, sarg);
6877 	    src = omp_build_component_ref (src, sf);
6878 	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6879 	      src = build_simple_mem_ref_loc (loc, src);
6880 	  }
6881 	else
6882 	  src = decl;
6883 	dst = build_simple_mem_ref_loc (loc, arg);
6884 	dst = omp_build_component_ref (dst, f);
6885 	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6886 	append_to_statement_list (t, &list);
6887 	break;
6888       case OMP_CLAUSE_PRIVATE:
6889 	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6890 	  break;
6891 	decl = OMP_CLAUSE_DECL (c);
6892 	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6893 	f = (tree) n->value;
6894 	if (tcctx.cb.decl_map)
6895 	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6896 	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6897 	if (n != NULL)
6898 	  {
6899 	    sf = (tree) n->value;
6900 	    if (tcctx.cb.decl_map)
6901 	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6902 	    src = build_simple_mem_ref_loc (loc, sarg);
6903 	    src = omp_build_component_ref (src, sf);
6904 	    if (use_pointer_for_field (decl, NULL))
6905 	      src = build_simple_mem_ref_loc (loc, src);
6906 	  }
6907 	else
6908 	  src = decl;
6909 	dst = build_simple_mem_ref_loc (loc, arg);
6910 	dst = omp_build_component_ref (dst, f);
6911 	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6912 	append_to_statement_list (t, &list);
6913 	break;
6914       default:
6915 	break;
6916       }
6917 
6918   /* Last pass: handle VLA firstprivates.  */
6919   if (tcctx.cb.decl_map)
6920     for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6921       if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6922 	{
6923 	  tree ind, ptr, df;
6924 
6925 	  decl = OMP_CLAUSE_DECL (c);
6926 	  if (!is_variable_sized (decl))
6927 	    continue;
6928 	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6929 	  if (n == NULL)
6930 	    continue;
6931 	  f = (tree) n->value;
6932 	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6933 	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6934 	  ind = DECL_VALUE_EXPR (decl);
6935 	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6936 	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6937 	  n = splay_tree_lookup (ctx->sfield_map,
6938 				 (splay_tree_key) TREE_OPERAND (ind, 0));
6939 	  sf = (tree) n->value;
6940 	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6941 	  src = build_simple_mem_ref_loc (loc, sarg);
6942 	  src = omp_build_component_ref (src, sf);
6943 	  src = build_simple_mem_ref_loc (loc, src);
6944 	  dst = build_simple_mem_ref_loc (loc, arg);
6945 	  dst = omp_build_component_ref (dst, f);
6946 	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6947 	  append_to_statement_list (t, &list);
6948 	  n = splay_tree_lookup (ctx->field_map,
6949 				 (splay_tree_key) TREE_OPERAND (ind, 0));
6950 	  df = (tree) n->value;
6951 	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6952 	  ptr = build_simple_mem_ref_loc (loc, arg);
6953 	  ptr = omp_build_component_ref (ptr, df);
6954 	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6955 		      build_fold_addr_expr_loc (loc, dst));
6956 	  append_to_statement_list (t, &list);
6957 	}
6958 
6959   t = build1 (RETURN_EXPR, void_type_node, NULL);
6960   append_to_statement_list (t, &list);
6961 
6962   if (tcctx.cb.decl_map)
6963     pointer_map_destroy (tcctx.cb.decl_map);
6964   pop_gimplify_context (NULL);
6965   BIND_EXPR_BODY (bind) = list;
6966   pop_cfun ();
6967 }
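
/* An illustrative sketch (pseudo C; the struct, field and function names
   are invented, the real parameter types being the possibly remapped
   record_type and srecord_type) of the body assembled above, for a task
   with a shared variable S and a non-VLA firstprivate scalar X:

       void task_copyfn (struct task_data *dst, struct sender_data *src)
       {
         dst->S = src->S;   (shared: copy the pointer field)
         dst->X = src->X;   (firstprivate: copy-construct)
         return;
       }

   VLA firstprivates are handled by the last pass: their contents are
   copy-constructed into *dst and the matching pointer field in *dst is
   re-pointed at the new copy.  */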
6968 
6969 /* Lower the OpenMP parallel or task directive in the current statement
6970    in GSI_P.  CTX holds context information for the directive.  */
6971 
6972 static void
6973 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6974 {
6975   tree clauses;
6976   tree child_fn, t;
6977   gimple stmt = gsi_stmt (*gsi_p);
6978   gimple par_bind, bind;
6979   gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6980   struct gimplify_ctx gctx;
6981   location_t loc = gimple_location (stmt);
6982 
6983   clauses = gimple_omp_taskreg_clauses (stmt);
6984   par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6985   par_body = gimple_bind_body (par_bind);
6986   child_fn = ctx->cb.dst_fn;
6987   if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6988       && !gimple_omp_parallel_combined_p (stmt))
6989     {
6990       struct walk_stmt_info wi;
6991       int ws_num = 0;
6992 
6993       memset (&wi, 0, sizeof (wi));
6994       wi.info = &ws_num;
6995       wi.val_only = true;
6996       walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6997       if (ws_num == 1)
6998 	gimple_omp_parallel_set_combined_p (stmt, true);
6999     }
7000   if (ctx->srecord_type)
7001     create_task_copyfn (stmt, ctx);
7002 
7003   push_gimplify_context (&gctx);
7004 
7005   par_olist = NULL;
7006   par_ilist = NULL;
7007   lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
7008   lower_omp (&par_body, ctx);
7009   if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
7010     lower_reduction_clauses (clauses, &par_olist, ctx);
7011 
7012   /* Declare all the variables created by mapping and the variables
7013      declared in the scope of the parallel body.  */
7014   record_vars_into (ctx->block_vars, child_fn);
7015   record_vars_into (gimple_bind_vars (par_bind), child_fn);
7016 
7017   if (ctx->record_type)
7018     {
7019       ctx->sender_decl
7020 	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
7021 			  : ctx->record_type, ".omp_data_o");
7022       DECL_NAMELESS (ctx->sender_decl) = 1;
7023       TREE_ADDRESSABLE (ctx->sender_decl) = 1;
7024       gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
7025     }
7026 
7027   olist = NULL;
7028   ilist = NULL;
7029   lower_send_clauses (clauses, &ilist, &olist, ctx);
7030   lower_send_shared_vars (&ilist, &olist, ctx);
7031 
7032   /* Once all the expansions are done, sequence all the different
7033      fragments inside gimple_omp_body.  */
7034 
7035   new_body = NULL;
7036 
7037   if (ctx->record_type)
7038     {
7039       t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7040       /* fixup_child_record_type might have changed receiver_decl's type.  */
7041       t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
7042       gimple_seq_add_stmt (&new_body,
7043 	  		   gimple_build_assign (ctx->receiver_decl, t));
7044     }
7045 
7046   gimple_seq_add_seq (&new_body, par_ilist);
7047   gimple_seq_add_seq (&new_body, par_body);
7048   gimple_seq_add_seq (&new_body, par_olist);
7049   new_body = maybe_catch_exception (new_body);
7050   if (gimple_code (stmt) == GIMPLE_OMP_TASK)
7051     gimple_seq_add_stmt (&new_body,
7052 			 gimple_build_omp_continue (integer_zero_node,
7053 						    integer_zero_node));
7054   gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7055   gimple_omp_set_body (stmt, new_body);
7056 
7057   bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
7058   gsi_replace (gsi_p, bind, true);
7059   gimple_bind_add_seq (bind, ilist);
7060   gimple_bind_add_stmt (bind, stmt);
7061   gimple_bind_add_seq (bind, olist);
7062 
7063   pop_gimplify_context (NULL);
7064 }
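
/* After lower_omp_taskreg the statement at *GSI_P has roughly the following
   shape (an illustrative sketch, not literal GIMPLE dump output):

       {                                    (new GIMPLE_BIND)
         <ilist: assignments into .omp_data_o>
         #pragma omp parallel [clauses]
           {
             <receiver_decl> = &.omp_data_o;
             <par_ilist: receive-side clause setup>
             <original body>
             <par_olist: reductions>
             OMP_RETURN
           }
         <olist: copy-back, cleanup>
       }

   For a task, a GIMPLE_OMP_CONTINUE is added just before the OMP_RETURN.  */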
7065 
7066 /* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
7067    regimplified.  If DATA is non-NULL, lower_omp_1 is outside
7068    of OpenMP context, but with task_shared_vars set.  */
7069 
7070 static tree
7071 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7072     			void *data)
7073 {
7074   tree t = *tp;
7075 
7076   /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
7077   if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
7078     return t;
7079 
7080   if (task_shared_vars
7081       && DECL_P (t)
7082       && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
7083     return t;
7084 
7085   /* If a global variable has been privatized, TREE_CONSTANT on
7086      ADDR_EXPR might be wrong.  */
7087   if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
7088     recompute_tree_invariant_for_addr_expr (t);
7089 
7090   *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7091   return NULL_TREE;
7092 }
7093 
7094 static void
7095 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7096 {
7097   gimple stmt = gsi_stmt (*gsi_p);
7098   struct walk_stmt_info wi;
7099 
7100   if (gimple_has_location (stmt))
7101     input_location = gimple_location (stmt);
7102 
7103   if (task_shared_vars)
7104     memset (&wi, '\0', sizeof (wi));
7105 
7106   /* If we have issued syntax errors, avoid doing any heavy lifting.
7107      Just replace the OpenMP directives with a NOP to avoid
7108      confusing RTL expansion.  */
7109   if (seen_error () && is_gimple_omp (stmt))
7110     {
7111       gsi_replace (gsi_p, gimple_build_nop (), true);
7112       return;
7113     }
7114 
7115   switch (gimple_code (stmt))
7116     {
7117     case GIMPLE_COND:
7118       if ((ctx || task_shared_vars)
7119 	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7120 	      		 ctx ? NULL : &wi, NULL)
7121 	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7122 			    ctx ? NULL : &wi, NULL)))
7123 	gimple_regimplify_operands (stmt, gsi_p);
7124       break;
7125     case GIMPLE_CATCH:
7126       lower_omp (gimple_catch_handler_ptr (stmt), ctx);
7127       break;
7128     case GIMPLE_EH_FILTER:
7129       lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
7130       break;
7131     case GIMPLE_TRY:
7132       lower_omp (gimple_try_eval_ptr (stmt), ctx);
7133       lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
7134       break;
7135     case GIMPLE_TRANSACTION:
7136       lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7137       break;
7138     case GIMPLE_BIND:
7139       lower_omp (gimple_bind_body_ptr (stmt), ctx);
7140       break;
7141     case GIMPLE_OMP_PARALLEL:
7142     case GIMPLE_OMP_TASK:
7143       ctx = maybe_lookup_ctx (stmt);
7144       lower_omp_taskreg (gsi_p, ctx);
7145       break;
7146     case GIMPLE_OMP_FOR:
7147       ctx = maybe_lookup_ctx (stmt);
7148       gcc_assert (ctx);
7149       lower_omp_for (gsi_p, ctx);
7150       break;
7151     case GIMPLE_OMP_SECTIONS:
7152       ctx = maybe_lookup_ctx (stmt);
7153       gcc_assert (ctx);
7154       lower_omp_sections (gsi_p, ctx);
7155       break;
7156     case GIMPLE_OMP_SINGLE:
7157       ctx = maybe_lookup_ctx (stmt);
7158       gcc_assert (ctx);
7159       lower_omp_single (gsi_p, ctx);
7160       break;
7161     case GIMPLE_OMP_MASTER:
7162       ctx = maybe_lookup_ctx (stmt);
7163       gcc_assert (ctx);
7164       lower_omp_master (gsi_p, ctx);
7165       break;
7166     case GIMPLE_OMP_ORDERED:
7167       ctx = maybe_lookup_ctx (stmt);
7168       gcc_assert (ctx);
7169       lower_omp_ordered (gsi_p, ctx);
7170       break;
7171     case GIMPLE_OMP_CRITICAL:
7172       ctx = maybe_lookup_ctx (stmt);
7173       gcc_assert (ctx);
7174       lower_omp_critical (gsi_p, ctx);
7175       break;
7176     case GIMPLE_OMP_ATOMIC_LOAD:
7177       if ((ctx || task_shared_vars)
7178 	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7179 			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7180 	gimple_regimplify_operands (stmt, gsi_p);
7181       break;
7182     default:
7183       if ((ctx || task_shared_vars)
7184 	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
7185 			     ctx ? NULL : &wi))
7186 	gimple_regimplify_operands (stmt, gsi_p);
7187       break;
7188     }
7189 }
7190 
7191 static void
7192 lower_omp (gimple_seq *body, omp_context *ctx)
7193 {
7194   location_t saved_location = input_location;
7195   gimple_stmt_iterator gsi;
7196   for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
7197     lower_omp_1 (&gsi, ctx);
7198   input_location = saved_location;
7199 }
7200 
7201 /* Main entry point.  */
7202 
7203 static unsigned int
7204 execute_lower_omp (void)
7205 {
7206   gimple_seq body;
7207   int i;
7208   omp_context *ctx;
7209 
7210   /* This pass always runs, to provide PROP_gimple_lomp.
7211      But there is nothing to do unless -fopenmp is given.  */
7212   if (flag_openmp == 0)
7213     return 0;
7214 
7215   all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
7216 				 delete_omp_context);
7217 
7218   body = gimple_body (current_function_decl);
7219   scan_omp (&body, NULL);
7220   gcc_assert (taskreg_nesting_level == 0);
7221   FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
7222     finish_taskreg_scan (ctx);
7223   taskreg_contexts.release ();
7224 
7225   if (all_contexts->root)
7226     {
7227       struct gimplify_ctx gctx;
7228 
7229       if (task_shared_vars)
7230 	push_gimplify_context (&gctx);
7231       lower_omp (&body, NULL);
7232       if (task_shared_vars)
7233 	pop_gimplify_context (NULL);
7234     }
7235 
7236   if (all_contexts)
7237     {
7238       splay_tree_delete (all_contexts);
7239       all_contexts = NULL;
7240     }
7241   BITMAP_FREE (task_shared_vars);
7242   return 0;
7243 }
7244 
7245 struct gimple_opt_pass pass_lower_omp =
7246 {
7247  {
7248   GIMPLE_PASS,
7249   "omplower",				/* name */
7250   OPTGROUP_NONE,                        /* optinfo_flags */
7251   NULL,					/* gate */
7252   execute_lower_omp,			/* execute */
7253   NULL,					/* sub */
7254   NULL,					/* next */
7255   0,					/* static_pass_number */
7256   TV_NONE,				/* tv_id */
7257   PROP_gimple_any,			/* properties_required */
7258   PROP_gimple_lomp,			/* properties_provided */
7259   0,					/* properties_destroyed */
7260   0,					/* todo_flags_start */
7261   0                                     /* todo_flags_finish */
7262  }
7263 };
7264 
7265 /* The following is a utility to diagnose OpenMP structured block violations.
7266    It is not part of the "omplower" pass, as that's invoked too late.  It
7267    should be invoked by the respective front ends after gimplification.  */
7268 
7269 static splay_tree all_labels;
7270 
7271 /* Check for mismatched contexts and generate an error if needed.  Return
7272    true if an error is detected.  */
7273 
7274 static bool
7275 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
7276     	       gimple branch_ctx, gimple label_ctx)
7277 {
7278   if (label_ctx == branch_ctx)
7279     return false;
7280 
7281 
7282   /*
7283      Previously we kept track of the label's entire context in diagnose_sb_[12]
7284      so we could traverse it and issue a correct "exit" or "enter" error
7285      message upon a structured block violation.
7286 
7287      We built the context by tree_cons'ing up a list, but there is
7288      no easy counterpart in gimple tuples.  It seems like far too much work
7289      for issuing exit/enter error messages.  If someone really misses the
7290      distinct error message... patches welcome.
7291    */
7292 
7293 #if 0
7294   /* Try to avoid confusing the user by producing an error message
7295      with correct "exit" or "enter" verbiage.  We prefer "exit"
7296      unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
7297   if (branch_ctx == NULL)
7298     exit_p = false;
7299   else
7300     {
7301       while (label_ctx)
7302 	{
7303 	  if (TREE_VALUE (label_ctx) == branch_ctx)
7304 	    {
7305 	      exit_p = false;
7306 	      break;
7307 	    }
7308 	  label_ctx = TREE_CHAIN (label_ctx);
7309 	}
7310     }
7311 
7312   if (exit_p)
7313     error ("invalid exit from OpenMP structured block");
7314   else
7315     error ("invalid entry to OpenMP structured block");
7316 #endif
7317 
7318   /* If it's obvious we have an invalid entry, be specific about the error.  */
7319   if (branch_ctx == NULL)
7320     error ("invalid entry to OpenMP structured block");
7321   else
7322     /* Otherwise, be vague and lazy, but efficient.  */
7323     error ("invalid branch to/from an OpenMP structured block");
7324 
7325   gsi_replace (gsi_p, gimple_build_nop (), false);
7326   return true;
7327 }
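
/* For example (an illustrative fragment, not from the testsuite), in

       goto inside;
       #pragma omp parallel
       {
         inside:;
         goto outside;
       }
       outside:;

   pass 1 below records that "inside" belongs to the parallel's context and
   "outside" to the enclosing one; pass 2 then hands each branch and its
   destination label's context to diagnose_sb_0 above, which reports the
   first goto as an invalid entry and the second as an invalid branch
   to/from an OpenMP structured block.  */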
7328 
7329 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7330    where each label is found.  */
7331 
7332 static tree
7333 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7334     	       struct walk_stmt_info *wi)
7335 {
7336   gimple context = (gimple) wi->info;
7337   gimple inner_context;
7338   gimple stmt = gsi_stmt (*gsi_p);
7339 
7340   *handled_ops_p = true;
7341 
7342   switch (gimple_code (stmt))
7343     {
7344     WALK_SUBSTMTS;
7345 
7346     case GIMPLE_OMP_PARALLEL:
7347     case GIMPLE_OMP_TASK:
7348     case GIMPLE_OMP_SECTIONS:
7349     case GIMPLE_OMP_SINGLE:
7350     case GIMPLE_OMP_SECTION:
7351     case GIMPLE_OMP_MASTER:
7352     case GIMPLE_OMP_ORDERED:
7353     case GIMPLE_OMP_CRITICAL:
7354       /* The minimal context here is just the current OMP construct.  */
7355       inner_context = stmt;
7356       wi->info = inner_context;
7357       walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7358       wi->info = context;
7359       break;
7360 
7361     case GIMPLE_OMP_FOR:
7362       inner_context = stmt;
7363       wi->info = inner_context;
7364       /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7365 	 walk them.  */
7366       walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7367 	  	       diagnose_sb_1, NULL, wi);
7368       walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7369       wi->info = context;
7370       break;
7371 
7372     case GIMPLE_LABEL:
7373       splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7374 			 (splay_tree_value) context);
7375       break;
7376 
7377     default:
7378       break;
7379     }
7380 
7381   return NULL_TREE;
7382 }
7383 
7384 /* Pass 2: Check each branch and see if its context differs from that of
7385    the destination label's context.  */
7386 
7387 static tree
7388 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7389     	       struct walk_stmt_info *wi)
7390 {
7391   gimple context = (gimple) wi->info;
7392   splay_tree_node n;
7393   gimple stmt = gsi_stmt (*gsi_p);
7394 
7395   *handled_ops_p = true;
7396 
7397   switch (gimple_code (stmt))
7398     {
7399     WALK_SUBSTMTS;
7400 
7401     case GIMPLE_OMP_PARALLEL:
7402     case GIMPLE_OMP_TASK:
7403     case GIMPLE_OMP_SECTIONS:
7404     case GIMPLE_OMP_SINGLE:
7405     case GIMPLE_OMP_SECTION:
7406     case GIMPLE_OMP_MASTER:
7407     case GIMPLE_OMP_ORDERED:
7408     case GIMPLE_OMP_CRITICAL:
7409       wi->info = stmt;
7410       walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7411       wi->info = context;
7412       break;
7413 
7414     case GIMPLE_OMP_FOR:
7415       wi->info = stmt;
7416       /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7417 	 walk them.  */
7418       walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7419 			   diagnose_sb_2, NULL, wi);
7420       walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7421       wi->info = context;
7422       break;
7423 
7424     case GIMPLE_COND:
7425 	{
7426 	  tree lab = gimple_cond_true_label (stmt);
7427 	  if (lab)
7428 	    {
7429 	      n = splay_tree_lookup (all_labels,
7430 				     (splay_tree_key) lab);
7431 	      diagnose_sb_0 (gsi_p, context,
7432 			     n ? (gimple) n->value : NULL);
7433 	    }
7434 	  lab = gimple_cond_false_label (stmt);
7435 	  if (lab)
7436 	    {
7437 	      n = splay_tree_lookup (all_labels,
7438 				     (splay_tree_key) lab);
7439 	      diagnose_sb_0 (gsi_p, context,
7440 			     n ? (gimple) n->value : NULL);
7441 	    }
7442 	}
7443       break;
7444 
7445     case GIMPLE_GOTO:
7446       {
7447 	tree lab = gimple_goto_dest (stmt);
7448 	if (TREE_CODE (lab) != LABEL_DECL)
7449 	  break;
7450 
7451 	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7452 	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7453       }
7454       break;
7455 
7456     case GIMPLE_SWITCH:
7457       {
7458 	unsigned int i;
7459 	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7460 	  {
7461 	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7462 	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7463 	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7464 	      break;
7465 	  }
7466       }
7467       break;
7468 
7469     case GIMPLE_RETURN:
7470       diagnose_sb_0 (gsi_p, context, NULL);
7471       break;
7472 
7473     default:
7474       break;
7475     }
7476 
7477   return NULL_TREE;
7478 }
7479 
7480 static unsigned int
7481 diagnose_omp_structured_block_errors (void)
7482 {
7483   struct walk_stmt_info wi;
7484   gimple_seq body = gimple_body (current_function_decl);
7485 
7486   all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7487 
7488   memset (&wi, 0, sizeof (wi));
7489   walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7490 
7491   memset (&wi, 0, sizeof (wi));
7492   wi.want_locations = true;
7493   walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7494 
7495   gimple_set_body (current_function_decl, body);
7496 
7497   splay_tree_delete (all_labels);
7498   all_labels = NULL;
7499 
7500   return 0;
7501 }
7502 
7503 static bool
7504 gate_diagnose_omp_blocks (void)
7505 {
7506   return flag_openmp != 0;
7507 }
7508 
7509 struct gimple_opt_pass pass_diagnose_omp_blocks =
7510 {
7511   {
7512     GIMPLE_PASS,
7513     "*diagnose_omp_blocks",		/* name */
7514     OPTGROUP_NONE,                      /* optinfo_flags */
7515     gate_diagnose_omp_blocks,		/* gate */
7516     diagnose_omp_structured_block_errors,	/* execute */
7517     NULL,				/* sub */
7518     NULL,				/* next */
7519     0,					/* static_pass_number */
7520     TV_NONE,				/* tv_id */
7521     PROP_gimple_any,			/* properties_required */
7522     0,					/* properties_provided */
7523     0,					/* properties_destroyed */
7524     0,					/* todo_flags_start */
7525     0,					/* todo_flags_finish */
7526   }
7527 };
7528 
7529 #include "gt-omp-low.h"
7530