// xref: /llvm-project/mlir/test/Dialect/OpenMP/ops.mlir (revision afcbcae668f1d8061974247f2828190173aef742)
// RUN: mlir-opt %s | mlir-opt | FileCheck %s

// Round-trip test for the standalone omp.barrier operation.
func.func @omp_barrier() -> () {
  // CHECK: omp.barrier
  omp.barrier
  return
}

// Round-trip test for omp.master with an explicit omp.terminator.
func.func @omp_master() -> () {
  // CHECK: omp.master
  omp.master {
    // CHECK: omp.terminator
    omp.terminator
  }

  return
}

// Round-trip test for omp.masked, with and without a filter operand.
// CHECK-LABEL: omp_masked
func.func @omp_masked(%filtered_thread_id : i32) -> () {
  // CHECK: omp.masked filter(%{{.*}} : i32)
  "omp.masked" (%filtered_thread_id) ({
    omp.terminator
  }) : (i32) -> ()

  // CHECK: omp.masked
  "omp.masked" () ({
    omp.terminator
  }) : () -> ()
  return
}

// Round-trip test for the standalone omp.taskwait operation.
func.func @omp_taskwait() -> () {
  // CHECK: omp.taskwait
  omp.taskwait
  return
}

// Round-trip test for the standalone omp.taskyield operation.
func.func @omp_taskyield() -> () {
  // CHECK: omp.taskyield
  omp.taskyield
  return
}

// Round-trip test for omp.flush with zero, one, and two data operands.
// CHECK-LABEL: func @omp_flush
// CHECK-SAME: ([[ARG0:%.*]]: memref<i32>) {
func.func @omp_flush(%arg0 : memref<i32>) -> () {
  // Test without data var
  // CHECK: omp.flush
  omp.flush

  // Test with one data var
  // CHECK: omp.flush([[ARG0]] : memref<i32>)
  omp.flush(%arg0 : memref<i32>)

  // Test with two data var
  // CHECK: omp.flush([[ARG0]], [[ARG0]] : memref<i32>, memref<i32>)
  omp.flush(%arg0, %arg0: memref<i32>, memref<i32>)

  return
}

// Round-trip test for omp.terminator used as a function's last operation.
func.func @omp_terminator() -> () {
  // CHECK: omp.terminator
  omp.terminator
}

// Round-trip test for omp.parallel in generic form: clause combinations
// (allocate/if/num_threads), nesting, and composite distribute/wsloop/simd.
func.func @omp_parallel(%data_var : memref<i32>, %if_cond : i1, %num_threads : i32, %idx : index) -> () {
  // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) if(%{{.*}}) num_threads(%{{.*}} : i32)
  "omp.parallel" (%data_var, %data_var, %if_cond, %num_threads) ({

  // test without if condition
  // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) num_threads(%{{.*}} : i32)
    "omp.parallel"(%data_var, %data_var, %num_threads) ({
      omp.terminator
    }) {operandSegmentSizes = array<i32: 1,1,0,1,0,0>} : (memref<i32>, memref<i32>, i32) -> ()

  // CHECK: omp.barrier
    omp.barrier

  // test without num_threads
  // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) if(%{{.*}})
    "omp.parallel"(%data_var, %data_var, %if_cond) ({
      omp.terminator
    }) {operandSegmentSizes = array<i32: 1,1,1,0,0,0>} : (memref<i32>, memref<i32>, i1) -> ()

  // test without allocate
  // CHECK: omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32)
    "omp.parallel"(%if_cond, %num_threads) ({
      omp.terminator
    }) {operandSegmentSizes = array<i32: 0,0,1,1,0,0>} : (i1, i32) -> ()

    omp.terminator
  }) {operandSegmentSizes = array<i32: 1,1,1,1,0,0>, proc_bind_kind = #omp<procbindkind spread>} : (memref<i32>, memref<i32>, i1, i32) -> ()

  // test with multiple parameters for single variadic argument
  // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
  "omp.parallel" (%data_var, %data_var) ({
    omp.terminator
  }) {operandSegmentSizes = array<i32: 1,1,0,0,0,0>} : (memref<i32>, memref<i32>) -> ()

  // CHECK: omp.parallel
  omp.parallel {
    // CHECK-NOT: omp.terminator
    // CHECK: omp.distribute
    omp.distribute {
      // CHECK-NEXT: omp.wsloop
      omp.wsloop {
        // CHECK-NEXT: omp.loop_nest
        omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
          omp.yield
        }
      } {omp.composite}
    } {omp.composite}
    omp.terminator
  } {omp.composite}

  // CHECK: omp.parallel
  omp.parallel {
    // CHECK-NOT: omp.terminator
    // CHECK: omp.distribute
    omp.distribute {
      // CHECK-NEXT: omp.wsloop
      omp.wsloop {
        // CHECK-NEXT: omp.simd
        omp.simd {
          // CHECK-NEXT: omp.loop_nest
          omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
            omp.yield
          }
        } {omp.composite}
      } {omp.composite}
    } {omp.composite}
    omp.terminator
  } {omp.composite}

  return
}

// Round-trip test for omp.parallel in pretty (custom assembly) form, covering
// num_threads of several integer types, allocate, nesting, if, and proc_bind.
func.func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_threads : i32, %allocator : si32) -> () {
 // CHECK: omp.parallel
 omp.parallel {
  omp.terminator
 }

 // CHECK: omp.parallel num_threads(%{{.*}} : i32)
 omp.parallel num_threads(%num_threads : i32) {
   omp.terminator
 }

 %n_index = arith.constant 2 : index
 // CHECK: omp.parallel num_threads(%{{.*}} : index)
 omp.parallel num_threads(%n_index : index) {
   omp.terminator
 }

 %n_i64 = arith.constant 4 : i64
 // CHECK: omp.parallel num_threads(%{{.*}} : i64)
 omp.parallel num_threads(%n_i64 : i64) {
   omp.terminator
 }

 // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
 omp.parallel allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
   omp.terminator
 }

 // CHECK: omp.parallel
 // CHECK-NEXT: omp.parallel if(%{{.*}})
 omp.parallel {
   omp.parallel if(%if_cond) {
     omp.terminator
   }
   omp.terminator
 }

 // CHECK: omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32) proc_bind(close)
 omp.parallel num_threads(%num_threads : i32) if(%if_cond) proc_bind(close) {
   omp.terminator
 }

  return
}

// Round-trip test for omp.loop_nest in generic form: single and multiple
// induction variables, inclusive bounds, and a non-trivial body.
// CHECK-LABEL: omp_loop_nest
func.func @omp_loop_nest(%lb : index, %ub : index, %step : index) -> () {
  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb0(%iv: index):
      omp.yield
    }) : (index, index, index) -> ()
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}})
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb0(%iv: index):
      omp.yield
    }) {loop_inclusive} : (index, index, index) -> ()
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}, %{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
    "omp.loop_nest" (%lb, %lb, %ub, %ub, %step, %step) ({
    ^bb0(%iv: index, %iv3: index):
      omp.yield
    }) : (index, index, index, index, index, index) -> ()
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb0(%iv: index):
      // CHECK: test.op1
      "test.op1"(%lb) : (index) -> ()
      // CHECK: test.op2
      "test.op2"() : () -> ()
      // CHECK: omp.yield
      omp.yield
    }) : (index, index, index) -> ()
  }

  return
}

// Round-trip test for omp.loop_nest in pretty form: single/multiple IVs,
// inclusive upper bound, and a body containing extra test operations.
// CHECK-LABEL: omp_loop_nest_pretty
func.func @omp_loop_nest_pretty(%lb : index, %ub : index, %step : index) -> () {
  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) inclusive step (%step) {
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
    omp.loop_nest (%iv1, %iv2) : index = (%lb, %lb) to (%ub, %ub) step (%step, %step) {
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest
    // CHECK-SAME: (%{{.*}}) : index =
    // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step)  {
      // CHECK: test.op1
      "test.op1"(%lb) : (index) -> ()
      // CHECK: test.op2
      "test.op2"() : () -> ()
      // CHECK: omp.yield
      omp.yield
    }
  }

  return
}

// Round-trip test for omp.loop_nest bodies with multiple blocks: unconditional
// branches, diamond control flow, and multiple omp.yield terminators.
// CHECK-LABEL: omp_loop_nest_pretty_multi_block
func.func @omp_loop_nest_pretty_multi_block(%lb : index, %ub : index,
    %step : index, %data1 : memref<?xi32>, %data2 : memref<?xi32>) -> () {

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      %1 = "test.payload"(%iv) : (index) -> (i32)
      cf.br ^bb1(%1: i32)
    ^bb1(%arg: i32):
      memref.store %arg, %data1[%iv] : memref<?xi32>
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      %c = "test.condition"(%iv) : (index) -> (i1)
      %v1 = "test.payload"(%iv) : (index) -> (i32)
      cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32)
    ^bb1(%arg0: i32):
      memref.store %arg0, %data1[%iv] : memref<?xi32>
      cf.br ^bb3
    ^bb2(%arg1: i32):
      memref.store %arg1, %data2[%iv] : memref<?xi32>
      cf.br ^bb3
    ^bb3:
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      %c = "test.condition"(%iv) : (index) -> (i1)
      %v1 = "test.payload"(%iv) : (index) -> (i32)
      cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32)
    ^bb1(%arg0: i32):
      memref.store %arg0, %data1[%iv] : memref<?xi32>
      omp.yield
    ^bb2(%arg1: i32):
      memref.store %arg1, %data2[%iv] : memref<?xi32>
      omp.yield
    }
  }

  return
}

// Round-trip test for omp.loop_nest with non-index (i32 and i64) IV types.
// CHECK-LABEL: omp_loop_nest_pretty_non_index
func.func @omp_loop_nest_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32,
    %lb2 : i64, %ub2 : i64, %step2 : i64, %data1 : memref<?xi32>,
    %data2 : memref<?xi64>) -> () {

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv1) : i32 = (%lb1) to (%ub1) step (%step1) {
      %1 = "test.payload"(%iv1) : (i32) -> (index)
      cf.br ^bb1(%1: index)
    ^bb1(%arg1: index):
      memref.store %iv1, %data1[%arg1] : memref<?xi32>
      omp.yield
    }
  }

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}) : i64 = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
    omp.loop_nest (%iv) : i64 = (%lb2) to (%ub2) step (%step2) {
      %2 = "test.payload"(%iv) : (i64) -> (index)
      cf.br ^bb1(%2: index)
    ^bb1(%arg2: index):
      memref.store %iv, %data2[%arg2] : memref<?xi64>
      omp.yield
    }
  }

  return
}

// Round-trip test for a two-dimensional omp.loop_nest with i32 IVs.
// CHECK-LABEL: omp_loop_nest_pretty_multiple
func.func @omp_loop_nest_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32,
    %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref<?xi32>) -> () {

  omp.wsloop {
    // CHECK: omp.loop_nest (%{{.*}}, %{{.*}}) : i32 = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
    omp.loop_nest (%iv1, %iv2) : i32 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
      %1 = "test.payload"(%iv1) : (i32) -> (index)
      %2 = "test.payload"(%iv2) : (i32) -> (index)
      memref.store %iv1, %data1[%1] : memref<?xi32>
      memref.store %iv2, %data1[%2] : memref<?xi32>
      omp.yield
    }
  }

  return
}

// Round-trip test for omp.wsloop in generic form: ordered, linear, schedule
// kinds (static/dynamic/auto), nowait, and a composite wsloop+simd nest.
// CHECK-LABEL: omp_wsloop
func.func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32) -> () {

  // CHECK: omp.wsloop ordered(1) {
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" () ({
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }) {operandSegmentSizes = array<i32: 0,0,0,0,0,0,0>, ordered = 1} :
    () -> ()

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static) {
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" (%data_var, %linear_var) ({
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }) {operandSegmentSizes = array<i32: 0,0,1,1,0,0,0>, schedule_kind = #omp<schedulekind static>} :
    (memref<i32>, i32) -> ()

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>, %{{.*}} = %{{.*}} : memref<i32>) schedule(static) {
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" (%data_var, %data_var, %linear_var, %linear_var) ({
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }) {operandSegmentSizes = array<i32: 0,0,2,2,0,0,0>, schedule_kind = #omp<schedulekind static>} :
    (memref<i32>, memref<i32>, i32, i32) -> ()

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) ordered(2) schedule(dynamic = %{{.*}}) {
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" (%data_var, %linear_var, %chunk_var) ({
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }) {operandSegmentSizes = array<i32: 0,0,1,1,0,0,1>, schedule_kind = #omp<schedulekind dynamic>, ordered = 2} :
    (memref<i32>, i32, i32) -> ()

  // CHECK: omp.wsloop nowait schedule(auto) {
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" () ({
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }) {operandSegmentSizes = array<i32: 0,0,0,0,0,0,0>, nowait, schedule_kind = #omp<schedulekind auto>} :
    () -> ()

  // CHECK: omp.wsloop {
  // CHECK-NEXT: omp.simd
  // CHECK-NEXT: omp.loop_nest
  "omp.wsloop" () ({
    omp.simd {
      omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
        omp.yield
      }
    } {omp.composite}
  }) {omp.composite} : () -> ()

  return
}

// Round-trip test for omp.wsloop in pretty form: ordered, linear, schedule
// with chunk and modifiers, nowait, order clause variants, and composite simd.
// CHECK-LABEL: omp_wsloop_pretty
func.func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32, %chunk_var2 : i16) -> () {

  // CHECK: omp.wsloop ordered(2) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop ordered(2) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop schedule(static) linear(%data_var = %linear_var : memref<i32>) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) ordered(2) schedule(static = %{{.*}} : i32) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop ordered(2) linear(%data_var = %linear_var : memref<i32>) schedule(static = %chunk_var : i32) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) ordered(2) schedule(dynamic = %{{.*}} : i32, nonmonotonic) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop ordered(2) linear(%data_var = %linear_var : memref<i32>) schedule(dynamic = %chunk_var : i32, nonmonotonic) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step)  {
      omp.yield
    }
  }

  // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) ordered(2) schedule(dynamic = %{{.*}} : i16, monotonic) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop ordered(2) linear(%data_var = %linear_var : memref<i32>) schedule(dynamic = %chunk_var2 : i16, monotonic) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop nowait {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop nowait {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop nowait order(concurrent) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop order(concurrent) nowait {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }

  // CHECK: omp.wsloop nowait order(reproducible:concurrent) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop order(reproducible:concurrent) nowait {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: omp.wsloop nowait order(unconstrained:concurrent) {
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop order(unconstrained:concurrent) nowait {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: omp.wsloop {
  // CHECK-NEXT: omp.simd
  // CHECK-NEXT: omp.loop_nest
  omp.wsloop {
    omp.simd {
      omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
        omp.yield
      }
    } {omp.composite}
  } {omp.composite}

  return
}

// Round-trip test for a bare omp.simd wrapping a generic-form loop nest.
// CHECK-LABEL: omp_simd
func.func @omp_simd(%lb : index, %ub : index, %step : index) -> () {
  // CHECK: omp.simd
  omp.simd {
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb1(%iv2: index):
      "omp.yield"() : () -> ()
    }) : (index, index, index) -> ()
  }

  return
}

// Round-trip test for omp.simd (generic form) with two aligned variables.
// CHECK-LABEL: omp_simd_aligned_list
func.func @omp_simd_aligned_list(%arg0 : index, %arg1 : index, %arg2 : index,
                                 %arg3 : memref<i32>, %arg4 : memref<i32>) -> () {
  // CHECK:      omp.simd aligned(
  // CHECK-SAME: %{{.*}} : memref<i32> -> 32 : i64,
  // CHECK-SAME: %{{.*}} : memref<i32> -> 128 : i64)
  "omp.simd"(%arg3, %arg4) ({
    "omp.loop_nest" (%arg0, %arg1, %arg2) ({
    ^bb1(%iv2: index):
      "omp.yield"() : () -> ()
    }) : (index, index, index) -> ()
  }) {alignments = [32, 128],
      operandSegmentSizes = array<i32: 2, 0, 0, 0, 0, 0, 0>} : (memref<i32>, memref<i32>) -> ()
  return
}

// Round-trip test for omp.simd (generic form) with a single aligned variable.
// CHECK-LABEL: omp_simd_aligned_single
func.func @omp_simd_aligned_single(%arg0 : index, %arg1 : index, %arg2 : index,
                                   %arg3 : memref<i32>, %arg4 : memref<i32>) -> () {
  // CHECK: omp.simd aligned(%{{.*}} : memref<i32> -> 32 : i64)
  "omp.simd"(%arg3) ({
    "omp.loop_nest" (%arg0, %arg1, %arg2) ({
    ^bb1(%iv2: index):
      "omp.yield"() : () -> ()
    }) : (index, index, index) -> ()
  }) {alignments = [32],
      operandSegmentSizes = array<i32: 1, 0, 0, 0, 0, 0, 0>} : (memref<i32>) -> ()
  return
}

// Round-trip test for omp.simd (generic form) with two nontemporal variables.
// CHECK-LABEL: omp_simd_nontemporal_list
func.func @omp_simd_nontemporal_list(%arg0 : index, %arg1 : index,
                                     %arg2 : index, %arg3 : memref<i32>,
                                     %arg4 : memref<i64>) -> () {
  // CHECK: omp.simd nontemporal(%{{.*}}, %{{.*}} : memref<i32>, memref<i64>)
  "omp.simd"(%arg3, %arg4) ({
    "omp.loop_nest" (%arg0, %arg1, %arg2) ({
    ^bb1(%iv2: index):
      "omp.yield"() : () -> ()
    }) : (index, index, index) -> ()
  }) {operandSegmentSizes = array<i32: 0, 0, 0, 0, 2, 0, 0>} : (memref<i32>, memref<i64>) -> ()
  return
}

// Round-trip test for omp.simd (generic form) with one nontemporal variable.
// CHECK-LABEL: omp_simd_nontemporal_single
func.func @omp_simd_nontemporal_single(%arg0 : index, %arg1 : index,
                                       %arg2 : index, %arg3 : memref<i32>,
                                       %arg4 : memref<i64>) -> () {
  // CHECK: omp.simd nontemporal(%{{.*}} : memref<i32>)
  "omp.simd"(%arg3) ({
    "omp.loop_nest" (%arg0, %arg1, %arg2) ({
    ^bb1(%iv2: index):
      "omp.yield"() : () -> ()
    }) : (index, index, index) -> ()
  }) {operandSegmentSizes = array<i32: 0, 0, 0, 0, 1, 0, 0>} : (memref<i32>) -> ()
  return
}

// Round-trip test for a bare omp.simd in pretty form.
// CHECK-LABEL: omp_simd_pretty
func.func @omp_simd_pretty(%lb : index, %ub : index, %step : index) -> () {
  // CHECK: omp.simd {
  omp.simd {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for the pretty-form omp.simd aligned clause.
// CHECK-LABEL:   func.func @omp_simd_pretty_aligned(
func.func @omp_simd_pretty_aligned(%lb : index, %ub : index, %step : index,
                                   %data_var : memref<i32>,
                                   %data_var1 : memref<i32>) -> () {
  // CHECK:      omp.simd aligned(
  // CHECK-SAME: %{{.*}} : memref<i32> -> 32 : i64,
  // CHECK-SAME: %{{.*}} : memref<i32> -> 128 : i64)
  omp.simd aligned(%data_var :  memref<i32> -> 32, %data_var1 : memref<i32> -> 128) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for the pretty-form omp.simd if clause.
// CHECK-LABEL: omp_simd_pretty_if
func.func @omp_simd_pretty_if(%lb : index, %ub : index, %step : index, %if_cond : i1) -> () {
  // CHECK: omp.simd if(%{{.*}})
  omp.simd if(%if_cond) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for the pretty-form omp.simd nontemporal clause.
// CHECK-LABEL: func.func @omp_simd_pretty_nontemporal
func.func @omp_simd_pretty_nontemporal(%lb : index, %ub : index, %step : index,
                                       %data_var : memref<i32>,
                                       %data_var1 : memref<i32>) -> () {
  // CHECK: omp.simd nontemporal(%{{.*}}, %{{.*}} : memref<i32>, memref<i32>)
  omp.simd nontemporal(%data_var, %data_var1 : memref<i32>, memref<i32>) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for all omp.simd order clause variants.
// CHECK-LABEL: omp_simd_pretty_order
func.func @omp_simd_pretty_order(%lb : index, %ub : index, %step : index) -> () {
  // CHECK: omp.simd order(concurrent)
  omp.simd order(concurrent) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: omp.simd order(reproducible:concurrent)
  omp.simd order(reproducible:concurrent) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: omp.simd order(unconstrained:concurrent)
  omp.simd order(unconstrained:concurrent) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for the omp.simd simdlen clause.
// CHECK-LABEL: omp_simd_pretty_simdlen
func.func @omp_simd_pretty_simdlen(%lb : index, %ub : index, %step : index) -> () {
  // CHECK: omp.simd simdlen(2)
  omp.simd simdlen(2) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for the omp.simd safelen clause.
// CHECK-LABEL: omp_simd_pretty_safelen
func.func @omp_simd_pretty_safelen(%lb : index, %ub : index, %step : index) -> () {
  // CHECK: omp.simd safelen(2)
  omp.simd safelen(2) {
    omp.loop_nest (%iv): index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  return
}

// Round-trip test for omp.distribute: generic and pretty forms, dist_schedule,
// order clause variants, allocate, and a composite distribute+simd nest.
// CHECK-LABEL: omp_distribute
func.func @omp_distribute(%chunk_size : i32, %data_var : memref<i32>, %arg0 : i32) -> () {
  // CHECK: omp.distribute
  "omp.distribute" () ({
    "omp.loop_nest" (%arg0, %arg0, %arg0) ({
    ^bb0(%iv: i32):
      "omp.yield"() : () -> ()
    }) : (i32, i32, i32) -> ()
  }) {} : () -> ()
  // CHECK: omp.distribute
  omp.distribute {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute dist_schedule_static
  omp.distribute dist_schedule_static {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute dist_schedule_static dist_schedule_chunk_size(%{{.+}} : i32)
  omp.distribute dist_schedule_static dist_schedule_chunk_size(%chunk_size : i32) {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute order(concurrent)
  omp.distribute order(concurrent) {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute order(reproducible:concurrent)
  omp.distribute order(reproducible:concurrent) {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute order(unconstrained:concurrent)
  omp.distribute order(unconstrained:concurrent) {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute allocate(%{{.+}} : memref<i32> -> %{{.+}} : memref<i32>)
  omp.distribute allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
    omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  // CHECK: omp.distribute
  omp.distribute {
    omp.simd {
      omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
        omp.yield
      }
    } {omp.composite}
  } {omp.composite}
  return
}


// Round-trip test for omp.target: optional clauses (device/if/thread_limit/
// nowait), map_entries via omp.map.info, is_device_ptr, and has_device_addr.
// CHECK-LABEL: omp_target
func.func @omp_target(%if_cond : i1, %device : si32,  %num_threads : i32, %device_ptr: memref<i32>, %device_addr: memref<?xi32>, %map1: memref<?xi32>, %map2: memref<?xi32>) -> () {

    // Test with optional operands; if_expr, device, thread_limit, private, firstprivate and nowait.
    // CHECK: omp.target device({{.*}}) if({{.*}}) nowait thread_limit({{.*}})
    "omp.target"(%device, %if_cond, %num_threads) ({
       // CHECK: omp.terminator
       omp.terminator
    }) {nowait, operandSegmentSizes = array<i32: 0,0,0,1,0,0,1,0,0,0,0,1>} : ( si32, i1, i32 ) -> ()

    // Test with optional map clause.
    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_1:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: %[[MAP_B:.*]] = omp.map.info var_ptr(%[[VAL_2:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target has_device_addr(%[[VAL_5:.*]] : memref<?xi32>) is_device_ptr(%[[VAL_4:.*]] : memref<i32>) map_entries(%[[MAP_A]] -> {{.*}}, %[[MAP_B]] -> {{.*}} : memref<?xi32>, memref<?xi32>) {
    %mapv1 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
    %mapv2 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target is_device_ptr(%device_ptr : memref<i32>) has_device_addr(%device_addr : memref<?xi32>) map_entries(%mapv1 -> %arg0, %mapv2 -> %arg1 : memref<?xi32>, memref<?xi32>) {
      omp.terminator
    }
    // CHECK: %[[MAP_C:.*]] = omp.map.info var_ptr(%[[VAL_1:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(to) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: %[[MAP_D:.*]] = omp.map.info var_ptr(%[[VAL_2:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(always, from) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target map_entries(%[[MAP_C]] -> {{.*}}, %[[MAP_D]] -> {{.*}} : memref<?xi32>, memref<?xi32>) {
    %mapv3 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(to) capture(ByRef) -> memref<?xi32> {name = ""}
    %mapv4 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>)   map_clauses(always, from) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target map_entries(%mapv3 -> %arg0, %mapv4 -> %arg1 : memref<?xi32>, memref<?xi32>) {
      omp.terminator
    }
    // CHECK: omp.barrier
    omp.barrier

    return
}

// Round-trip test for omp.target_data / omp.target_enter_data /
// omp.target_exit_data with map_entries, use_device_addr, and use_device_ptr.
func.func @omp_target_data (%if_cond : i1, %device : si32, %device_ptr: memref<i32>, %device_addr: memref<?xi32>, %map1: memref<?xi32>, %map2: memref<?xi32>) -> () {
    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_2:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(always, from) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target_data device(%[[VAL_1:.*]] : si32) if(%[[VAL_0:.*]]) map_entries(%[[MAP_A]] : memref<?xi32>)
    %mapv1 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(always, from) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target_data if(%if_cond) device(%device : si32) map_entries(%mapv1 : memref<?xi32>){}

    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_2:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(close, present, to) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target_data map_entries(%[[MAP_A]] : memref<?xi32>) use_device_addr(%[[VAL_3:.*]] -> %{{.*}} : memref<?xi32>) use_device_ptr(%[[VAL_4:.*]] -> %{{.*}} : memref<i32>)
    %mapv2 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(close, present, to) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target_data map_entries(%mapv2 : memref<?xi32>) use_device_addr(%device_addr -> %arg0 : memref<?xi32>) use_device_ptr(%device_ptr -> %arg1 : memref<i32>) {
      omp.terminator
    }

    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_1:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: %[[MAP_B:.*]] = omp.map.info var_ptr(%[[VAL_2:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target_data map_entries(%[[MAP_A]], %[[MAP_B]] : memref<?xi32>, memref<?xi32>)
    %mapv3 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
    %mapv4 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target_data map_entries(%mapv3, %mapv4 : memref<?xi32>, memref<?xi32>) {}

    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_3:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target_enter_data device(%[[VAL_1:.*]] : si32) if(%[[VAL_0:.*]]) map_entries(%[[MAP_A]] : memref<?xi32>) nowait
    %mapv5 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target_enter_data if(%if_cond) device(%device : si32) nowait map_entries(%mapv5 : memref<?xi32>)

    // CHECK: %[[MAP_A:.*]] = omp.map.info var_ptr(%[[VAL_3:.*]] : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    // CHECK: omp.target_exit_data device(%[[VAL_1:.*]] : si32) if(%[[VAL_0:.*]]) map_entries(%[[MAP_A]] : memref<?xi32>) nowait
    %mapv6 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}
    omp.target_exit_data if(%if_cond) device(%device : si32) nowait map_entries(%mapv6 : memref<?xi32>)

    return
}

// Round-trip test for the pretty (custom) form of omp.target with the
// if/device clauses, and with thread_limit + nowait added.
831// CHECK-LABEL: omp_target_pretty
832func.func @omp_target_pretty(%if_cond : i1, %device : si32,  %num_threads : i32) -> () {
833    // CHECK: omp.target device({{.*}}) if({{.*}})
834    omp.target if(%if_cond) device(%device : si32) {
835      omp.terminator
836    }
837
838    // CHECK: omp.target device({{.*}}) if({{.*}}) nowait
    // NOTE(review): thread_limit is parsed below but not pinned by the pattern
    // above; the greedy {{.*}} after if( absorbs it.
839    omp.target if(%if_cond) device(%device : si32) thread_limit(%num_threads : i32) nowait {
840      omp.terminator
841    }
842
843    return
844}
845
// Round-trip test for omp.declare_reduction with all four optional regions:
// init, combiner, atomic, and cleanup. Used by the reduction tests below.
846// CHECK: omp.declare_reduction
847// CHECK-LABEL: @add_f32
848// CHECK: : f32
849// CHECK: init
850// CHECK: ^{{.+}}(%{{.+}}: f32):
851// CHECK:   omp.yield
852// CHECK: combiner
853// CHECK: ^{{.+}}(%{{.+}}: f32, %{{.+}}: f32):
854// CHECK:   omp.yield
855// CHECK: atomic
856// CHECK: ^{{.+}}(%{{.+}}: !llvm.ptr, %{{.+}}: !llvm.ptr):
857// CHECK:  omp.yield
858// CHECK: cleanup
859// CHECK:  omp.yield
860omp.declare_reduction @add_f32 : f32
861init {
862^bb0(%arg: f32):
863  %0 = arith.constant 0.0 : f32
864  omp.yield (%0 : f32)
865}
866combiner {
867^bb1(%arg0: f32, %arg1: f32):
868  %1 = arith.addf %arg0, %arg1 : f32
869  omp.yield (%1 : f32)
870}
871atomic {
872^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
873  %2 = llvm.load %arg3 : !llvm.ptr -> f32
874  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
875  omp.yield
876}
877cleanup {
878^bb0(%arg: f32):
879  omp.yield
880}
881
// Round-trip test for the by-value reduction clause on omp.wsloop, including
// use of the private reduction block argument inside the loop body.
882// CHECK-LABEL: func @wsloop_reduction
883func.func @wsloop_reduction(%lb : index, %ub : index, %step : index) {
884  %c1 = arith.constant 1 : i32
885  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
886  // CHECK: reduction(@add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
887  omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
888    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
889      // CHECK: %[[CST:.+]] = arith.constant 2.0{{.*}} : f32
890      %cst = arith.constant 2.0 : f32
891      // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> f32
892      %lprv = llvm.load %prv : !llvm.ptr -> f32
893      // CHECK: %[[RES:.+]] = llvm.fadd %[[LPRV]], %[[CST]] : f32
894      %res = llvm.fadd %lprv, %cst: f32
895      // CHECK: llvm.store %[[RES]], %[[PRV]] :  f32, !llvm.ptr
896      llvm.store %res, %prv :  f32, !llvm.ptr
897      omp.yield
898    }
899  }
900  return
901}
902
// Round-trip test for the inscan reduction modifier on omp.wsloop, with both
// the inclusive and exclusive forms of omp.scan in the loop body.
903// CHECK-LABEL: func @wsloop_inscan_reduction
904func.func @wsloop_inscan_reduction(%lb : index, %ub : index, %step : index) {
905  %c1 = arith.constant 1 : i32
906  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
907  // CHECK: reduction(mod: inscan, @add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
908  omp.wsloop reduction(mod:inscan, @add_f32 %0 -> %prv : !llvm.ptr) {
909    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
910       // CHECK: omp.scan inclusive(%{{.*}} : !llvm.ptr)
911       omp.scan inclusive(%prv : !llvm.ptr)
912       omp.yield
913    }
914  }
915  // CHECK: reduction(mod: inscan, @add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
916  omp.wsloop reduction(mod:inscan, @add_f32 %0 -> %prv : !llvm.ptr) {
917    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
918       // CHECK: omp.scan exclusive(%{{.*}} : !llvm.ptr)
919       omp.scan exclusive(%prv : !llvm.ptr)
920       omp.yield
921    }
922  }
923  return
924}
925
// Round-trip test for the byref reduction modifier on omp.wsloop; mirrors
// @wsloop_reduction above except for the byref keyword.
926// CHECK-LABEL: func @wsloop_reduction_byref
927func.func @wsloop_reduction_byref(%lb : index, %ub : index, %step : index) {
928  %c1 = arith.constant 1 : i32
929  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
930  // CHECK: reduction(byref @add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
931  omp.wsloop reduction(byref @add_f32 %0 -> %prv : !llvm.ptr) {
932    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
933      // CHECK: %[[CST:.+]] = arith.constant 2.0{{.*}} : f32
934      %cst = arith.constant 2.0 : f32
935      // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> f32
936      %lprv = llvm.load %prv : !llvm.ptr -> f32
937      // CHECK: %[[RES:.+]] = llvm.fadd %[[LPRV]], %[[CST]] : f32
938      %res = llvm.fadd %lprv, %cst: f32
939      // CHECK: llvm.store %[[RES]], %[[PRV]] :  f32, !llvm.ptr
940      llvm.store %res, %prv :  f32, !llvm.ptr
941      omp.yield
942    }
943  }
944  return
945}
946
// Round-trip test for the reduction clause directly on omp.parallel.
947// CHECK-LABEL: func @parallel_reduction
948func.func @parallel_reduction() {
949  %c1 = arith.constant 1 : i32
950  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
951  // CHECK: omp.parallel reduction(@add_f32 {{.+}} -> {{.+}} : !llvm.ptr)
952  omp.parallel reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
953    %1 = arith.constant 2.0 : f32
954    %2 = llvm.load %prv : !llvm.ptr -> f32
955    // CHECK: llvm.fadd %{{.*}}, %{{.*}} : f32
956    %3 = llvm.fadd %1, %2 : f32
957    llvm.store %3, %prv : f32, !llvm.ptr
958    omp.terminator
959  }
960  return
961}
962
// Round-trip test for the byref reduction modifier on omp.parallel; mirrors
// @parallel_reduction above except for the byref keyword.
963// CHECK-LABEL: func @parallel_reduction_byref
964func.func @parallel_reduction_byref() {
965  %c1 = arith.constant 1 : i32
966  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
967  // CHECK: omp.parallel reduction(byref @add_f32 {{.+}} -> {{.+}} : !llvm.ptr)
968  omp.parallel reduction(byref @add_f32 %0 -> %prv : !llvm.ptr) {
969    %1 = arith.constant 2.0 : f32
970    %2 = llvm.load %prv : !llvm.ptr -> f32
971    // CHECK: llvm.fadd %{{.*}}, %{{.*}} : f32
972    %3 = llvm.fadd %1, %2 : f32
973    llvm.store %3, %prv : f32, !llvm.ptr
974    omp.terminator
975  }
976  return
977}
978
// Round-trip test for an omp.wsloop with a reduction clause nested inside an
// omp.parallel region (the reduction is on the loop, not the parallel).
979// CHECK: func @parallel_wsloop_reduction
980func.func @parallel_wsloop_reduction(%lb : index, %ub : index, %step : index) {
981  %c1 = arith.constant 1 : i32
982  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
983  // CHECK: omp.parallel {
984  omp.parallel {
985    // CHECK: omp.wsloop reduction(@add_f32 %{{.*}} -> %{{.+}} : !llvm.ptr) {
986    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
987      // CHECK: omp.loop_nest (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) {
988      omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
989        %1 = arith.constant 2.0 : f32
990        %2 = llvm.load %prv : !llvm.ptr -> f32
991        // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32
992        llvm.fadd %1, %2 : f32
993        // CHECK: omp.yield
994        omp.yield
995      }
996    }
997    // CHECK: omp.terminator
998    omp.terminator
999  }
1000  return
1001}
1002
// Round-trip tests for omp.teams: nesting in omp.target, plain form, and each
// clause in turn (num_teams with/without lower bound, if, thread_limit,
// reduction by value and byref, allocate).
1003// CHECK-LABEL: omp_teams
1004func.func @omp_teams(%lb : i32, %ub : i32, %if_cond : i1, %num_threads : i32,
1005                     %data_var : memref<i32>) -> () {
1006  // Test nesting inside of omp.target
1007  omp.target {
1008    // CHECK: omp.teams
1009    omp.teams {
1010      // CHECK: omp.terminator
1011      omp.terminator
1012    }
1013    // CHECK: omp.terminator
1014    omp.terminator
1015  }
1016
1017  // CHECK: omp.teams
1018  omp.teams {
1019    %0 = arith.constant 1 : i32
1020    // CHECK: omp.terminator
1021    omp.terminator
1022  }
1023
1024  // Test num teams.
1025  // CHECK: omp.teams num_teams(%{{.+}} : i32 to %{{.+}} : i32)
1026  omp.teams num_teams(%lb : i32 to %ub : i32) {
1027    // CHECK: omp.terminator
1028    omp.terminator
1029  }
1030
1031  // CHECK: omp.teams num_teams( to %{{.+}} : i32)
1032  omp.teams num_teams(to %ub : i32) {
1033    // CHECK: omp.terminator
1034    omp.terminator
1035  }
1036
1037  // Test if.
1038  // CHECK: omp.teams if(%{{.+}})
1039  omp.teams if(%if_cond) {
1040    // CHECK: omp.terminator
1041    omp.terminator
1042  }
1043
1044  // Test thread limit.
1045  // CHECK: omp.teams thread_limit(%{{.+}} : i32)
1046  omp.teams thread_limit(%num_threads : i32) {
1047    // CHECK: omp.terminator
1048    omp.terminator
1049  }
1050
1051  // Test reduction.
1052  %c1 = arith.constant 1 : i32
1053  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
1054  // CHECK: omp.teams reduction(@add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr) {
1055  omp.teams reduction(@add_f32 %0 -> %arg0 : !llvm.ptr) {
1056    %1 = arith.constant 2.0 : f32
1057    // CHECK: omp.terminator
1058    omp.terminator
1059  }
1060
1061  // Test reduction byref
1062  // CHECK: omp.teams reduction(byref @add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr) {
1063  omp.teams reduction(byref @add_f32 %0 -> %arg0 : !llvm.ptr) {
1064    %1 = arith.constant 2.0 : f32
1065    // CHECK: omp.terminator
1066    omp.terminator
1067  }
1068
1069  // Test allocate.
1070  // CHECK: omp.teams allocate(%{{.+}} : memref<i32> -> %{{.+}} : memref<i32>)
1071  omp.teams allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
1072    // CHECK: omp.terminator
1073    omp.terminator
1074  }
1075
1076  return
1077}
1078
// Round-trip test for the reduction clause on omp.sections with two
// omp.section regions.
1079// CHECK-LABEL: func @sections_reduction
1080func.func @sections_reduction() {
1081  %c1 = arith.constant 1 : i32
1082  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
1083  // CHECK: omp.sections reduction(@add_f32 %{{.+}} -> {{.+}} : !llvm.ptr)
1084  omp.sections reduction(@add_f32 %0 -> %arg0 : !llvm.ptr) {
1085    // CHECK: omp.section
1086    omp.section {
1087    ^bb0(%arg1 : !llvm.ptr):
1088      %1 = arith.constant 2.0 : f32
1089      omp.terminator
1090    }
1091    // CHECK: omp.section
1092    omp.section {
1093    ^bb0(%arg1 : !llvm.ptr):
1094      %1 = arith.constant 3.0 : f32
1095      omp.terminator
1096    }
1097    omp.terminator
1098  }
1099  return
1100}
1101
// Round-trip test for the byref reduction modifier on omp.sections; mirrors
// @sections_reduction above except for the byref keyword.
1102// CHECK-LABEL: func @sections_reduction_byref
1103func.func @sections_reduction_byref() {
1104  %c1 = arith.constant 1 : i32
1105  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
1106  // CHECK: omp.sections reduction(byref @add_f32 %{{.+}} -> {{.+}} : !llvm.ptr)
1107  omp.sections reduction(byref @add_f32 %0 -> %arg0 : !llvm.ptr) {
1108    // CHECK: omp.section
1109    omp.section {
1110    ^bb0(%arg1 : !llvm.ptr):
1111      %1 = arith.constant 2.0 : f32
1112      omp.terminator
1113    }
1114    // CHECK: omp.section
1115    omp.section {
1116    ^bb0(%arg1 : !llvm.ptr):
1117      %1 = arith.constant 3.0 : f32
1118      omp.terminator
1119    }
1120    omp.terminator
1121  }
1122  return
1123}
1124
// Round-trip test for omp.declare_reduction with only the required init and
// combiner regions; the optional atomic/cleanup regions must not be printed.
1125// CHECK: omp.declare_reduction
1126// CHECK-LABEL: @add2_f32
1127omp.declare_reduction @add2_f32 : f32
1128// CHECK: init
1129init {
1130^bb0(%arg: f32):
1131  %0 = arith.constant 0.0 : f32
1132  omp.yield (%0 : f32)
1133}
1134// CHECK: combiner
1135combiner {
1136^bb1(%arg0: f32, %arg1: f32):
1137  %1 = arith.addf %arg0, %arg1 : f32
1138  omp.yield (%1 : f32)
1139}
1140// CHECK-NOT: atomic
1141// CHECK-NOT: cleanup
1142
// Round-trip test for an omp.wsloop reduction whose accumulator is a memref
// rather than an !llvm.ptr.
1143// CHECK-LABEL: func @wsloop_reduction2
1144func.func @wsloop_reduction2(%lb : index, %ub : index, %step : index) {
1145  %0 = memref.alloca() : memref<1xf32>
1146  // CHECK: omp.wsloop reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>) {
1147  omp.wsloop reduction(@add2_f32 %0 -> %prv : memref<1xf32>) {
1148    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
1149      %1 = arith.constant 2.0 : f32
1150      %2 = arith.constant 0 : index
1151      %3 = memref.load %prv[%2] : memref<1xf32>
1152      // CHECK: llvm.fadd
1153      %4 = llvm.fadd %1, %3 : f32
1154      memref.store %4, %prv[%2] : memref<1xf32>
1155      omp.yield
1156    }
1157  }
1158  return
1159}
1160
// Round-trip test for an omp.parallel reduction whose accumulator is a memref
// rather than an !llvm.ptr.
1161// CHECK-LABEL: func @parallel_reduction2
1162func.func @parallel_reduction2() {
1163  %0 = memref.alloca() : memref<1xf32>
1164  // CHECK: omp.parallel reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>)
1165  omp.parallel reduction(@add2_f32 %0 -> %prv : memref<1xf32>) {
1166    %1 = arith.constant 2.0 : f32
1167    %2 = arith.constant 0 : index
1168    %3 = memref.load %prv[%2] : memref<1xf32>
1169    // CHECK: llvm.fadd
1170    %4 = llvm.fadd %1, %3 : f32
1171    memref.store %4, %prv[%2] : memref<1xf32>
1172    omp.terminator
1173  }
1174  return
1175}
1176
// Round-trip test for an omp.wsloop using @add2_f32 (init + combiner only)
// nested inside an omp.parallel region.
1177// CHECK: func @parallel_wsloop_reduction2
1178func.func @parallel_wsloop_reduction2(%lb : index, %ub : index, %step : index) {
1179  %c1 = arith.constant 1 : i32
1180  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
1181  // CHECK: omp.parallel {
1182  omp.parallel {
1183    // CHECK: omp.wsloop reduction(@add2_f32 %{{.*}} -> %{{.+}} : !llvm.ptr) {
1184    omp.wsloop reduction(@add2_f32 %0 -> %prv : !llvm.ptr) {
1185      // CHECK: omp.loop_nest (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) {
1186      omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
1187        %1 = arith.constant 2.0 : f32
1188        %2 = llvm.load %prv : !llvm.ptr -> f32
1189        // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32
1190        %3 = llvm.fadd %1, %2 : f32
1191        // CHECK: omp.yield
1192        omp.yield
1193      }
1194    }
1195    // CHECK: omp.terminator
1196    omp.terminator
1197  }
1198  return
1199}
1200
// Round-trip test for an omp.sections reduction whose accumulator is a memref
// rather than an !llvm.ptr.
1201// CHECK-LABEL: func @sections_reduction2
1202func.func @sections_reduction2() {
1203  %0 = memref.alloca() : memref<1xf32>
1204  // CHECK: omp.sections reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>)
1205  omp.sections reduction(@add2_f32 %0 -> %arg0 : memref<1xf32>) {
1206    omp.section {
1207    ^bb0(%arg1 : !llvm.ptr):
1208      %1 = arith.constant 2.0 : f32
1209      omp.terminator
1210    }
1211    omp.section {
1212    ^bb0(%arg1 : !llvm.ptr):
1213      %1 = arith.constant 2.0 : f32
1214      omp.terminator
1215    }
1216    omp.terminator
1217  }
1218  return
1219}
1220
// Round-trip tests for omp.critical.declare covering every hint combination;
// hint(none) and the absent hint both print without a hint clause.
1221// CHECK: omp.critical.declare @mutex1 hint(uncontended)
1222omp.critical.declare @mutex1 hint(uncontended)
1223// CHECK: omp.critical.declare @mutex2 hint(contended)
1224omp.critical.declare @mutex2 hint(contended)
1225// CHECK: omp.critical.declare @mutex3 hint(nonspeculative)
1226omp.critical.declare @mutex3 hint(nonspeculative)
1227// CHECK: omp.critical.declare @mutex4 hint(speculative)
1228omp.critical.declare @mutex4 hint(speculative)
1229// CHECK: omp.critical.declare @mutex5 hint(uncontended, nonspeculative)
1230omp.critical.declare @mutex5 hint(uncontended, nonspeculative)
1231// CHECK: omp.critical.declare @mutex6 hint(contended, nonspeculative)
1232omp.critical.declare @mutex6 hint(contended, nonspeculative)
1233// CHECK: omp.critical.declare @mutex7 hint(uncontended, speculative)
1234omp.critical.declare @mutex7 hint(uncontended, speculative)
1235// CHECK: omp.critical.declare @mutex8 hint(contended, speculative)
1236omp.critical.declare @mutex8 hint(contended, speculative)
1237// CHECK: omp.critical.declare @mutex9
1238omp.critical.declare @mutex9 hint(none)
1239// CHECK: omp.critical.declare @mutex10
1240omp.critical.declare @mutex10
1241
1242
// Round-trip test for omp.critical, both unnamed and referencing a declared
// critical name.
1243// CHECK-LABEL: omp_critical
1244func.func @omp_critical() -> () {
1245  // CHECK: omp.critical
1246  omp.critical {
1247    omp.terminator
1248  }
1249
1250  // CHECK: omp.critical(@{{.*}})
1251  omp.critical(@mutex1) {
1252    omp.terminator
1253  }
1254  return
1255}
1256
// Round-trip tests for omp.ordered.region (standalone and inside a wsloop)
// and for omp.ordered with doacross depend clauses of one and two loops.
1257func.func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32,
1258    %vec0 : i64, %vec1 : i64, %vec2 : i64, %vec3 : i64) -> () {
1259  // CHECK: omp.ordered.region
1260  omp.ordered.region {
1261    // CHECK: omp.terminator
1262    omp.terminator
1263  }
1264
1265  omp.wsloop ordered(0) {
1266    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3)  {
1267      // CHECK: omp.ordered.region
1268      omp.ordered.region {
1269        // CHECK: omp.terminator
1270        omp.terminator
1271      }
1272      omp.yield
1273    }
1274  }
1275
1276  omp.wsloop ordered(1) {
1277    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
1278      // Only one DEPEND(SINK: vec) clause
1279      // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}} : i64) {doacross_num_loops = 1 : i64}
1280      omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {doacross_num_loops = 1 : i64}
1281
1282      // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}} : i64) {doacross_num_loops = 1 : i64}
1283      omp.ordered depend_type(dependsource) depend_vec(%vec0 : i64) {doacross_num_loops = 1 : i64}
1284
1285      omp.yield
1286    }
1287  }
1288
1289  omp.wsloop ordered(2) {
1290    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
1291      // Multiple DEPEND(SINK: vec) clauses
1292      // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, i64, i64) {doacross_num_loops = 2 : i64}
1293      omp.ordered depend_type(dependsink) depend_vec(%vec0, %vec1, %vec2, %vec3 : i64, i64, i64, i64) {doacross_num_loops = 2 : i64}
1294
1295      // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}}, %{{.*}} : i64, i64) {doacross_num_loops = 2 : i64}
1296      omp.ordered depend_type(dependsource) depend_vec(%vec0, %vec1 : i64, i64) {doacross_num_loops = 2 : i64}
1297
1298      omp.yield
1299    }
1300  }
1301
1302  return
1303}
1304
// Round-trip tests for omp.atomic.read: bare form, each memory_order, and
// hint combinations. Hints are printed in canonical order regardless of the
// order they were written, and hint(none) is elided.
1305// CHECK-LABEL: omp_atomic_read
1306// CHECK-SAME: (%[[v:.*]]: memref<i32>, %[[x:.*]]: memref<i32>)
1307func.func @omp_atomic_read(%v: memref<i32>, %x: memref<i32>) {
1308  // CHECK: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1309  omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1310  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1311  omp.atomic.read %v = %x memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1312  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(acquire) : memref<i32>, memref<i32>, i32
1313  omp.atomic.read %v = %x memory_order(acquire) : memref<i32>, memref<i32>, i32
1314  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(relaxed) : memref<i32>, memref<i32>, i32
1315  omp.atomic.read %v = %x memory_order(relaxed) : memref<i32>, memref<i32>, i32
1316  // CHECK: omp.atomic.read %[[v]] = %[[x]] hint(contended, nonspeculative) : memref<i32>, memref<i32>, i32
1317  omp.atomic.read %v = %x hint(nonspeculative, contended) : memref<i32>, memref<i32>, i32
1318  // CHECK: omp.atomic.read %[[v]] = %[[x]] hint(contended, speculative) memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1319  omp.atomic.read %v = %x hint(speculative, contended) memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1320  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1321  omp.atomic.read %v = %x hint(none) memory_order(seq_cst) : memref<i32>, memref<i32>, i32
1322  return
1323}
1324
// Round-trip tests for omp.atomic.write: bare form, each memory_order, and
// hints (canonicalized ordering; hint(none) is elided).
1325// CHECK-LABEL: omp_atomic_write
1326// CHECK-SAME: (%[[ADDR:.*]]: memref<i32>, %[[VAL:.*]]: i32)
1327func.func @omp_atomic_write(%addr : memref<i32>, %val : i32) {
1328  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] : memref<i32>, i32
1329  omp.atomic.write %addr = %val : memref<i32>, i32
1330  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(seq_cst) : memref<i32>, i32
1331  omp.atomic.write %addr = %val memory_order(seq_cst) : memref<i32>, i32
1332  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(release) : memref<i32>, i32
1333  omp.atomic.write %addr = %val memory_order(release) : memref<i32>, i32
1334  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(relaxed) : memref<i32>, i32
1335  omp.atomic.write %addr = %val memory_order(relaxed) : memref<i32>, i32
1336  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] hint(uncontended, speculative) : memref<i32>, i32
1337  omp.atomic.write %addr = %val hint(speculative, uncontended) : memref<i32>, i32
1338  // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] : memref<i32>, i32
1339  omp.atomic.write %addr = %val hint(none) : memref<i32>, i32
1340  return
1341}
1342
// Round-trip tests for omp.atomic.update: various update-region bodies
// (add, and, shl, smax, icmp, identity yield, constant yield), every hint
// combination, and each memory_order. Fix: the icmp case previously ended
// with a stray "// }" comment instead of "// CHECK-NEXT: }", so the region's
// closing brace was never verified; it now matches the sibling cases.
1343// CHECK-LABEL: omp_atomic_update
1344// CHECK-SAME: (%[[X:.*]]: memref<i32>, %[[EXPR:.*]]: i32, %[[XBOOL:.*]]: memref<i1>, %[[EXPRBOOL:.*]]: i1)
1345func.func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>, %exprBool : i1) {
1346  // CHECK: omp.atomic.update %[[X]] : memref<i32>
1347  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1348  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1349  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1350  omp.atomic.update %x : memref<i32> {
1351  ^bb0(%xval: i32):
1352    %newval = llvm.add %xval, %expr : i32
1353    omp.yield(%newval : i32)
1354  }
1355  // CHECK: omp.atomic.update %[[XBOOL]] : memref<i1>
1356  // CHECK-NEXT: (%[[XVAL:.*]]: i1):
1357  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.and %[[XVAL]], %[[EXPRBOOL]] : i1
1358  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i1)
1359  omp.atomic.update %xBool : memref<i1> {
1360  ^bb0(%xval: i1):
1361    %newval = llvm.and %xval, %exprBool : i1
1362    omp.yield(%newval : i1)
1363  }
1364  // CHECK: omp.atomic.update %[[X]] : memref<i32>
1365  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1366  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.shl %[[XVAL]], %[[EXPR]] : i32
1367  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1368  // CHECK-NEXT: }
1369  omp.atomic.update %x : memref<i32> {
1370  ^bb0(%xval: i32):
1371    %newval = llvm.shl %xval, %expr : i32
1372    omp.yield(%newval : i32)
1373  }
1374  // CHECK: omp.atomic.update %[[X]] : memref<i32>
1375  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1376  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.intr.smax(%[[XVAL]], %[[EXPR]]) : (i32, i32) -> i32
1377  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1378  // CHECK-NEXT: }
1379  omp.atomic.update %x : memref<i32> {
1380  ^bb0(%xval: i32):
1381    %newval = llvm.intr.smax(%xval, %expr) : (i32, i32) -> i32
1382    omp.yield(%newval : i32)
1383  }
1384
1385  // CHECK: omp.atomic.update %[[XBOOL]] : memref<i1>
1386  // CHECK-NEXT: (%[[XVAL:.*]]: i1):
1387  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.icmp "eq" %[[XVAL]], %[[EXPRBOOL]] : i1
1388  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i1)
1389  // CHECK-NEXT: }
1390  omp.atomic.update %xBool : memref<i1> {
1391  ^bb0(%xval: i1):
1392    %newval = llvm.icmp "eq" %xval, %exprBool : i1
1393    omp.yield(%newval : i1)
1394  }
1395
1396  // CHECK: omp.atomic.update %[[X]] : memref<i32> {
1397  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1398  // CHECK-NEXT:   omp.yield(%[[XVAL]] : i32)
1399  // CHECK-NEXT: }
1400  omp.atomic.update %x : memref<i32> {
1401  ^bb0(%xval:i32):
1402    omp.yield(%xval:i32)
1403  }
1404
1405  // CHECK: omp.atomic.update %[[X]] : memref<i32> {
1406  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1407  // CHECK-NEXT:   omp.yield(%{{.+}} : i32)
1408  // CHECK-NEXT: }
1409  %const = arith.constant 42 : i32
1410  omp.atomic.update %x : memref<i32> {
1411  ^bb0(%xval:i32):
1412    omp.yield(%const:i32)
1413  }
1414
1415  // CHECK: omp.atomic.update %[[X]] : memref<i32>
1416  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1417  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1418  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1419  omp.atomic.update hint(none) %x : memref<i32> {
1420  ^bb0(%xval: i32):
1421    %newval = llvm.add %xval, %expr : i32
1422    omp.yield(%newval : i32)
1423  }
1424
1425  // CHECK: omp.atomic.update hint(uncontended) %[[X]] : memref<i32>
1426  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1427  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1428  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1429  omp.atomic.update hint(uncontended) %x : memref<i32> {
1430  ^bb0(%xval: i32):
1431    %newval = llvm.add %xval, %expr : i32
1432    omp.yield(%newval : i32)
1433  }
1434
1435  // CHECK: omp.atomic.update hint(contended) %[[X]] : memref<i32>
1436  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1437  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1438  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1439  omp.atomic.update hint(contended) %x : memref<i32> {
1440  ^bb0(%xval: i32):
1441    %newval = llvm.add %xval, %expr : i32
1442    omp.yield(%newval : i32)
1443  }
1444
1445  // CHECK: omp.atomic.update hint(nonspeculative) %[[X]] : memref<i32>
1446  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1447  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1448  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1449  omp.atomic.update hint(nonspeculative) %x : memref<i32> {
1450  ^bb0(%xval: i32):
1451    %newval = llvm.add %xval, %expr : i32
1452    omp.yield(%newval : i32)
1453  }
1454
1455  // CHECK: omp.atomic.update hint(speculative) %[[X]] : memref<i32>
1456  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1457  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1458  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1459  omp.atomic.update hint(speculative) %x : memref<i32> {
1460  ^bb0(%xval: i32):
1461    %newval = llvm.add %xval, %expr : i32
1462    omp.yield(%newval : i32)
1463  }
1464
1465  // CHECK: omp.atomic.update hint(uncontended, nonspeculative) %[[X]] : memref<i32>
1466  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1467  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1468  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1469  omp.atomic.update hint(uncontended, nonspeculative) %x : memref<i32> {
1470  ^bb0(%xval: i32):
1471    %newval = llvm.add %xval, %expr : i32
1472    omp.yield(%newval : i32)
1473  }
1474
1475  // CHECK: omp.atomic.update hint(contended, nonspeculative) %[[X]] : memref<i32>
1476  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1477  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1478  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1479  omp.atomic.update hint(contended, nonspeculative) %x : memref<i32> {
1480  ^bb0(%xval: i32):
1481    %newval = llvm.add %xval, %expr : i32
1482    omp.yield(%newval : i32)
1483  }
1484
1485  // CHECK: omp.atomic.update hint(uncontended, speculative) %[[X]] : memref<i32>
1486  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1487  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1488  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1489  omp.atomic.update hint(uncontended, speculative) %x : memref<i32> {
1490  ^bb0(%xval: i32):
1491    %newval = llvm.add %xval, %expr : i32
1492    omp.yield(%newval : i32)
1493  }
1494
1495  // CHECK: omp.atomic.update hint(contended, speculative) %[[X]] : memref<i32>
1496  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1497  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1498  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1499  omp.atomic.update hint(contended, speculative) %x : memref<i32> {
1500  ^bb0(%xval: i32):
1501    %newval = llvm.add %xval, %expr : i32
1502    omp.yield(%newval : i32)
1503  }
1504
1505  // CHECK: omp.atomic.update memory_order(seq_cst) %[[X]] : memref<i32>
1506  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1507  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1508  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1509  omp.atomic.update memory_order(seq_cst) %x : memref<i32> {
1510  ^bb0(%xval: i32):
1511    %newval = llvm.add %xval, %expr : i32
1512    omp.yield(%newval : i32)
1513  }
1514
1515  // CHECK: omp.atomic.update memory_order(release) %[[X]] : memref<i32>
1516  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1517  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1518  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1519  omp.atomic.update memory_order(release) %x : memref<i32> {
1520  ^bb0(%xval: i32):
1521    %newval = llvm.add %xval, %expr : i32
1522    omp.yield(%newval : i32)
1523  }
1524
1525  // CHECK: omp.atomic.update memory_order(relaxed) %[[X]] : memref<i32>
1526  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1527  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1528  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1529  omp.atomic.update memory_order(relaxed) %x : memref<i32> {
1530  ^bb0(%xval: i32):
1531    %newval = llvm.add %xval, %expr : i32
1532    omp.yield(%newval : i32)
1533  }
1534
1535  // CHECK: omp.atomic.update hint(uncontended, speculative) memory_order(seq_cst) %[[X]] : memref<i32>
1536  // CHECK-NEXT: (%[[XVAL:.*]]: i32):
1537  // CHECK-NEXT:   %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
1538  // CHECK-NEXT:   omp.yield(%[[NEWVAL]] : i32)
1539  omp.atomic.update memory_order(seq_cst) hint(uncontended, speculative) %x : memref<i32> {
1540  ^bb0(%xval: i32):
1541    %newval = llvm.add %xval, %expr : i32
1542    omp.yield(%newval : i32)
1543  }
1544
1545  return
1546}
1547
1548// CHECK-LABEL: omp_atomic_capture
1549// CHECK-SAME: (%[[v:.*]]: memref<i32>, %[[x:.*]]: memref<i32>, %[[expr:.*]]: i32)
1550func.func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) {
1551  // CHECK: omp.atomic.capture {
1552  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1553  // CHECK-NEXT: (%[[xval:.*]]: i32):
1554  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1555  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1556  // CHECK-NEXT: }
1557  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1558  // CHECK-NEXT: }
1559  omp.atomic.capture{
1560    omp.atomic.update %x : memref<i32> {
1561    ^bb0(%xval: i32):
1562      %newval = llvm.add %xval, %expr : i32
1563      omp.yield(%newval : i32)
1564    }
1565    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1566  }
1567  // CHECK: omp.atomic.capture {
1568  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1569  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1570  // CHECK-NEXT: (%[[xval:.*]]: i32):
1571  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1572  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1573  // CHECK-NEXT: }
1574  // CHECK-NEXT: }
1575  omp.atomic.capture{
1576    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1577    omp.atomic.update %x : memref<i32> {
1578    ^bb0(%xval: i32):
1579      %newval = llvm.add %xval, %expr : i32
1580      omp.yield(%newval : i32)
1581    }
1582  }
1583  // CHECK: omp.atomic.capture {
1584  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1585  // CHECK-NEXT: omp.atomic.write %[[x]] = %[[expr]] : memref<i32>, i32
1586  // CHECK-NEXT: }
1587  omp.atomic.capture{
1588    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1589    omp.atomic.write %x = %expr : memref<i32>, i32
1590  }
1591
1592  // CHECK: omp.atomic.capture {
1593  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1594  // CHECK-NEXT: (%[[xval:.*]]: i32):
1595  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1596  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1597  // CHECK-NEXT: }
1598  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1599  // CHECK-NEXT: }
1600  omp.atomic.capture hint(none) {
1601    omp.atomic.update %x : memref<i32> {
1602    ^bb0(%xval: i32):
1603      %newval = llvm.add %xval, %expr : i32
1604      omp.yield(%newval : i32)
1605    }
1606    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1607  }
1608
1609  // CHECK: omp.atomic.capture hint(uncontended) {
1610  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1611  // CHECK-NEXT: (%[[xval:.*]]: i32):
1612  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1613  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1614  // CHECK-NEXT: }
1615  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1616  // CHECK-NEXT: }
1617  omp.atomic.capture hint(uncontended) {
1618    omp.atomic.update %x : memref<i32> {
1619    ^bb0(%xval: i32):
1620      %newval = llvm.add %xval, %expr : i32
1621      omp.yield(%newval : i32)
1622    }
1623    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1624  }
1625
1626  // CHECK: omp.atomic.capture hint(contended) {
1627  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1628  // CHECK-NEXT: (%[[xval:.*]]: i32):
1629  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1630  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1631  // CHECK-NEXT: }
1632  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1633  // CHECK-NEXT: }
1634  omp.atomic.capture hint(contended) {
1635    omp.atomic.update %x : memref<i32> {
1636    ^bb0(%xval: i32):
1637      %newval = llvm.add %xval, %expr : i32
1638      omp.yield(%newval : i32)
1639    }
1640    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1641  }
1642
1643  // CHECK: omp.atomic.capture hint(nonspeculative) {
1644  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1645  // CHECK-NEXT: (%[[xval:.*]]: i32):
1646  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1647  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1648  // CHECK-NEXT: }
1649  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1650  // CHECK-NEXT: }
1651  omp.atomic.capture hint(nonspeculative) {
1652    omp.atomic.update %x : memref<i32> {
1653    ^bb0(%xval: i32):
1654      %newval = llvm.add %xval, %expr : i32
1655      omp.yield(%newval : i32)
1656    }
1657    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1658  }
1659
1660  // CHECK: omp.atomic.capture hint(speculative) {
1661  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1662  // CHECK-NEXT: (%[[xval:.*]]: i32):
1663  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1664  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1665  // CHECK-NEXT: }
1666  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1667  // CHECK-NEXT: }
1668  omp.atomic.capture hint(speculative) {
1669    omp.atomic.update %x : memref<i32> {
1670    ^bb0(%xval: i32):
1671      %newval = llvm.add %xval, %expr : i32
1672      omp.yield(%newval : i32)
1673    }
1674    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1675  }
1676
1677  // CHECK: omp.atomic.capture hint(uncontended, nonspeculative) {
1678  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1679  // CHECK-NEXT: (%[[xval:.*]]: i32):
1680  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1681  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1682  // CHECK-NEXT: }
1683  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1684  // CHECK-NEXT: }
1685  omp.atomic.capture hint(uncontended, nonspeculative) {
1686    omp.atomic.update %x : memref<i32> {
1687    ^bb0(%xval: i32):
1688      %newval = llvm.add %xval, %expr : i32
1689      omp.yield(%newval : i32)
1690    }
1691    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1692  }
1693
1694  // CHECK: omp.atomic.capture hint(contended, nonspeculative) {
1695  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1696  // CHECK-NEXT: (%[[xval:.*]]: i32):
1697  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1698  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1699  // CHECK-NEXT: }
1700  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1701  // CHECK-NEXT: }
1702  omp.atomic.capture hint(contended, nonspeculative) {
1703    omp.atomic.update %x : memref<i32> {
1704    ^bb0(%xval: i32):
1705      %newval = llvm.add %xval, %expr : i32
1706      omp.yield(%newval : i32)
1707    }
1708    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1709  }
1710
1711  // CHECK: omp.atomic.capture hint(uncontended, speculative) {
1712  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1713  // CHECK-NEXT: (%[[xval:.*]]: i32):
1714  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1715  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1716  // CHECK-NEXT: }
1717  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1718  // CHECK-NEXT: }
1719  omp.atomic.capture hint(uncontended, speculative) {
1720    omp.atomic.update %x : memref<i32> {
1721    ^bb0(%xval: i32):
1722      %newval = llvm.add %xval, %expr : i32
1723      omp.yield(%newval : i32)
1724    }
1725    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1726  }
1727
1728  // CHECK: omp.atomic.capture hint(contended, speculative) {
1729  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1730  // CHECK-NEXT: (%[[xval:.*]]: i32):
1731  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1732  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1733  // CHECK-NEXT: }
1734  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1735  // CHECK-NEXT: }
1736  omp.atomic.capture hint(contended, speculative) {
1737    omp.atomic.update %x : memref<i32> {
1738    ^bb0(%xval: i32):
1739      %newval = llvm.add %xval, %expr : i32
1740      omp.yield(%newval : i32)
1741    }
1742    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1743  }
1744
1745  // CHECK: omp.atomic.capture memory_order(seq_cst) {
1746  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1747  // CHECK-NEXT: (%[[xval:.*]]: i32):
1748  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1749  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1750  // CHECK-NEXT: }
1751  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1752  // CHECK-NEXT: }
1753  omp.atomic.capture memory_order(seq_cst) {
1754    omp.atomic.update %x : memref<i32> {
1755    ^bb0(%xval: i32):
1756      %newval = llvm.add %xval, %expr : i32
1757      omp.yield(%newval : i32)
1758    }
1759    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1760  }
1761
1762  // CHECK: omp.atomic.capture memory_order(acq_rel) {
1763  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1764  // CHECK-NEXT: (%[[xval:.*]]: i32):
1765  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1766  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1767  // CHECK-NEXT: }
1768  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1769  // CHECK-NEXT: }
1770  omp.atomic.capture memory_order(acq_rel) {
1771    omp.atomic.update %x : memref<i32> {
1772    ^bb0(%xval: i32):
1773      %newval = llvm.add %xval, %expr : i32
1774      omp.yield(%newval : i32)
1775    }
1776    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1777  }
1778
1779  // CHECK: omp.atomic.capture memory_order(acquire) {
1780  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1781  // CHECK-NEXT: (%[[xval:.*]]: i32):
1782  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1783  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1784  // CHECK-NEXT: }
1785  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1786  // CHECK-NEXT: }
1787  omp.atomic.capture memory_order(acquire) {
1788    omp.atomic.update %x : memref<i32> {
1789    ^bb0(%xval: i32):
1790      %newval = llvm.add %xval, %expr : i32
1791      omp.yield(%newval : i32)
1792    }
1793    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1794  }
1795
1796  // CHECK: omp.atomic.capture memory_order(release) {
1797  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1798  // CHECK-NEXT: (%[[xval:.*]]: i32):
1799  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1800  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1801  // CHECK-NEXT: }
1802  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1803  // CHECK-NEXT: }
1804  omp.atomic.capture memory_order(release) {
1805    omp.atomic.update %x : memref<i32> {
1806    ^bb0(%xval: i32):
1807      %newval = llvm.add %xval, %expr : i32
1808      omp.yield(%newval : i32)
1809    }
1810    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1811  }
1812
1813  // CHECK: omp.atomic.capture memory_order(relaxed) {
1814  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1815  // CHECK-NEXT: (%[[xval:.*]]: i32):
1816  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1817  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1818  // CHECK-NEXT: }
1819  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1820  // CHECK-NEXT: }
1821  omp.atomic.capture memory_order(relaxed) {
1822    omp.atomic.update %x : memref<i32> {
1823    ^bb0(%xval: i32):
1824      %newval = llvm.add %xval, %expr : i32
1825      omp.yield(%newval : i32)
1826    }
1827    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1828  }
1829
1830  // CHECK: omp.atomic.capture hint(contended, speculative) memory_order(seq_cst) {
1831  // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
1832  // CHECK-NEXT: (%[[xval:.*]]: i32):
1833  // CHECK-NEXT:   %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
1834  // CHECK-NEXT:   omp.yield(%[[newval]] : i32)
1835  // CHECK-NEXT: }
1836  // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>, memref<i32>, i32
1837  // CHECK-NEXT: }
1838  omp.atomic.capture hint(contended, speculative) memory_order(seq_cst) {
1839    omp.atomic.update %x : memref<i32> {
1840    ^bb0(%xval: i32):
1841      %newval = llvm.add %xval, %expr : i32
1842      omp.yield(%newval : i32)
1843    }
1844    omp.atomic.read %v = %x : memref<i32>, memref<i32>, i32
1845  }
1846
1847  return
1848}
1849
// Round-trips omp.sections in both generic ("omp.sections"(...) with explicit
// operandSegmentSizes) and pretty syntax, covering the allocate, reduction,
// and nowait clauses, plus omp.section regions with payload ops.
// CHECK-LABEL: omp_sectionsop
func.func @omp_sectionsop(%data_var1 : memref<i32>, %data_var2 : memref<i32>,
                     %data_var3 : memref<i32>, %redn_var : !llvm.ptr) {
  // Generic form with allocate operands (allocate var + allocator var).
  // CHECK: omp.sections allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
  "omp.sections" (%data_var1, %data_var1) ({
    // CHECK: omp.terminator
    omp.terminator
  }) {operandSegmentSizes = array<i32: 1,1,0,0>} : (memref<i32>, memref<i32>) -> ()

    // Generic form with a reduction operand and its block argument.
    // CHECK: omp.sections reduction(@add_f32 %{{.*}} -> %{{.*}} : !llvm.ptr)
  "omp.sections" (%redn_var) ({
  ^bb0(%arg0: !llvm.ptr):
    // CHECK: omp.terminator
    omp.terminator
  }) {operandSegmentSizes = array<i32: 0,0,0,1>, reduction_byref = array<i1: false>, reduction_syms=[@add_f32]} : (!llvm.ptr) -> ()

  // CHECK: omp.sections nowait {
  omp.sections nowait {
    // CHECK: omp.terminator
    omp.terminator
  }

  // CHECK: omp.sections reduction(@add_f32 %{{.*}} -> %{{.*}} : !llvm.ptr) {
  omp.sections reduction(@add_f32 %redn_var -> %arg0 : !llvm.ptr) {
    // CHECK: omp.terminator
    omp.terminator
  }

  // CHECK: omp.sections allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
  omp.sections allocate(%data_var1 : memref<i32> -> %data_var1 : memref<i32>) {
    // CHECK: omp.terminator
    omp.terminator
  }

  // Multiple omp.section regions inside a single omp.sections construct.
  // CHECK: omp.sections nowait
  omp.sections nowait {
    // CHECK: omp.section
    omp.section {
      // CHECK: %{{.*}} = "test.payload"() : () -> i32
      %1 = "test.payload"() : () -> i32
      // CHECK: %{{.*}} = "test.payload"() : () -> i32
      %2 = "test.payload"() : () -> i32
      // CHECK: %{{.*}} = "test.payload"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
      %3 = "test.payload"(%1, %2) : (i32, i32) -> i32
    }
    // CHECK: omp.section
    omp.section {
      // CHECK: %{{.*}} = "test.payload"(%{{.*}}) : (!llvm.ptr) -> i32
      %1 = "test.payload"(%redn_var) : (!llvm.ptr) -> i32
    }
    // CHECK: omp.section
    omp.section {
      // CHECK: "test.payload"(%{{.*}}) : (!llvm.ptr) -> ()
      "test.payload"(%redn_var) : (!llvm.ptr) -> ()
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1910
// Round-trips a bare omp.single (no clauses) nested inside omp.parallel.
// CHECK-LABEL: func @omp_single
func.func @omp_single() {
  omp.parallel {
    // CHECK: omp.single {
    omp.single {
      "test.payload"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1925
// Round-trips omp.single with the nowait clause.
// CHECK-LABEL: func @omp_single_nowait
func.func @omp_single_nowait() {
  omp.parallel {
    // CHECK: omp.single nowait {
    omp.single nowait {
      "test.payload"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1940
// Round-trips omp.single with the allocate clause (allocate var -> allocator var).
// CHECK-LABEL: func @omp_single_allocate
func.func @omp_single_allocate(%data_var: memref<i32>) {
  omp.parallel {
    // CHECK: omp.single allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) {
    omp.single allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
      "test.payload"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1955
// Round-trips omp.single with allocate and nowait clauses combined; also pins
// the clause print order (allocate before nowait).
// CHECK-LABEL: func @omp_single_allocate_nowait
func.func @omp_single_allocate_nowait(%data_var: memref<i32>) {
  omp.parallel {
    // CHECK: omp.single allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) nowait {
    omp.single allocate(%data_var : memref<i32> -> %data_var : memref<i32>) nowait {
      "test.payload"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1970
// Verifies that an omp.single region containing multiple blocks (via cf.br)
// parses and prints correctly.
// CHECK-LABEL: func @omp_single_multiple_blocks
func.func @omp_single_multiple_blocks() {
  // CHECK: omp.single {
  omp.single {
    cf.br ^bb2
    ^bb2:
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1982
// External copy function referenced by the copyprivate clause below.
func.func private @copy_i32(memref<i32>, memref<i32>)

// Round-trips omp.single with the copyprivate clause (var -> copy function).
// CHECK-LABEL: func @omp_single_copyprivate
func.func @omp_single_copyprivate(%data_var: memref<i32>) {
  omp.parallel {
    // CHECK: omp.single copyprivate(%{{.*}} -> @copy_i32 : memref<i32>) {
    omp.single copyprivate(%data_var -> @copy_i32 : memref<i32>) {
      "test.payload"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
1999
// Round-trips omp.task with each clause individually (if, final, untied,
// in_reduction with and without byref, priority, allocate, detach) and then
// with several clauses combined on one op.
// CHECK-LABEL: @omp_task
// CHECK-SAME: (%[[bool_var:.*]]: i1, %[[i64_var:.*]]: i64, %[[i32_var:.*]]: i32, %[[data_var:.*]]: memref<i32>, %[[event_handle:.*]]: !llvm.ptr)
func.func @omp_task(%bool_var: i1, %i64_var: i64, %i32_var: i32, %data_var: memref<i32>, %event_handle : !llvm.ptr) {

  // Checking simple task
  // CHECK: omp.task {
  omp.task {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking `if` clause
  // CHECK: omp.task if(%[[bool_var]]) {
  omp.task if(%bool_var) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking `final` clause
  // CHECK: omp.task final(%[[bool_var]]) {
  omp.task final(%bool_var) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking `untied` clause
  // CHECK: omp.task untied {
  omp.task untied {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking `in_reduction` clause
  %c1 = arith.constant 1 : i32
  // CHECK: %[[redn_var1:.*]] = llvm.alloca %{{.*}} x f32 : (i32) -> !llvm.ptr
  %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
  // CHECK: %[[redn_var2:.*]] = llvm.alloca %{{.*}} x f32 : (i32) -> !llvm.ptr
  %1 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
  // CHECK: omp.task in_reduction(@add_f32 %[[redn_var1]] -> %{{.+}}, @add_f32 %[[redn_var2]] -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.task in_reduction(@add_f32 %0 -> %arg0, @add_f32 %1 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking `in_reduction` clause (mixed) byref
  // CHECK: omp.task in_reduction(byref @add_f32 %[[redn_var1]] -> %{{.+}}, @add_f32 %[[redn_var2]] -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.task in_reduction(byref @add_f32 %0 -> %arg0, @add_f32 %1 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking priority clause
  // CHECK: omp.task priority(%[[i32_var]] : i32) {
  omp.task priority(%i32_var : i32) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  // Checking allocate clause
  // CHECK: omp.task allocate(%[[data_var]] : memref<i32> -> %[[data_var]] : memref<i32>) {
  omp.task allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }
  // Checking detach clause
  // CHECK: omp.task detach(%[[event_handle]] : !llvm.ptr)
  omp.task detach(%event_handle : !llvm.ptr){
     omp.terminator
  }
  // Checking multiple clauses
  // CHECK: omp.task allocate(%[[data_var]] : memref<i32> -> %[[data_var]] : memref<i32>)
  omp.task allocate(%data_var : memref<i32> -> %data_var : memref<i32>)
      // CHECK-SAME: final(%[[bool_var]]) if(%[[bool_var]])
      final(%bool_var) if(%bool_var)
      // CHECK-SAME: priority(%[[i32_var]] : i32) untied
      priority(%i32_var : i32) untied
      // CHECK-SAME: in_reduction(@add_f32 %[[redn_var1]] -> %{{.+}}, byref @add_f32 %[[redn_var2]] -> %{{.+}} : !llvm.ptr, !llvm.ptr)
      in_reduction(@add_f32 %0 -> %arg0, byref @add_f32 %1 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }

  return
}
2102
// Round-trips omp.task with a multi-entry depend clause mixing taskdependin
// and taskdependinout kinds.
// CHECK-LABEL: @omp_task_depend
// CHECK-SAME: (%arg0: memref<i32>, %arg1: memref<i32>) {
func.func @omp_task_depend(%arg0: memref<i32>, %arg1: memref<i32>) {
  // CHECK:  omp.task   depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
  omp.task   depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2115
2116
// Round-trips omp.target with a depend clause; the explicit operandSegmentSizes
// attribute places all three operands in the depend segment.
// CHECK-LABEL: @omp_target_depend
// CHECK-SAME: (%arg0: memref<i32>, %arg1: memref<i32>) {
func.func @omp_target_depend(%arg0: memref<i32>, %arg1: memref<i32>) {
  // CHECK:  omp.target depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
  omp.target depend(taskdependin -> %arg0 : memref<i32>, taskdependin -> %arg1 : memref<i32>, taskdependinout -> %arg0 : memref<i32>) {
    // CHECK: omp.terminator
    omp.terminator
  } {operandSegmentSizes = array<i32: 0,0,0,3,0,0,0,0>}
  return
}
2127
// Round-trips omp.threadprivate on a global's address, both at function scope
// and inside an omp.parallel region.
func.func @omp_threadprivate() {
  %0 = arith.constant 1 : i32
  %1 = arith.constant 2 : i32
  %2 = arith.constant 3 : i32

  // CHECK: [[ARG0:%.*]] = llvm.mlir.addressof @_QFsubEx : !llvm.ptr
  // CHECK: {{.*}} = omp.threadprivate [[ARG0]] : !llvm.ptr -> !llvm.ptr
  %3 = llvm.mlir.addressof @_QFsubEx : !llvm.ptr
  %4 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
  llvm.store %0, %4 : i32, !llvm.ptr

  // CHECK:  omp.parallel
  // CHECK:    {{.*}} = omp.threadprivate [[ARG0]] : !llvm.ptr -> !llvm.ptr
  omp.parallel  {
    %5 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
    llvm.store %1, %5 : i32, !llvm.ptr
    omp.terminator
  }
  llvm.store %2, %4 : i32, !llvm.ptr
  return
}

// Global referenced by the threadprivate test above.
llvm.mlir.global internal @_QFsubEx() : i32
2151
// Round-trips omp.cancel with construct type `parallel` and an optional
// if-condition operand.
func.func @omp_cancel_parallel(%if_cond : i1) -> () {
  // Test with optional operand; if_expr.
  omp.parallel {
    // CHECK: omp.cancel cancellation_construct_type(parallel) if(%{{.*}})
    omp.cancel cancellation_construct_type(parallel) if(%if_cond)
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2162
// Round-trips omp.cancel with construct type `loop` inside a wsloop nest.
func.func @omp_cancel_wsloop(%lb : index, %ub : index, %step : index) {
  omp.wsloop {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      // CHECK: omp.cancel cancellation_construct_type(loop)
      omp.cancel cancellation_construct_type(loop)
      // CHECK: omp.yield
      omp.yield
    }
  }
  return
}
2174
// Round-trips omp.cancel with construct type `sections` inside omp.section.
func.func @omp_cancel_sections() -> () {
  omp.sections {
    omp.section {
      // CHECK: omp.cancel cancellation_construct_type(sections)
      omp.cancel cancellation_construct_type(sections)
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2187
// Round-trips omp.cancellation_point (paired with omp.cancel) for `parallel`.
func.func @omp_cancellationpoint_parallel() -> () {
  omp.parallel {
    // CHECK: omp.cancellation_point cancellation_construct_type(parallel)
    omp.cancellation_point cancellation_construct_type(parallel)
    // CHECK: omp.cancel cancellation_construct_type(parallel)
    omp.cancel cancellation_construct_type(parallel)
    omp.terminator
  }
  return
}
2198
// Round-trips omp.cancellation_point (paired with omp.cancel) for `loop`.
func.func @omp_cancellationpoint_wsloop(%lb : index, %ub : index, %step : index) {
  omp.wsloop {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      // CHECK: omp.cancellation_point cancellation_construct_type(loop)
      omp.cancellation_point cancellation_construct_type(loop)
      // CHECK: omp.cancel cancellation_construct_type(loop)
      omp.cancel cancellation_construct_type(loop)
      // CHECK: omp.yield
      omp.yield
    }
  }
  return
}
2212
// Round-trips omp.cancellation_point (paired with omp.cancel) for `sections`.
func.func @omp_cancellationpoint_sections() -> () {
  omp.sections {
    omp.section {
      // CHECK: omp.cancellation_point cancellation_construct_type(sections)
      omp.cancellation_point cancellation_construct_type(sections)
      // CHECK: omp.cancel cancellation_construct_type(sections)
      omp.cancel cancellation_construct_type(sections)
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2227
// Round-trips a bare omp.taskgroup whose region contains no omp.task ops.
// CHECK-LABEL: @omp_taskgroup_no_tasks
func.func @omp_taskgroup_no_tasks() -> () {

  // CHECK: omp.taskgroup
  omp.taskgroup {
    // CHECK: "test.foo"() : () -> ()
    "test.foo"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2240
// Round-trips omp.taskgroup containing multiple nested omp.task ops.
// CHECK-LABEL: @omp_taskgroup_multiple_tasks
func.func @omp_taskgroup_multiple_tasks() -> () {
  // CHECK: omp.taskgroup
  omp.taskgroup {
    // CHECK: omp.task
    omp.task {
      "test.foo"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.task
    omp.task {
      "test.foo"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2262
// Round-trips omp.taskgroup with allocate and task_reduction clauses combined.
// CHECK-LABEL: @omp_taskgroup_clauses
func.func @omp_taskgroup_clauses() -> () {
  %testmemref = "test.memref"() : () -> (memref<i32>)
  %testf32 = "test.f32"() : () -> (!llvm.ptr)
  // CHECK: omp.taskgroup allocate(%{{.+}}: memref<i32> -> %{{.+}} : memref<i32>) task_reduction(@add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr)
  omp.taskgroup allocate(%testmemref : memref<i32> -> %testmemref : memref<i32>) task_reduction(@add_f32 %testf32 -> %arg0 : !llvm.ptr) {
    // CHECK: omp.task
    omp.task {
      "test.foo"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.task
    omp.task {
      "test.foo"() : () -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2286
// Round-trips omp.taskloop with each clause individually (if, final, untied,
// mergeable, in_reduction / reduction with and without byref, priority,
// allocate, grainsize, num_tasks, nogroup) and as a composite taskloop-simd.
// CHECK-LABEL: @omp_taskloop
func.func @omp_taskloop(%lb: i32, %ub: i32, %step: i32) -> () {

  // CHECK: omp.taskloop {
  omp.taskloop {
    omp.loop_nest (%i) : i32 = (%lb) to (%ub) step (%step)  {
      // CHECK: omp.yield
      omp.yield
    }
  }

  %testbool = "test.bool"() : () -> (i1)

  // CHECK: omp.taskloop if(%{{[^)]+}}) {
  omp.taskloop if(%testbool) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop final(%{{[^)]+}}) {
  omp.taskloop final(%testbool) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop untied {
  omp.taskloop untied {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop mergeable {
  omp.taskloop mergeable {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  %testf32 = "test.f32"() : () -> (!llvm.ptr)
  %testf32_2 = "test.f32"() : () -> (!llvm.ptr)
  // CHECK: omp.taskloop in_reduction(@add_f32 %{{.+}} -> %{{.+}}, @add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.taskloop in_reduction(@add_f32 %testf32 -> %arg0, @add_f32 %testf32_2 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // Checking byref attribute for in_reduction
  // CHECK: omp.taskloop in_reduction(byref @add_f32 %{{.+}} -> %{{.+}}, @add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.taskloop in_reduction(byref @add_f32 %testf32 -> %arg0, @add_f32 %testf32_2 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop reduction(byref @add_f32 %{{.+}} -> %{{.+}}, @add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.taskloop reduction(byref @add_f32 %testf32 -> %arg0, @add_f32 %testf32_2 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // Check byref attribute for reduction
  // CHECK: omp.taskloop reduction(byref @add_f32 %{{.+}} -> %{{.+}}, byref @add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr, !llvm.ptr) {
  omp.taskloop reduction(byref @add_f32 %testf32 -> %arg0, byref @add_f32 %testf32_2 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop in_reduction(@add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr) reduction(@add_f32 %{{.+}} -> %{{.+}} : !llvm.ptr) {
  omp.taskloop in_reduction(@add_f32 %testf32 -> %arg0 : !llvm.ptr) reduction(@add_f32 %testf32_2 -> %arg1 : !llvm.ptr) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  %testi32 = "test.i32"() : () -> (i32)
  // CHECK: omp.taskloop priority(%{{[^:]+}}: i32) {
  omp.taskloop priority(%testi32 : i32) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  %testmemref = "test.memref"() : () -> (memref<i32>)
  // CHECK: omp.taskloop allocate(%{{.+}} : memref<i32> -> %{{.+}} : memref<i32>) {
  omp.taskloop allocate(%testmemref : memref<i32> -> %testmemref : memref<i32>) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  %testi64 = "test.i64"() : () -> (i64)
  // CHECK: omp.taskloop grainsize(%{{[^:]+}}: i64) {
  omp.taskloop grainsize(%testi64: i64) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop num_tasks(%{{[^:]+}}: i64) {
  omp.taskloop num_tasks(%testi64: i64) {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // CHECK: omp.taskloop nogroup {
  omp.taskloop nogroup {
    omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
      // CHECK: omp.yield
      omp.yield
    }
  }

  // Composite taskloop-simd: both ops carry the omp.composite attribute.
  // CHECK: omp.taskloop {
  omp.taskloop {
    omp.simd {
      omp.loop_nest (%i, %j) : i32 = (%lb, %ub) to (%ub, %lb) step (%step, %step) {
        // CHECK: omp.yield
        omp.yield
      }
    } {omp.composite}
  } {omp.composite}

  // CHECK: return
  return
}
2432
// Round-trips the omp.requires function attribute with a single flag.
// CHECK: func.func @omp_requires_one
// CHECK-SAME: omp.requires = #omp<clause_requires reverse_offload>
func.func @omp_requires_one() -> ()
    attributes {omp.requires = #omp<clause_requires reverse_offload>} {
  return
}
2439
// Round-trips the omp.requires function attribute with multiple OR'd flags.
// CHECK: func.func @omp_requires_multiple
// CHECK-SAME: omp.requires = #omp<clause_requires unified_address|dynamic_allocators>
func.func @omp_requires_multiple() -> ()
    attributes {omp.requires = #omp<clause_requires unified_address|dynamic_allocators>} {
  return
}
2446
// Round-trips omp.atomic.read/write/update with opaque LLVM pointers
// (element type supplied separately in the op syntax).
// CHECK-LABEL: @opaque_pointers_atomic_rwu
// CHECK-SAME: (%[[v:.*]]: !llvm.ptr, %[[x:.*]]: !llvm.ptr)
func.func @opaque_pointers_atomic_rwu(%v: !llvm.ptr, %x: !llvm.ptr) {
  // CHECK: omp.atomic.read %[[v]] = %[[x]] : !llvm.ptr, !llvm.ptr, i32
  // CHECK: %[[VAL:.*]] = llvm.load %[[x]] : !llvm.ptr -> i32
  // CHECK: omp.atomic.write %[[v]] = %[[VAL]] : !llvm.ptr, i32
  // CHECK: omp.atomic.update %[[x]] : !llvm.ptr {
  // CHECK-NEXT: ^{{[[:alnum:]]+}}(%[[XVAL:.*]]: i32):
  // CHECK-NEXT:   omp.yield(%[[XVAL]] : i32)
  // CHECK-NEXT: }
  omp.atomic.read %v = %x : !llvm.ptr, !llvm.ptr, i32
  %val = llvm.load %x : !llvm.ptr -> i32
  omp.atomic.write %v = %val : !llvm.ptr, i32
  omp.atomic.update %x : !llvm.ptr {
    ^bb0(%xval: i32):
      omp.yield(%xval : i32)
  }
  return
}
2466
// Round-trips omp.declare_reduction with init/combiner/atomic regions using
// opaque pointers; checks that no cleanup region is printed.
// CHECK-LABEL: @opaque_pointers_reduction
// CHECK: atomic {
// CHECK-NEXT: ^{{[[:alnum:]]+}}(%{{.*}}: !llvm.ptr, %{{.*}}: !llvm.ptr):
// CHECK-NOT: cleanup
omp.declare_reduction @opaque_pointers_reduction : f32
init {
^bb0(%arg: f32):
  %0 = arith.constant 0.0 : f32
  omp.yield (%0 : f32)
}
combiner {
^bb1(%arg0: f32, %arg1: f32):
  %1 = arith.addf %arg0, %arg1 : f32
  omp.yield (%1 : f32)
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
  %2 = llvm.load %arg3 : !llvm.ptr -> f32
  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
  omp.yield
}
2488
// Round-trips omp.declare_reduction with an optional alloc region; the init
// region then receives both the mold value and the allocated storage.
// CHECK-LABEL: @alloc_reduction
// CHECK-SAME:  alloc {
// CHECK-NEXT:  ^bb0(%[[ARG0:.*]]: !llvm.ptr):
// ...
// CHECK:         omp.yield
// CHECK-NEXT:  } init {
// CHECK:       } combiner {
// CHECK:       }
omp.declare_reduction @alloc_reduction : !llvm.ptr
alloc {
^bb0(%arg: !llvm.ptr):
  %c1 = arith.constant 1 : i32
  %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
  omp.yield (%0 : !llvm.ptr)
}
init {
^bb0(%mold: !llvm.ptr, %alloc: !llvm.ptr):
  %cst = arith.constant 1.0 : f32
  llvm.store %cst, %alloc : f32, !llvm.ptr
  omp.yield (%alloc : !llvm.ptr)
}
combiner {
^bb1(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
  %0 = llvm.load %arg0 : !llvm.ptr -> f32
  %1 = llvm.load %arg1 : !llvm.ptr -> f32
  %2 = arith.addf %0, %1 : f32
  llvm.store %2, %arg0 : f32, !llvm.ptr
  omp.yield (%arg0 : !llvm.ptr)
}
2518
// CHECK-LABEL: omp_targets_with_map_bounds
// CHECK-SAME: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr)
// Round-trips omp.map.bounds feeding omp.map.info, and map entries consumed
// by omp.target, omp.target_data, omp.target_enter_data and
// omp.target_exit_data, exercising each capture kind in turn.
func.func @omp_targets_with_map_bounds(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> () {
  // First bounds: lb 1, ub 4, stride 1, start_idx 1, mapped tofrom/ByRef.
  // CHECK: %[[C_00:.*]] = llvm.mlir.constant(4 : index) : i64
  // CHECK: %[[C_01:.*]] = llvm.mlir.constant(1 : index) : i64
  // CHECK: %[[C_02:.*]] = llvm.mlir.constant(1 : index) : i64
  // CHECK: %[[C_03:.*]] = llvm.mlir.constant(1 : index) : i64
  // CHECK: %[[BOUNDS0:.*]] = omp.map.bounds   lower_bound(%[[C_01]] : i64) upper_bound(%[[C_00]] : i64) stride(%[[C_02]] : i64) start_idx(%[[C_03]] : i64)
  // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS0]]) -> !llvm.ptr {name = ""}
    %0 = llvm.mlir.constant(4 : index) : i64
    %1 = llvm.mlir.constant(1 : index) : i64
    %2 = llvm.mlir.constant(1 : index) : i64
    %3 = llvm.mlir.constant(1 : index) : i64
    %4 = omp.map.bounds   lower_bound(%1 : i64) upper_bound(%0 : i64) stride(%2 : i64) start_idx(%3 : i64)

    %mapv1 = omp.map.info var_ptr(%arg0 : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(tofrom) capture(ByRef) bounds(%4) -> !llvm.ptr {name = ""}
  // Second bounds: lb 1, ub 9, stride 2, start_idx 2, mapped with the
  // exit_release_or_enter_alloc clause and ByCopy capture.
  // CHECK: %[[C_10:.*]] = llvm.mlir.constant(9 : index) : i64
  // CHECK: %[[C_11:.*]] = llvm.mlir.constant(1 : index) : i64
  // CHECK: %[[C_12:.*]] = llvm.mlir.constant(2 : index) : i64
  // CHECK: %[[C_13:.*]] = llvm.mlir.constant(2 : index) : i64
  // CHECK: %[[BOUNDS1:.*]] = omp.map.bounds   lower_bound(%[[C_11]] : i64) upper_bound(%[[C_10]] : i64) stride(%[[C_12]] : i64) start_idx(%[[C_13]] : i64)
  // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""}
    %6 = llvm.mlir.constant(9 : index) : i64
    %7 = llvm.mlir.constant(1 : index) : i64
    %8 = llvm.mlir.constant(2 : index) : i64
    %9 = llvm.mlir.constant(2 : index) : i64
    %10 = omp.map.bounds   lower_bound(%7 : i64) upper_bound(%6 : i64) stride(%8 : i64) start_idx(%9 : i64)
    %mapv2 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(ByCopy) bounds(%10) -> !llvm.ptr {name = ""}

    // omp.target binds each map entry to a region block argument.
    // CHECK: omp.target map_entries(%[[MAP0]] -> {{.*}}, %[[MAP1]] -> {{.*}} : !llvm.ptr, !llvm.ptr)
    omp.target map_entries(%mapv1 -> %arg2, %mapv2 -> %arg3 : !llvm.ptr, !llvm.ptr) {
      omp.terminator
    }

    // omp.target_data takes the map entries directly, without block args.
    // CHECK: omp.target_data map_entries(%[[MAP0]], %[[MAP1]] : !llvm.ptr, !llvm.ptr)
    omp.target_data map_entries(%mapv1, %mapv2 : !llvm.ptr, !llvm.ptr){}

    // Reuse the first bounds with a VLAType capture for target_enter_data.
    // CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(VLAType) bounds(%[[BOUNDS0]]) -> !llvm.ptr {name = ""}
    // CHECK: omp.target_enter_data map_entries(%[[MAP2]] : !llvm.ptr)
    %mapv3 = omp.map.info var_ptr(%arg0 : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(VLAType) bounds(%4) -> !llvm.ptr {name = ""}
    omp.target_enter_data map_entries(%mapv3 : !llvm.ptr){}

    // Reuse the second bounds with a This capture for target_exit_data.
    // CHECK: %[[MAP3:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(This) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""}
    // CHECK: omp.target_exit_data map_entries(%[[MAP3]] : !llvm.ptr)
    %mapv4 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(This) bounds(%10) -> !llvm.ptr {name = ""}
    omp.target_exit_data map_entries(%mapv4 : !llvm.ptr){}

    return
}
2568
// CHECK-LABEL: omp_target_update_data
// Round-trips omp.target_update with if, device, nowait and map_entries.
// The input deliberately writes the clauses out of order; the CHECK line pins
// the canonical clause order the printer emits (device, if, map_entries,
// nowait).
func.func @omp_target_update_data (%if_cond : i1, %device : si32, %map1: memref<?xi32>, %map2: memref<?xi32>) -> () {
    %mapv_from = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32> {name = ""}

    // Second entry also exercises the "present" map-type modifier.
    %mapv_to = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>) map_clauses(present, to) capture(ByRef) -> memref<?xi32> {name = ""}

    // CHECK: omp.target_update device(%[[VAL_1:.*]] : si32) if(%[[VAL_0:.*]]) map_entries(%{{.*}}, %{{.*}} : memref<?xi32>, memref<?xi32>) nowait
    omp.target_update if(%if_cond) device(%device : si32) nowait map_entries(%mapv_from , %mapv_to : memref<?xi32>, memref<?xi32>)
    return
}
2579
// CHECK-LABEL: omp_targets_is_allocatable
// CHECK-SAME: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr)
// Round-trips a map entry that is a member ([0]) of a descriptor-like struct
// map entry, as produced for allocatables, and its use in omp.target.
func.func @omp_targets_is_allocatable(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> () {
  // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
  %mapv1 = omp.map.info var_ptr(%arg0 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
  // The struct map carries the first map as member index [0].
  // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>) map_clauses(tofrom) capture(ByRef) members(%[[MAP0]] : [0] : !llvm.ptr) -> !llvm.ptr {name = ""}
  %mapv2 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>)   map_clauses(tofrom) capture(ByRef) members(%mapv1 : [0] : !llvm.ptr) -> !llvm.ptr {name = ""}
  // CHECK: omp.target map_entries(%[[MAP0]] -> {{.*}}, %[[MAP1]] -> {{.*}} : !llvm.ptr, !llvm.ptr)
  omp.target map_entries(%mapv1 -> %arg2, %mapv2 -> %arg3 : !llvm.ptr, !llvm.ptr) {
    omp.terminator
  }
  return
}
2593
// CHECK-LABEL: func @omp_target_enter_update_exit_data_depend
// CHECK-SAME:([[ARG0:%.*]]: memref<?xi32>, [[ARG1:%.*]]: memref<?xi32>, [[ARG2:%.*]]: memref<?xi32>) {
// Round-trips depend clauses (taskdependin/taskdependout) on the target data
// ops, interleaved with omp.task producers/consumers of the same variables.
func.func @omp_target_enter_update_exit_data_depend(%a: memref<?xi32>, %b: memref<?xi32>, %c: memref<?xi32>) {
// CHECK-NEXT: [[MAP0:%.*]] = omp.map.info
// CHECK-NEXT: [[MAP1:%.*]] = omp.map.info
// CHECK-NEXT: [[MAP2:%.*]] = omp.map.info
  %map_a = omp.map.info var_ptr(%a: memref<?xi32>, tensor<?xi32>) map_clauses(to) capture(ByRef) -> memref<?xi32>
  %map_b = omp.map.info var_ptr(%b: memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32>
  %map_c = omp.map.info var_ptr(%c: memref<?xi32>, tensor<?xi32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32>

  // Do some work on the host that writes to 'a'
  omp.task depend(taskdependout -> %a : memref<?xi32>) {
    "test.foo"(%a) : (memref<?xi32>) -> ()
    omp.terminator
  }

  // Then map that over to the target
  // CHECK: omp.target_enter_data depend(taskdependin -> [[ARG0]] : memref<?xi32>) map_entries([[MAP0]], [[MAP2]] : memref<?xi32>, memref<?xi32>) nowait
  omp.target_enter_data depend(taskdependin ->  %a: memref<?xi32>) nowait map_entries(%map_a, %map_c: memref<?xi32>, memref<?xi32>)

  // Compute 'b' on the target and copy it back
  // CHECK: omp.target map_entries([[MAP1]] -> {{%.*}} : memref<?xi32>) {
  omp.target map_entries(%map_b -> %arg0 : memref<?xi32>) {
    "test.foo"(%arg0) : (memref<?xi32>) -> ()
    omp.terminator
  }

  // Update 'a' on the host using 'b'
  // NOTE(review): unlike the task above, this region has no explicit
  // omp.terminator — presumably the parser inserts it implicitly; confirm.
  omp.task depend(taskdependout -> %a: memref<?xi32>){
    "test.bar"(%a, %b) : (memref<?xi32>, memref<?xi32>) -> ()
  }

  // Copy the updated 'a' onto the target
  // CHECK: omp.target_update depend(taskdependin -> [[ARG0]] : memref<?xi32>) map_entries([[MAP0]] : memref<?xi32>) nowait
  omp.target_update depend(taskdependin -> %a : memref<?xi32>) nowait map_entries(%map_a :  memref<?xi32>)

  // Compute 'c' on the target and copy it back
  %map_c_from = omp.map.info var_ptr(%c: memref<?xi32>, tensor<?xi32>) map_clauses(from) capture(ByRef) -> memref<?xi32>
  omp.target depend(taskdependout -> %c : memref<?xi32>) map_entries(%map_a -> %arg0, %map_c_from -> %arg1 : memref<?xi32>, memref<?xi32>) {
    "test.foobar"() : ()->()
    omp.terminator
  }
  // CHECK: omp.target_exit_data depend(taskdependin -> [[ARG2]] : memref<?xi32>) map_entries([[MAP2]] : memref<?xi32>)
  omp.target_exit_data depend(taskdependin -> %c : memref<?xi32>) map_entries(%map_c : memref<?xi32>)

  return
}
2641
// CHECK-LABEL: omp_map_with_members
// CHECK-SAME: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: !llvm.ptr, %[[ARG3:.*]]: !llvm.ptr, %[[ARG4:.*]]: !llvm.ptr, %[[ARG5:.*]]: !llvm.ptr)
// Round-trips the members clause with multiple members, both top-level
// indices ([0], [1]) and nested indices ([1, 0], [1, 1]), together with the
// partial_map attribute.
func.func @omp_map_with_members(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr, %arg3: !llvm.ptr, %arg4: !llvm.ptr, %arg5: !llvm.ptr) -> () {
  // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
  %mapv1 = omp.map.info var_ptr(%arg0 : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}

  // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, f32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
  %mapv2 = omp.map.info var_ptr(%arg1 : !llvm.ptr, f32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}

  // Parent map listing the two previous maps as struct members [0] and [1].
  // CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG2]] : !llvm.ptr, !llvm.struct<(i32, f32)>) map_clauses(to) capture(ByRef) members(%[[MAP0]], %[[MAP1]] : [0], [1] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "", partial_map = true}
  %mapv3 = omp.map.info var_ptr(%arg2 : !llvm.ptr, !llvm.struct<(i32, f32)>)   map_clauses(to) capture(ByRef) members(%mapv1, %mapv2 : [0], [1] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "", partial_map = true}

  // CHECK: omp.target_enter_data map_entries(%[[MAP0]], %[[MAP1]], %[[MAP2]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
  omp.target_enter_data map_entries(%mapv1, %mapv2, %mapv3 : !llvm.ptr, !llvm.ptr, !llvm.ptr){}

  // CHECK: %[[MAP3:.*]] = omp.map.info var_ptr(%[[ARG3]] : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
  %mapv4 = omp.map.info var_ptr(%arg3 : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}

  // CHECK: %[[MAP4:.*]] = omp.map.info var_ptr(%[[ARG4]] : !llvm.ptr, f32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
  %mapv5 = omp.map.info var_ptr(%arg4 : !llvm.ptr, f32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}

  // Parent map whose members live inside a nested struct: indices [1, 0]
  // and [1, 1].
  // CHECK: %[[MAP5:.*]] = omp.map.info var_ptr(%[[ARG5]] : !llvm.ptr, !llvm.struct<(i32, struct<(i32, f32)>)>) map_clauses(from) capture(ByRef) members(%[[MAP3]], %[[MAP4]] : [1, 0], [1, 1] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "", partial_map = true}
  %mapv6 = omp.map.info var_ptr(%arg5 : !llvm.ptr, !llvm.struct<(i32, struct<(i32, f32)>)>) map_clauses(from) capture(ByRef) members(%mapv4, %mapv5 : [1, 0], [1, 1] : !llvm.ptr, !llvm.ptr) -> !llvm.ptr {name = "", partial_map = true}

  // CHECK: omp.target_exit_data map_entries(%[[MAP3]], %[[MAP4]], %[[MAP5]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
  omp.target_exit_data map_entries(%mapv4, %mapv5, %mapv6 : !llvm.ptr, !llvm.ptr, !llvm.ptr){}

  return
}
2671
// CHECK-LABEL: parallel_op_privatizers
// CHECK-SAME: (%[[ARG0:[^[:space:]]+]]: !llvm.ptr, %[[ARG1:[^[:space:]]+]]: !llvm.ptr)
// Round-trips a private clause on omp.parallel with two privatizers, and
// checks the region block arguments introduced for each privatized variable
// are the ones used inside the region.
func.func @parallel_op_privatizers(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
  // CHECK: omp.parallel private(
  // CHECK-SAME: @x.privatizer %[[ARG0]] -> %[[ARG0_PRIV:[^[:space:]]+]],
  // CHECK-SAME: @y.privatizer %[[ARG1]] -> %[[ARG1_PRIV:[^[:space:]]+]] : !llvm.ptr, !llvm.ptr)
  omp.parallel private(@x.privatizer %arg0 -> %arg2, @y.privatizer %arg1 -> %arg3 : !llvm.ptr, !llvm.ptr) {
    // CHECK: llvm.load %[[ARG0_PRIV]]
    %0 = llvm.load %arg2 : !llvm.ptr -> i32
    // CHECK: llvm.load %[[ARG1_PRIV]]
    %1 = llvm.load %arg3 : !llvm.ptr -> i32
    omp.terminator
  }
  return
}
2687
// Round-trips an omp.private with only an alloc region.
// CHECK-LABEL: omp.private {type = private} @a.privatizer : !llvm.ptr alloc {
omp.private {type = private} @a.privatizer : !llvm.ptr alloc {
// CHECK: ^bb0(%{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr):
  omp.yield(%arg0 : !llvm.ptr)
}
2694
// Round-trips an omp.private with alloc and dealloc regions.
// CHECK-LABEL: omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
// CHECK: ^bb0(%{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr):
  omp.yield(%arg0 : !llvm.ptr)
} dealloc {
// CHECK: ^bb0(%{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr):
  omp.yield
}
2705
// Round-trips a firstprivate omp.private, which additionally carries a copy
// region (original value, privatized copy) before the dealloc region.
// CHECK-LABEL: omp.private {type = firstprivate} @y.privatizer : !llvm.ptr alloc {
omp.private {type = firstprivate} @y.privatizer : !llvm.ptr alloc {
// CHECK: ^bb0(%{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr):
  omp.yield(%arg0 : !llvm.ptr)
// CHECK: } copy {
} copy {
// CHECK: ^bb0(%{{.*}}: {{.*}}, %{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
  omp.yield(%arg0 : !llvm.ptr)
} dealloc {
// CHECK: ^bb0(%{{.*}}: {{.*}}):
^bb0(%arg0: !llvm.ptr):
  omp.yield
}
2721
// CHECK-LABEL: parallel_op_reduction_and_private
// Round-trips omp.parallel carrying both private and reduction clauses;
// checks the block arguments for the private vars come before those for the
// reduction vars.
func.func @parallel_op_reduction_and_private(%priv_var: !llvm.ptr, %priv_var2: !llvm.ptr, %reduc_var: !llvm.ptr, %reduc_var2: !llvm.ptr) {
  // CHECK: omp.parallel
  // CHECK-SAME: private(
  // CHECK-SAME: @x.privatizer %[[PRIV_VAR:[^[:space:]]+]] -> %[[PRIV_ARG:[^[:space:]]+]],
  // CHECK-SAME: @y.privatizer %[[PRIV_VAR2:[^[:space:]]+]] -> %[[PRIV_ARG2:[^[:space:]]+]] : !llvm.ptr, !llvm.ptr)
  //
  // CHECK-SAME: reduction(
  // CHECK-SAME: @add_f32 %[[REDUC_VAR:[^[:space:]]+]] -> %[[REDUC_ARG:[^[:space:]]+]],
  // CHECK-SAME: @add_f32 %[[REDUC_VAR2:[^[:space:]]+]] -> %[[REDUC_ARG2:[^[:space:]]+]] : !llvm.ptr, !llvm.ptr)
  omp.parallel private(@x.privatizer %priv_var -> %priv_arg, @y.privatizer %priv_var2 -> %priv_arg2 : !llvm.ptr, !llvm.ptr)
               reduction(@add_f32 %reduc_var -> %reduc_arg, @add_f32 %reduc_var2 -> %reduc_arg2 : !llvm.ptr, !llvm.ptr) {
    // CHECK: llvm.load %[[PRIV_ARG]]
    %0 = llvm.load %priv_arg : !llvm.ptr -> f32
    // CHECK: llvm.load %[[PRIV_ARG2]]
    %1 = llvm.load %priv_arg2 : !llvm.ptr -> f32
    // CHECK: llvm.load %[[REDUC_ARG]]
    %2 = llvm.load %reduc_arg : !llvm.ptr -> f32
    // CHECK: llvm.load %[[REDUC_ARG2]]
    %3 = llvm.load %reduc_arg2 : !llvm.ptr -> f32
    omp.terminator
  }
  return
}
2746
// CHECK-LABEL: omp_target_private
// Round-trips the private clause on omp.target, first alone and then
// together with map_entries (map_entries prints before private).
func.func @omp_target_private(%map1: memref<?xi32>, %map2: memref<?xi32>, %priv_var: !llvm.ptr) -> () {
  %mapv1 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>) map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
  %mapv2 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}

  // private clause only.
  // CHECK: omp.target
  // CHECK-SAME: private(
  // CHECK-SAME:   @x.privatizer %{{[^[:space:]]+}} -> %[[PRIV_ARG:[^[:space:]]+]]
  // CHECK-SAME:   : !llvm.ptr
  // CHECK-SAME: )
  omp.target private(@x.privatizer %priv_var -> %priv_arg : !llvm.ptr) {
    omp.terminator
  }

  // map_entries and private combined on the same op.
  // CHECK: omp.target

  // CHECK-SAME: map_entries(
  // CHECK-SAME:   %{{[^[:space:]]+}} -> %[[MAP1_ARG:[^[:space:]]+]],
  // CHECK-SAME:   %{{[^[:space:]]+}} -> %[[MAP2_ARG:[^[:space:]]+]]
  // CHECK-SAME:   : memref<?xi32>, memref<?xi32>
  // CHECK-SAME: )

  // CHECK-SAME: private(
  // CHECK-SAME:   @x.privatizer %{{[^[:space:]]+}} -> %[[PRIV_ARG:[^[:space:]]+]]
  // CHECK-SAME:   : !llvm.ptr
  // CHECK-SAME: )
  omp.target map_entries(%mapv1 -> %arg0, %mapv2 -> %arg1 : memref<?xi32>, memref<?xi32>) private(@x.privatizer %priv_var -> %priv_arg : !llvm.ptr) {
    omp.terminator
  }

  return
}
2779
// CHECK-LABEL: omp_target_private_with_map_idx
// Round-trips the optional [map_idx=N] annotation on an omp.target private
// clause, linking the privatized variable to a map entry by index.
func.func @omp_target_private_with_map_idx(%map1: memref<?xi32>, %map2: memref<?xi32>, %priv_var: !llvm.ptr) -> () {
  %mapv1 = omp.map.info var_ptr(%map1 : memref<?xi32>, tensor<?xi32>) map_clauses(tofrom) capture(ByRef) -> memref<?xi32> {name = ""}
  %mapv2 = omp.map.info var_ptr(%map2 : memref<?xi32>, tensor<?xi32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> memref<?xi32> {name = ""}

  // CHECK: omp.target

  // CHECK-SAME: map_entries(
  // CHECK-SAME:   %{{[^[:space:]]+}} -> %[[MAP1_ARG:[^[:space:]]+]],
  // CHECK-SAME:   %{{[^[:space:]]+}} -> %[[MAP2_ARG:[^[:space:]]+]]
  // CHECK-SAME:   : memref<?xi32>, memref<?xi32>
  // CHECK-SAME: )

  // CHECK-SAME: private(
  // CHECK-SAME:   @x.privatizer %{{[^[:space:]]+}} -> %[[PRIV_ARG:[^[:space:]]+]] [map_idx=1]
  // CHECK-SAME:   : !llvm.ptr
  // CHECK-SAME: )
  omp.target map_entries(%mapv1 -> %arg0, %mapv2 -> %arg1 : memref<?xi32>, memref<?xi32>) private(@x.privatizer %priv_var -> %priv_arg [map_idx=1] : !llvm.ptr) {
    omp.terminator
  }

  return
}
2803
// Round-trips the host_eval clause on omp.target: the host value is bound to
// a region block argument and then used by nested teams/parallel/loop
// constructs in three different nesting patterns.
func.func @omp_target_host_eval(%x : i32) {
  // host_eval value consumed by num_teams and thread_limit on omp.teams.
  // CHECK: omp.target host_eval(%{{.*}} -> %[[HOST_ARG:.*]] : i32) {
  // CHECK: omp.teams num_teams( to %[[HOST_ARG]] : i32)
  // CHECK-SAME: thread_limit(%[[HOST_ARG]] : i32)
  omp.target host_eval(%x -> %arg0 : i32) {
    omp.teams num_teams(to %arg0 : i32) thread_limit(%arg0 : i32) {
      omp.terminator
    }
    omp.terminator
  }

  // host_eval value consumed inside a composite
  // teams/parallel/distribute/wsloop/loop_nest.
  // CHECK: omp.target host_eval(%{{.*}} -> %[[HOST_ARG:.*]] : i32) {
  // CHECK: omp.teams {
  // CHECK: omp.parallel num_threads(%[[HOST_ARG]] : i32) {
  // CHECK: omp.distribute {
  // CHECK: omp.wsloop {
  // CHECK: omp.loop_nest (%{{.*}}) : i32 = (%[[HOST_ARG]]) to (%[[HOST_ARG]]) step (%[[HOST_ARG]]) {
  omp.target host_eval(%x -> %arg0 : i32) {
    omp.teams {
      omp.parallel num_threads(%arg0 : i32) {
        omp.distribute {
          omp.wsloop {
            omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
              omp.yield
            }
          } {omp.composite}
        } {omp.composite}
        omp.terminator
      } {omp.composite}
      omp.terminator
    }
    omp.terminator
  }

  // host_eval value consumed as the bounds of a teams/distribute loop.
  // CHECK: omp.target host_eval(%{{.*}} -> %[[HOST_ARG:.*]] : i32) {
  // CHECK: omp.teams {
  // CHECK: omp.distribute {
  // CHECK: omp.loop_nest (%{{.*}}) : i32 = (%[[HOST_ARG]]) to (%[[HOST_ARG]]) step (%[[HOST_ARG]]) {
  omp.target host_eval(%x -> %arg0 : i32) {
    omp.teams {
      omp.distribute {
        omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) {
          omp.yield
        }
      }
      omp.terminator
    }
    omp.terminator
  }
  return
}
2855
// CHECK-LABEL: omp_loop
// Round-trips omp.loop with no bind clause and with each bind kind
// (teams, parallel, thread), each wrapping a single omp.loop_nest.
func.func @omp_loop(%lb : index, %ub : index, %step : index) {
  // CHECK: omp.loop {
  omp.loop {
    // CHECK: omp.loop_nest {{.*}} {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      // CHECK: omp.yield
      omp.yield
    }
    // CHECK: }
  }
  // CHECK: }

  // CHECK: omp.loop bind(teams) {
  omp.loop bind(teams) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: }

  // CHECK: omp.loop bind(parallel) {
  omp.loop bind(parallel) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: }

  // CHECK: omp.loop bind(thread) {
  omp.loop bind(thread) {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
  }
  // CHECK: }

  return
}
2895
// CHECK-LABEL: func @omp_workshare
// Round-trips a plain omp.workshare region.
func.func @omp_workshare() {
  // CHECK: omp.workshare {
  omp.workshare {
    "test.payload"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2906
// CHECK-LABEL: func @omp_workshare_nowait
// Round-trips omp.workshare with the nowait clause.
func.func @omp_workshare_nowait() {
  // CHECK: omp.workshare nowait {
  omp.workshare nowait {
    "test.payload"() : () -> ()
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2917
// CHECK-LABEL: func @omp_workshare_multiple_blocks
// Round-trips an omp.workshare whose region contains more than one block
// (a cf.br into a second block holding the terminator).
func.func @omp_workshare_multiple_blocks() {
  // CHECK: omp.workshare {
  omp.workshare {
    cf.br ^bb2
    ^bb2:
    // CHECK: omp.terminator
    omp.terminator
  }
  return
}
2929
// CHECK-LABEL: func @omp_workshare_loop_wrapper
// Round-trips omp.workshare.loop_wrapper nested directly inside
// omp.workshare, wrapping a single omp.loop_nest.
func.func @omp_workshare_loop_wrapper(%idx : index) {
  // CHECK-NEXT: omp.workshare {
  omp.workshare {
    // CHECK-NEXT: omp.workshare.loop_wrapper
    omp.workshare.loop_wrapper {
      // CHECK-NEXT: omp.loop_nest
      omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
        omp.yield
      }
    }
    omp.terminator
  }
  return
}
2945
// CHECK-LABEL: func @omp_workshare_loop_wrapper_attrs
// Same as above, but verifies a discardable attribute dictionary on
// omp.workshare.loop_wrapper survives the round trip.
func.func @omp_workshare_loop_wrapper_attrs(%idx : index) {
  // CHECK-NEXT: omp.workshare {
  omp.workshare {
    // CHECK-NEXT: omp.workshare.loop_wrapper {
    omp.workshare.loop_wrapper {
      // CHECK-NEXT: omp.loop_nest
      omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
        omp.yield
      }
    // CHECK: } {attr_in_dict}
    } {attr_in_dict}
    omp.terminator
  }
  return
}
2962