// RUN: fir-opt --omp-generic-loop-conversion %s | FileCheck %s

// Verifies that a generic `omp.loop` nested directly inside `omp.teams` is
// converted to the equivalent `omp.parallel` + `omp.distribute` + `omp.wsloop`
// nest, with the loop's `private` clause moved to the new `omp.parallel` op.

omp.private {type = private} @_QFteams_loopEi_private_ref_i32 : !fir.ref<i32> alloc {
^bb0(%arg0: !fir.ref<i32>):
  omp.yield(%arg0 : !fir.ref<i32>)
}

func.func @_QPteams_loop() {
  %i = fir.alloca i32
  omp.teams {
    %c0 = arith.constant 0 : i32
    %c10 = arith.constant 10 : i32
    %c1 = arith.constant 1 : i32
    omp.loop private(@_QFteams_loopEi_private_ref_i32 %i -> %arg2 : !fir.ref<i32>) {
      omp.loop_nest (%arg3) : i32 = (%c0) to (%c10) inclusive step (%c1) {
        fir.store %arg3 to %arg2 : !fir.ref<i32>
        omp.yield
      }
    }
    omp.terminator
  }
  return
}

// CHECK-LABEL: func.func @_QPteams_loop
// CHECK:         %[[I:.*]] = fir.alloca i32
// CHECK:         omp.teams {
//
// TODO: we probably need to move the `loop_nest` bounds ops from the `teams`
// region to the `parallel` region to avoid making these values `shared`. We
// can find the backward slices of these bounds that are within the `teams`
// region and move those slices into the `parallel` op (see the sketch at the
// end of this file).

// CHECK:           %[[LB:.*]] = arith.constant 0 : i32
// CHECK:           %[[UB:.*]] = arith.constant 10 : i32
// CHECK:           %[[STEP:.*]] = arith.constant 1 : i32
//
// CHECK:           omp.parallel private(@{{.*}} %[[I]]
// CHECK-SAME:        -> %[[I_PRIV_ARG:[^[:space:]]+]] : !fir.ref<i32>) {
// CHECK:             omp.distribute {
// CHECK:               omp.wsloop {
//
// CHECK:                 omp.loop_nest (%{{.*}}) : i32 =
// CHECK-SAME:              (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) {
// CHECK:                   fir.store %{{.*}} to %[[I_PRIV_ARG]] : !fir.ref<i32>
// CHECK:                 }
// CHECK:               }
// CHECK:             }
// CHECK:           }
// CHECK:         }
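
// A rough sketch (illustrative only, not verified by this test) of the IR
// shape the TODO above is aiming for: the loop-bound constants would be
// placed inside the `omp.parallel` region instead of the `teams` region, so
// they are no longer implicitly `shared` values captured from `omp.teams`.
// The SSA names below are made up for readability.
//
//   omp.teams {
//     omp.parallel private(@_QFteams_loopEi_private_ref_i32 %i -> %i_priv : !fir.ref<i32>) {
//       %lb = arith.constant 0 : i32
//       %ub = arith.constant 10 : i32
//       %step = arith.constant 1 : i32
//       omp.distribute {
//         omp.wsloop {
//           omp.loop_nest (%iv) : i32 = (%lb) to (%ub) inclusive step (%step) {
//             fir.store %iv to %i_priv : !fir.ref<i32>
//             omp.yield
//           }
//         }
//       }
//       omp.terminator
//     }
//     omp.terminator
//   }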