// RUN: mlir-opt -convert-openmp-to-llvm -split-input-file %s | FileCheck %s
// RUN: mlir-opt -convert-to-llvm -split-input-file %s | FileCheck %s

// CHECK-LABEL: llvm.func @foo(i64, i64)
func.func private @foo(index, index)

// CHECK-LABEL: llvm.func @critical_block_arg
func.func @critical_block_arg() {
  // CHECK: omp.critical
  omp.critical {
  // CHECK-NEXT: ^[[BB0:.*]](%[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64):
  ^bb0(%arg1: index, %arg2: index):
    // CHECK-NEXT: llvm.call @foo(%[[ARG1]], %[[ARG2]]) : (i64, i64) -> ()
    func.call @foo(%arg1, %arg2) : (index, index) -> ()
    omp.terminator
  }
  return
}

// -----

// CHECK: omp.critical.declare @[[MUTEX:.*]] hint(contended, speculative)
omp.critical.declare @mutex hint(contended, speculative)

// CHECK: llvm.func @critical_declare
func.func @critical_declare() {
  // CHECK: omp.critical(@[[MUTEX]])
  omp.critical(@mutex) {
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: llvm.func @master_block_arg
func.func @master_block_arg() {
  // CHECK: omp.master
  omp.master {
  // CHECK-NEXT: ^[[BB0:.*]](%[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64):
  ^bb0(%arg1: index, %arg2: index):
    // CHECK-DAG: %[[CAST_ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : i64 to index
    // CHECK-DAG: %[[CAST_ARG2:.*]] = builtin.unrealized_conversion_cast %[[ARG2]] : i64 to index
    // CHECK-NEXT: "test.payload"(%[[CAST_ARG1]], %[[CAST_ARG2]]) : (index, index) -> ()
    "test.payload"(%arg1, %arg2) : (index, index) -> ()
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: llvm.func @branch_loop
func.func @branch_loop() {
  %start = arith.constant 0 : index
  %end = arith.constant 0 : index
  // CHECK: omp.parallel
  omp.parallel {
    // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64
    cf.br ^bb1(%start, %end : index, index)
  // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}}
  ^bb1(%0: index, %1: index):
    // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64
    %2 = arith.cmpi slt, %0, %1 : index
    // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]]
    cf.cond_br %2, ^bb2(%end, %end : index, index), ^bb3
  // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64):
  ^bb2(%3: index, %4: index):
    // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : i64, i64)
    cf.br ^bb1(%3, %4 : index, index)
  // CHECK-NEXT: ^[[BB3]]:
  ^bb3:
    omp.flush
    omp.barrier
    omp.taskwait
    omp.taskyield
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: @wsloop
// CHECK: (%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64, %[[ARG3:.*]]: i64, %[[ARG4:.*]]: i64, %[[ARG5:.*]]: i64)
func.func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
  // CHECK: omp.parallel
  omp.parallel {
    // CHECK: omp.wsloop {
    "omp.wsloop"() ({
      // CHECK: omp.loop_nest (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
      omp.loop_nest (%arg6, %arg7) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
        // CHECK-DAG: %[[CAST_ARG6:.*]] = builtin.unrealized_conversion_cast %[[ARG6]] : i64 to index
        // CHECK-DAG: %[[CAST_ARG7:.*]] = builtin.unrealized_conversion_cast %[[ARG7]] : i64 to index
        // CHECK: "test.payload"(%[[CAST_ARG6]], %[[CAST_ARG7]]) : (index, index) -> ()
        "test.payload"(%arg6, %arg7) : (index, index) -> ()
        omp.yield
      }
    }) : () -> ()
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: @atomic_write
// CHECK: (%[[ARG0:.*]]: !llvm.ptr)
// CHECK: %[[VAL0:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: omp.atomic.write %[[ARG0]] = %[[VAL0]] memory_order(relaxed) : !llvm.ptr, i32
func.func @atomic_write(%a: !llvm.ptr) -> () {
  %1 = arith.constant 1 : i32
  omp.atomic.write %a = %1 hint(none) memory_order(relaxed) : !llvm.ptr, i32
  return
}

// -----

// CHECK-LABEL: @atomic_read
// CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr)
// CHECK: omp.atomic.read %[[ARG1]] = %[[ARG0]] hint(contended) memory_order(acquire) : !llvm.ptr
func.func @atomic_read(%a: !llvm.ptr, %b: !llvm.ptr) -> () {
  omp.atomic.read %b = %a memory_order(acquire) hint(contended) : !llvm.ptr, !llvm.ptr, i32
  return
}

// -----

func.func @atomic_update() {
  %0 = llvm.mlir.addressof @_QFsEc : !llvm.ptr
  omp.atomic.update %0 : !llvm.ptr {
  ^bb0(%arg0: i32):
    %1 = arith.constant 1 : i32
    %2 = arith.addi %arg0, %1 : i32
    omp.yield(%2 : i32)
  }
  return
}
llvm.mlir.global internal @_QFsEc() : i32 {
  %0 = arith.constant 10 : i32
  llvm.return %0 : i32
}

// CHECK-LABEL: @atomic_update
// CHECK: %[[GLOBAL_VAR:.*]] = llvm.mlir.addressof @_QFsEc : !llvm.ptr
// CHECK: omp.atomic.update %[[GLOBAL_VAR]] : !llvm.ptr {
// CHECK: ^bb0(%[[IN_VAL:.*]]: i32):
// CHECK: %[[CONST_1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[OUT_VAL:.*]] = llvm.add %[[IN_VAL]], %[[CONST_1]] : i32
// CHECK: omp.yield(%[[OUT_VAL]] : i32)
// CHECK: }

// -----

// CHECK-LABEL: @threadprivate
// CHECK: (%[[ARG0:.*]]: !llvm.ptr)
// CHECK: %[[VAL0:.*]] = omp.threadprivate %[[ARG0]] : !llvm.ptr -> !llvm.ptr
func.func @threadprivate(%a: !llvm.ptr) -> () {
  %1 = omp.threadprivate %a : !llvm.ptr -> !llvm.ptr
  return
}

// -----

// CHECK: llvm.func @loop_nest_block_arg(%[[LOWER:.*]]: i32, %[[UPPER:.*]]: i32, %[[ITER:.*]]: i64) {
// CHECK: omp.simd {
// CHECK-NEXT: omp.loop_nest (%[[ARG_0:.*]]) : i32 = (%[[LOWER]])
// CHECK-SAME: to (%[[UPPER]]) inclusive step (%[[LOWER]]) {
// CHECK: llvm.br ^[[BB1:.*]](%[[ITER]] : i64)
// CHECK: ^[[BB1]](%[[VAL_0:.*]]: i64):
// CHECK: %[[VAL_1:.*]] = llvm.icmp "slt" %[[VAL_0]], %[[ITER]] : i64
// CHECK: llvm.cond_br %[[VAL_1]], ^[[BB2:.*]], ^[[BB3:.*]]
// CHECK: ^[[BB2]]:
// CHECK: %[[VAL_2:.*]] = llvm.add %[[VAL_0]], %[[ITER]] : i64
// CHECK: llvm.br ^[[BB1]](%[[VAL_2]] : i64)
// CHECK: ^[[BB3]]:
// CHECK: omp.yield
func.func @loop_nest_block_arg(%val : i32, %ub : i32, %i : index) {
  omp.simd {
    omp.loop_nest (%arg0) : i32 = (%val) to (%ub) inclusive step (%val) {
      cf.br ^bb1(%i : index)
    ^bb1(%0: index):
      %1 = arith.cmpi slt, %0, %i : index
      cf.cond_br %1, ^bb2, ^bb3
    ^bb2:
      %2 = arith.addi %0, %i : index
      cf.br ^bb1(%2 : index)
    ^bb3:
      omp.yield
    }
  }
  return
}

// -----

// CHECK-LABEL: @task_depend
// CHECK: (%[[ARG0:.*]]: !llvm.ptr) {
// CHECK: omp.task depend(taskdependin -> %[[ARG0]] : !llvm.ptr) {
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
// CHECK: }

func.func @task_depend(%arg0: !llvm.ptr) {
  omp.task depend(taskdependin -> %arg0 : !llvm.ptr) {
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: @_QPomp_target_data
// CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: !llvm.ptr, %[[ARG3:.*]]: !llvm.ptr)
// CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG2]] : !llvm.ptr, i32) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: omp.target_enter_data map_entries(%[[MAP0]], %[[MAP1]], %[[MAP2]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
// CHECK: %[[MAP3:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP4:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP5:.*]] = omp.map.info var_ptr(%[[ARG2]] : !llvm.ptr, i32) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP6:.*]] = omp.map.info var_ptr(%[[ARG3]] : !llvm.ptr, i32) map_clauses(always, delete) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: omp.target_exit_data map_entries(%[[MAP3]], %[[MAP4]], %[[MAP5]], %[[MAP6]] : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)

llvm.func @_QPomp_target_data(%a : !llvm.ptr, %b : !llvm.ptr, %c : !llvm.ptr, %d : !llvm.ptr) {
  %0 = omp.map.info var_ptr(%a : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
  %1 = omp.map.info var_ptr(%b : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
  %2 = omp.map.info var_ptr(%c : !llvm.ptr, i32) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
  omp.target_enter_data map_entries(%0, %1, %2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {}
  %3 = omp.map.info var_ptr(%a : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
  %4 = omp.map.info var_ptr(%b : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
  %5 = omp.map.info var_ptr(%c : !llvm.ptr, i32) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
  %6 = omp.map.info var_ptr(%d : !llvm.ptr, i32) map_clauses(always, delete) capture(ByRef) -> !llvm.ptr {name = ""}
  omp.target_exit_data map_entries(%3, %4, %5, %6 : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr) {}
  llvm.return
}

// -----

// CHECK-LABEL: @_QPomp_target_data_region
// CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr) {
// CHECK: %[[MAP_0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: omp.target_data map_entries(%[[MAP_0]] : !llvm.ptr) {
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
// CHECK: llvm.store %[[VAL_1]], %[[ARG1]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return

llvm.func @_QPomp_target_data_region(%a : !llvm.ptr, %i : !llvm.ptr) {
  %1 = omp.map.info var_ptr(%a : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
  omp.target_data map_entries(%1 : !llvm.ptr) {
    %2 = llvm.mlir.constant(10 : i32) : i32
    llvm.store %2, %i : i32, !llvm.ptr
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK-LABEL: llvm.func @_QPomp_target(
// CHECK: %[[ARG_0:.*]]: !llvm.ptr,
// CHECK: %[[ARG_1:.*]]: !llvm.ptr) {
// CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(64 : i32) : i32
// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG_0]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG_1]] : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = ""}
// CHECK: omp.target thread_limit(%[[VAL_0]] : i32) map_entries(%[[MAP1]] -> %[[BB_ARG0:.*]], %[[MAP2]] -> %[[BB_ARG1:.*]] : !llvm.ptr, !llvm.ptr) {
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
// CHECK: llvm.store %[[VAL_1]], %[[BB_ARG1]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
// CHECK: }

llvm.func @_QPomp_target(%a : !llvm.ptr, %i : !llvm.ptr) {
  %0 = llvm.mlir.constant(64 : i32) : i32
  %1 = omp.map.info var_ptr(%a : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
  %3 = omp.map.info var_ptr(%i : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = ""}
  omp.target thread_limit(%0 : i32) map_entries(%1 -> %arg0, %3 -> %arg1 : !llvm.ptr, !llvm.ptr) {
    %2 = llvm.mlir.constant(10 : i32) : i32
    llvm.store %2, %arg1 : i32, !llvm.ptr
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK-LABEL: @_QPsb
// CHECK: omp.sections
// CHECK: omp.section
// CHECK: llvm.br
// CHECK: llvm.icmp
// CHECK: llvm.cond_br
// CHECK: llvm.br
// CHECK: omp.terminator
// CHECK: omp.terminator
// CHECK: llvm.return

llvm.func @_QPsb() {
  %0 = llvm.mlir.constant(0 : i64) : i64
  %1 = llvm.mlir.constant(10 : i64) : i64
  %2 = llvm.mlir.constant(1 : i64) : i64
  omp.sections {
    omp.section {
      llvm.br ^bb1(%1 : i64)
    ^bb1(%3: i64):  // 2 preds: ^bb0, ^bb2
      %4 = llvm.icmp "sgt" %3, %0 : i64
      llvm.cond_br %4, ^bb2, ^bb3
    ^bb2:  // pred: ^bb1
      %5 = llvm.sub %3, %2 : i64
      llvm.br ^bb1(%5 : i64)
    ^bb3:  // pred: ^bb1
      omp.terminator
    }
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK: omp.declare_reduction @eqv_reduction : i32 init
// CHECK: ^bb0(%{{.*}}: i32):
// CHECK: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1
// CHECK: %[[TRUE_EXT:.*]] = llvm.zext %[[TRUE]] : i1 to i32
// CHECK: omp.yield(%[[TRUE_EXT]] : i32)
// CHECK: } combiner {
// CHECK: ^bb0(%[[ARG_1:.*]]: i32, %[[ARG_2:.*]]: i32):
// CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i32
// CHECK: %[[CMP_1:.*]] = llvm.icmp "ne" %[[ARG_1]], %[[ZERO]] : i32
// CHECK: %[[CMP_2:.*]] = llvm.icmp "ne" %[[ARG_2]], %[[ZERO]] : i32
// CHECK: %[[COMBINE_VAL:.*]] = llvm.icmp "eq" %[[CMP_1]], %[[CMP_2]] : i1
// CHECK: %[[COMBINE_VAL_EXT:.*]] = llvm.zext %[[COMBINE_VAL]] : i1 to i32
// CHECK: omp.yield(%[[COMBINE_VAL_EXT]] : i32)
// CHECK-LABEL: @_QPsimple_reduction
// CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
// CHECK: omp.parallel
// CHECK: omp.wsloop reduction(@eqv_reduction %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
// CHECK-NEXT: omp.loop_nest {{.*}}{
// CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
// CHECK: %[[CMP:.+]] = llvm.icmp "eq" %{{.*}}, %[[LPRV]] : i32
// CHECK: %[[ZEXT:.+]] = llvm.zext %[[CMP]] : i1 to i32
// CHECK: llvm.store %[[ZEXT]], %[[PRV]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: omp.terminator
// CHECK: llvm.return

omp.declare_reduction @eqv_reduction : i32 init {
^bb0(%arg0: i32):
  %0 = llvm.mlir.constant(true) : i1
  %1 = llvm.zext %0 : i1 to i32
  omp.yield(%1 : i32)
} combiner {
^bb0(%arg0: i32, %arg1: i32):
  %0 = llvm.mlir.constant(0 : i64) : i32
  %1 = llvm.icmp "ne" %arg0, %0 : i32
  %2 = llvm.icmp "ne" %arg1, %0 : i32
  %3 = llvm.icmp "eq" %1, %2 : i1
  %4 = llvm.zext %3 : i1 to i32
  omp.yield(%4 : i32)
}
llvm.func @_QPsimple_reduction(%arg0: !llvm.ptr {fir.bindc_name = "y"}) {
  %0 = llvm.mlir.constant(100 : i32) : i32
  %1 = llvm.mlir.constant(1 : i32) : i32
  %2 = llvm.mlir.constant(true) : i1
  %3 = llvm.mlir.constant(1 : i64) : i64
  %4 = llvm.alloca %3 x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
  %5 = llvm.zext %2 : i1 to i32
  llvm.store %5, %4 : i32, !llvm.ptr
  omp.parallel {
    %6 = llvm.alloca %3 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>, pinned} : (i64) -> !llvm.ptr
    omp.wsloop reduction(@eqv_reduction %4 -> %prv : !llvm.ptr) {
      omp.loop_nest (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
        llvm.store %arg1, %6 : i32, !llvm.ptr
        %7 = llvm.load %6 : !llvm.ptr -> i32
        %8 = llvm.sext %7 : i32 to i64
        %9 = llvm.sub %8, %3 : i64
        %10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
        %11 = llvm.load %10 : !llvm.ptr -> i32
        %12 = llvm.load %prv : !llvm.ptr -> i32
        %13 = llvm.icmp "eq" %11, %12 : i32
        %14 = llvm.zext %13 : i1 to i32
        llvm.store %14, %prv : i32, !llvm.ptr
        omp.yield
      }
    }
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK-LABEL: @_QQmain
llvm.func @_QQmain() {
  %0 = llvm.mlir.constant(0 : index) : i64
  %1 = llvm.mlir.constant(5 : index) : i64
  %2 = llvm.mlir.constant(1 : index) : i64
  %3 = llvm.mlir.constant(1 : i64) : i64
  %4 = llvm.alloca %3 x i32 : (i64) -> !llvm.ptr
  // CHECK: omp.taskgroup
  omp.taskgroup {
    %5 = llvm.trunc %2 : i64 to i32
    llvm.br ^bb1(%5, %1 : i32, i64)
  ^bb1(%6: i32, %7: i64):  // 2 preds: ^bb0, ^bb2
    %8 = llvm.icmp "sgt" %7, %0 : i64
    llvm.cond_br %8, ^bb2, ^bb3
  ^bb2:  // pred: ^bb1
    llvm.store %6, %4 : i32, !llvm.ptr
    // CHECK: omp.task
    omp.task {
      // CHECK: llvm.call @[[CALL_FUNC:.*]]({{.*}}) :
      llvm.call @_QFPdo_work(%4) : (!llvm.ptr) -> ()
      // CHECK: omp.terminator
      omp.terminator
    }
    %9 = llvm.load %4 : !llvm.ptr -> i32
    %10 = llvm.add %9, %5 : i32
    %11 = llvm.sub %7, %2 : i64
    llvm.br ^bb1(%10, %11 : i32, i64)
  ^bb3:  // pred: ^bb1
    llvm.store %6, %4 : i32, !llvm.ptr
    // CHECK: omp.terminator
    omp.terminator
  }
  llvm.return
}
// CHECK: @[[CALL_FUNC]]
llvm.func @_QFPdo_work(%arg0: !llvm.ptr {fir.bindc_name = "i"}) {
  llvm.return
}

// -----

// CHECK-LABEL: @sub_
llvm.func @sub_() {
  %0 = llvm.mlir.constant(0 : index) : i64
  %1 = llvm.mlir.constant(1 : index) : i64
  %2 = llvm.mlir.constant(1 : i64) : i64
  %3 = llvm.alloca %2 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFsubEi"} : (i64) -> !llvm.ptr
  // CHECK: omp.ordered.region
  omp.ordered.region {
    %4 = llvm.trunc %1 : i64 to i32
    llvm.br ^bb1(%4, %1 : i32, i64)
  ^bb1(%5: i32, %6: i64):  // 2 preds: ^bb0, ^bb2
    %7 = llvm.icmp "sgt" %6, %0 : i64
    llvm.cond_br %7, ^bb2, ^bb3
  ^bb2:  // pred: ^bb1
    llvm.store %5, %3 : i32, !llvm.ptr
    %8 = llvm.load %3 : !llvm.ptr -> i32
    // CHECK: llvm.add
    %9 = arith.addi %8, %4 : i32
    // CHECK: llvm.sub
    %10 = arith.subi %6, %1 : i64
    llvm.br ^bb1(%9, %10 : i32, i64)
  ^bb3:  // pred: ^bb1
    llvm.store %5, %3 : i32, !llvm.ptr
    // CHECK: omp.terminator
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK-LABEL: llvm.func @_QPtarget_map_with_bounds(
// CHECK: %[[ARG_0:.*]]: !llvm.ptr, %[[ARG_1:.*]]: !llvm.ptr, %[[ARG_2:.*]]: !llvm.ptr) {
// CHECK: %[[C_01:.*]] = llvm.mlir.constant(4 : index) : i64
// CHECK: %[[C_02:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[C_03:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[C_04:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[BOUNDS0:.*]] = omp.map.bounds lower_bound(%[[C_02]] : i64) upper_bound(%[[C_01]] : i64) stride(%[[C_04]] : i64) start_idx(%[[C_04]] : i64)
// CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG_1]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS0]]) -> !llvm.ptr {name = ""}
// CHECK: %[[C_11:.*]] = llvm.mlir.constant(4 : index) : i64
// CHECK: %[[C_12:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[C_13:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[C_14:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[BOUNDS1:.*]] = omp.map.bounds lower_bound(%[[C_12]] : i64) upper_bound(%[[C_11]] : i64) stride(%[[C_14]] : i64) start_idx(%[[C_14]] : i64)
// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG_2]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""}
// CHECK: omp.target map_entries(%[[MAP0]] -> %[[BB_ARG0:.*]], %[[MAP1]] -> %[[BB_ARG1:.*]] : !llvm.ptr, !llvm.ptr) {
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
// CHECK: }

llvm.func @_QPtarget_map_with_bounds(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
  %0 = llvm.mlir.constant(4 : index) : i64
  %1 = llvm.mlir.constant(1 : index) : i64
  %2 = llvm.mlir.constant(1 : index) : i64
  %3 = llvm.mlir.constant(1 : index) : i64
  %4 = omp.map.bounds lower_bound(%1 : i64) upper_bound(%0 : i64) stride(%3 : i64) start_idx(%3 : i64)
  %5 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%4) -> !llvm.ptr {name = ""}
  %6 = llvm.mlir.constant(4 : index) : i64
  %7 = llvm.mlir.constant(1 : index) : i64
  %8 = llvm.mlir.constant(1 : index) : i64
  %9 = llvm.mlir.constant(1 : index) : i64
  %10 = omp.map.bounds lower_bound(%7 : i64) upper_bound(%6 : i64) stride(%9 : i64) start_idx(%9 : i64)
  %11 = omp.map.info var_ptr(%arg2 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%10) -> !llvm.ptr {name = ""}
  omp.target map_entries(%5 -> %arg3, %11 -> %arg4 : !llvm.ptr, !llvm.ptr) {
    omp.terminator
  }
  llvm.return
}

// -----

// CHECK: omp.private {type = private} @x.privatizer : !llvm.struct<{{.*}}> alloc {
omp.private {type = private} @x.privatizer : memref<?xf32> alloc {
// CHECK: ^bb0(%arg0: !llvm.struct<{{.*}}>):
^bb0(%arg0: memref<?xf32>):
  // CHECK: omp.yield(%arg0 : !llvm.struct<{{.*}}>)
  omp.yield(%arg0 : memref<?xf32>)
}

// -----

// CHECK: omp.private {type = firstprivate} @y.privatizer : i64 alloc {
omp.private {type = firstprivate} @y.privatizer : index alloc {
// CHECK: ^bb0(%arg0: i64):
^bb0(%arg0: index):
  // CHECK: omp.yield(%arg0 : i64)
  omp.yield(%arg0 : index)
// CHECK: } copy {
} copy {
// CHECK: ^bb0(%arg0: i64, %arg1: i64):
^bb0(%arg0: index, %arg1: index):
  // CHECK: omp.yield(%arg0 : i64)
  omp.yield(%arg0 : index)
}

// -----

// CHECK-LABEL: llvm.func @omp_cancel_cancellation_point()
func.func @omp_cancel_cancellation_point() -> () {
  omp.parallel {
    // CHECK: omp.cancel cancellation_construct_type(parallel)
    omp.cancel cancellation_construct_type(parallel)
    // CHECK: omp.cancellation_point cancellation_construct_type(parallel)
    omp.cancellation_point cancellation_construct_type(parallel)
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: llvm.func @omp_distribute(
// CHECK-SAME: %[[ARG0:.*]]: i64)
func.func @omp_distribute(%arg0 : index) -> () {
  // CHECK: omp.distribute dist_schedule_static dist_schedule_chunk_size(%[[ARG0]] : i64) {
  omp.distribute dist_schedule_static dist_schedule_chunk_size(%arg0 : index) {
    omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
      omp.yield
    }
  }
  return
}

// -----

// CHECK-LABEL: llvm.func @omp_teams(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: i64)
func.func @omp_teams(%arg0 : memref<i32>) -> () {
  // CHECK: omp.teams allocate(%{{.*}} : !llvm.struct<(ptr, ptr, i64)> -> %{{.*}} : !llvm.struct<(ptr, ptr, i64)>)
  omp.teams allocate(%arg0 : memref<i32> -> %arg0 : memref<i32>) {
    omp.terminator
  }
  return
}

// -----

// CHECK-LABEL: llvm.func @omp_ordered(
// CHECK-SAME: %[[ARG0:.*]]: i64)
func.func @omp_ordered(%arg0 : index) -> () {
  omp.wsloop ordered(1) {
    omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
      // CHECK: omp.ordered depend_vec(%[[ARG0]] : i64) {doacross_num_loops = 1 : i64}
      omp.ordered depend_vec(%arg0 : index) {doacross_num_loops = 1 : i64}
      omp.yield
    }
  }
  return
}

// -----

// CHECK-LABEL: @omp_taskloop(
// CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: !llvm.ptr, %[[ARG3:.*]]: i64)
func.func @omp_taskloop(%arg0: index, %arg1 : memref<i32>) {
  // CHECK: omp.parallel {
  omp.parallel {
    // CHECK: omp.taskloop allocate(%{{.*}} : !llvm.struct<(ptr, ptr, i64)> -> %{{.*}} : !llvm.struct<(ptr, ptr, i64)>) {
    omp.taskloop allocate(%arg1 : memref<i32> -> %arg1 : memref<i32>) {
      // CHECK: omp.loop_nest (%[[IV:.*]]) : i64 = (%[[ARG0]]) to (%[[ARG0]]) step (%[[ARG0]]) {
      omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
        // CHECK-DAG: %[[CAST_IV:.*]] = builtin.unrealized_conversion_cast %[[IV]] : i64 to index
        // CHECK: "test.payload"(%[[CAST_IV]]) : (index) -> ()
        "test.payload"(%iv) : (index) -> ()
        omp.yield
      }
    }
    omp.terminator
  }
  return
}