// RUN: mlir-opt %s \
// RUN:   -async-parallel-for=async-dispatch=true \
// RUN:   -canonicalize -inline -symbol-dce \
// RUN: | FileCheck %s

// RUN: mlir-opt %s \
// RUN:   -async-parallel-for=async-dispatch=false \
// RUN:   -canonicalize -inline -symbol-dce \
// RUN: | FileCheck %s

// Check that if we statically know that the parallel operation has a single
// block then all async operations will be canonicalized away and we will
// end up with a single synchronous compute function call.

// CHECK-LABEL: @loop_1d(
// CHECK: %[[MEMREF:.*]]: memref<?xf32>
func.func @loop_1d(%arg0: memref<?xf32>) {
  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
  // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
  // CHECK-DAG: %[[C100:.*]] = arith.constant 100 : index
  // CHECK-DAG: %[[ONE:.*]] = arith.constant 1.000000e+00 : f32
  // CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]]
  // CHECK:   memref.store %[[ONE]], %[[MEMREF]][%[[I]]]
  %lb = arith.constant 0 : index
  %ub = arith.constant 100 : index
  %st = arith.constant 1 : index
  scf.parallel (%i) = (%lb) to (%ub) step (%st) {
    %one = arith.constant 1.0 : f32
    memref.store %one, %arg0[%i] : memref<?xf32>
  }

  return
}