xref: /llvm-project/mlir/test/Conversion/SCFToGPU/step_positive.mlir (revision 477c0b67a3ab30e74f3563b3f0b9d4d53caba465)
// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1}))" %s | FileCheck %s

// CHECK-LABEL: @step_var
func.func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
  // The launch grid size is the trip count, so the range must be divided
  // by the (non-unit) step: range = ceildiv(ub - lb, step).
  // CHECK:  %[[range_i:.*]] = arith.ceildivsi {{.*}}, %{{.*}}
  // CHECK:  %[[range_j:.*]] = arith.ceildivsi {{.*}}, %{{.*}}

  // CHECK: gpu.launch
  // CHECK-SAME: blocks(%{{[^)]*}}, %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[range_i]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
  // CHECK-SAME: threads(%{{[^)]*}}, %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[range_j]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
  affine.for %i = 5 to 15 step 4 {
    affine.for %j = 3 to 19 step 7 {
      // Loop induction variable remapping:
      //     iv = thread(block)_id * step + lower_bound
      // CHECK:      %[[prod_i:.*]] = arith.muli %{{.*}}, %{{.*}} : index
      // CHECK-NEXT: %[[i:.*]] = arith.addi %{{.*}}, %[[prod_i]] : index
      // CHECK-NEXT: %[[prod_j:.*]] = arith.muli %{{.*}}, %{{.*}} : index
      // CHECK-NEXT: %[[j:.*]] = arith.addi %{{.*}}, %[[prod_j]] : index

      // The remapped IVs must be used for the memory accesses inside the body.
      // CHECK:     {{.*}} = memref.load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32>
      %0 = memref.load %A[%i, %j] : memref<?x?xf32>
      // CHECK:     memref.store {{.*}}, %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32>
      memref.store %0, %B[%i, %j] : memref<?x?xf32>
    }
  }
  return
}