// RUN: mlir-opt %s --lower-sparse-foreach-to-scf --canonicalize | FileCheck %s

// CHECK-LABEL: func.func @sparse_foreach_constant
// CHECK-DAG:   %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG:   %[[V1:.*]] = arith.constant 5.000000e+00 : f32
// CHECK-DAG:   %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG:   %[[V3:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG:   %[[V4:.*]] = arith.constant 6.000000e+00 : f32
//               (1, 1) -> (2, 1) -> (1, 2)
// CHECK-NEXT:  "test.use"(%[[C1]], %[[C1]], %[[V1]])
// CHECK-NEXT:  "test.use"(%[[C2]], %[[C1]], %[[V3]])
// CHECK-NEXT:  "test.use"(%[[C1]], %[[C2]], %[[V4]])
//               (1, 1) -> (1, 2) -> (2, 1)
// CHECK-NEXT:  "test.use"(%[[C1]], %[[C1]], %[[V1]])
// CHECK-NEXT:  "test.use"(%[[C1]], %[[C2]], %[[V4]])
// CHECK-NEXT:  "test.use"(%[[C2]], %[[C1]], %[[V3]])
func.func @sparse_foreach_constant() -> () {
  %cst = arith.constant sparse<[[2, 1], [1, 1], [1, 2]], [1.0, 5.0, 6.0]> : tensor<8x7xf32>
  // Make sure the sparse constant is properly sorted based on the requested order.
  sparse_tensor.foreach in %cst { order = affine_map<(d0, d1) -> (d1, d0)> } : tensor<8x7xf32> do {
  ^bb0(%arg0: index, %arg1: index, %arg2: f32):
    "test.use" (%arg0, %arg1, %arg2): (index,index,f32)->()
  }
  sparse_tensor.foreach in %cst : tensor<8x7xf32> do {
  ^bb0(%arg0: index, %arg1: index, %arg2: f32):
    "test.use" (%arg0, %arg1, %arg2): (index,index,f32)->()
  }
  return
}

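// A statically specified CSR slice: each dimension carries an (offset, size, stride)
// triple, here (0, 4, 1) for rows and (2, 4, 1) for columns.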
#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(2, 4, 1)>) -> (d0 : compressed, d1 : compressed)
}>

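// The same layout with fully dynamic slice parameters, which are recovered at
// runtime through sparse_tensor.slice.offset and sparse_tensor.slice.stride.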
#CSR_SLICE_DYN = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed, d1 : compressed)
}>

// TODO: re-enable after lowering coo.next to a function call (so that the loop structure is clearer).

// C_HECK-LABEL:   func.func @foreach_print_slice_dyn(
// C_HECK-SAME:                                       %[[VAL_0:.*]]: tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
// C_HECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
// C_HECK-DAG:       %[[VAL_3:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[VAL_1]] : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.slice.offset %[[VAL_0]] at 0 : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.slice.stride %[[VAL_0]] at 0 : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[VAL_2]] : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.slice.offset %[[VAL_0]] at 1 : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.slice.stride %[[VAL_0]] at 1 : tensor<?x?xf64,
// C_HECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64,
// C_HECK:           %[[VAL_14:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// C_HECK:           %[[VAL_15:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// C_HECK:           scf.for %[[VAL_16:.*]] = %[[VAL_14]] to %[[VAL_15]] step %[[VAL_2]] {
// C_HECK:             %[[VAL_17:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// C_HECK:             %[[VAL_18:.*]] = arith.subi %[[VAL_17]], %[[VAL_6]] : index
// C_HECK:             %[[VAL_19:.*]] = arith.remui %[[VAL_18]], %[[VAL_7]] : index
// C_HECK:             %[[VAL_20:.*]] = arith.divui %[[VAL_18]], %[[VAL_7]] : index
// C_HECK:             %[[VAL_21:.*]] = arith.cmpi uge, %[[VAL_17]], %[[VAL_6]] : index
// C_HECK:             %[[VAL_22:.*]] = arith.cmpi ult, %[[VAL_20]], %[[VAL_5]] : index
// C_HECK:             %[[VAL_23:.*]] = arith.cmpi eq, %[[VAL_19]], %[[VAL_1]] : index
// C_HECK:             %[[VAL_24:.*]] = arith.andi %[[VAL_21]], %[[VAL_22]] : i1
// C_HECK:             %[[VAL_25:.*]] = arith.andi %[[VAL_24]], %[[VAL_23]] : i1
// C_HECK:             scf.if %[[VAL_25]] {
// C_HECK:               %[[VAL_26:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// C_HECK:               %[[VAL_27:.*]] = arith.addi %[[VAL_16]], %[[VAL_2]] : index
// C_HECK:               %[[VAL_28:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// C_HECK:               scf.for %[[VAL_29:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_2]] {
// C_HECK:                 %[[VAL_30:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_29]]] : memref<?xindex>
// C_HECK:                 %[[VAL_31:.*]] = arith.subi %[[VAL_30]], %[[VAL_11]] : index
// C_HECK:                 %[[VAL_32:.*]] = arith.remui %[[VAL_31]], %[[VAL_12]] : index
// C_HECK:                 %[[VAL_33:.*]] = arith.divui %[[VAL_31]], %[[VAL_12]] : index
// C_HECK:                 %[[VAL_34:.*]] = arith.cmpi uge, %[[VAL_30]], %[[VAL_11]] : index
// C_HECK:                 %[[VAL_35:.*]] = arith.cmpi ult, %[[VAL_33]], %[[VAL_10]] : index
// C_HECK:                 %[[VAL_36:.*]] = arith.cmpi eq, %[[VAL_32]], %[[VAL_1]] : index
// C_HECK:                 %[[VAL_37:.*]] = arith.andi %[[VAL_34]], %[[VAL_35]] : i1
// C_HECK:                 %[[VAL_38:.*]] = arith.andi %[[VAL_37]], %[[VAL_36]] : i1
// C_HECK:                 scf.if %[[VAL_38]] {
// C_HECK:                   %[[VAL_39:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_29]]] : memref<?xf64>
// C_HECK:                   "test.use"(%[[VAL_39]]) : (f64) -> ()
// C_HECK:                 }
// C_HECK:               }
// C_HECK:             }
// C_HECK:           }
// C_HECK:           return
//
func.func @foreach_print_slice_dyn(%A: tensor<?x?xf64, #CSR_SLICE_DYN>) {
  sparse_tensor.foreach in %A : tensor<?x?xf64, #CSR_SLICE_DYN> do {
  ^bb0(%1: index, %2: index, %v: f64) :
    "test.use" (%v) : (f64) -> ()
  }
  return
}

// C_HECK-LABEL:   func.func @foreach_print_slice(
// C_HECK-SAME:                                   %[[VAL_0:.*]]: tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_1:.*]] = arith.constant 4 : index
// C_HECK-DAG:       %[[VAL_2:.*]] = arith.constant 2 : index
// C_HECK-DAG:       %[[VAL_3:.*]] = arith.constant 0 : index
// C_HECK-DAG:       %[[VAL_4:.*]] = arith.constant 1 : index
// C_HECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4xf64,
// C_HECK-DAG:       %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// C_HECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// C_HECK:           scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
// C_HECK:             %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// C_HECK:             %[[VAL_14:.*]] = arith.cmpi ult, %[[VAL_13]], %[[VAL_1]] : index
// C_HECK:             scf.if %[[VAL_14]] {
// C_HECK:               %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// C_HECK:               %[[VAL_16:.*]] = arith.addi %[[VAL_12]], %[[VAL_4]] : index
// C_HECK:               %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// C_HECK:               scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_4]] {
// C_HECK:                 %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// C_HECK:                 %[[VAL_20:.*]] = arith.subi %[[VAL_19]], %[[VAL_2]] : index
// C_HECK:                 %[[VAL_21:.*]] = arith.cmpi uge, %[[VAL_19]], %[[VAL_2]] : index
// C_HECK:                 %[[VAL_22:.*]] = arith.cmpi ult, %[[VAL_20]], %[[VAL_1]] : index
// C_HECK:                 %[[VAL_23:.*]] = arith.andi %[[VAL_21]], %[[VAL_22]] : i1
// C_HECK:                 scf.if %[[VAL_23]] {
// C_HECK:                   %[[VAL_24:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// C_HECK:                   "test.use"(%[[VAL_24]]) : (f64) -> ()
// C_HECK:                 }
// C_HECK:               }
// C_HECK:             }
// C_HECK:           }
// C_HECK:           return
//
func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
  sparse_tensor.foreach in %A : tensor<4x4xf64, #CSR_SLICE> do {
  ^bb0(%1: index, %2: index, %v: f64) :
    "test.use" (%v) : (f64) -> ()
  }
  return
}

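// A batched COO layout: a dense batch level followed by a
// loose_compressed(nonunique) level and a singleton level.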
#BCOO = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>

// C_HECK-LABEL:   func.func @foreach_bcoo(
// C_HECK-SAME:      %[[VAL_0:.*]]: tensor<4x4x4xf64, #sparse{{[0-9]*}}>) {
// C_HECK-DAG:       %[[VAL_1:.*]] = arith.constant 4 : index
// C_HECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
// C_HECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
// C_HECK-DAG:       %[[VAL_4:.*]] = arith.constant 2 : index
// C_HECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4x4xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// C_HECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4x4xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// C_HECK:           scf.for %[[VAL_7:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] {
// C_HECK:             %[[VAL_8:.*]] = arith.muli %[[VAL_7]], %[[VAL_4]] : index
// C_HECK:             %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// C_HECK:             %[[VAL_10:.*]] = arith.addi %[[VAL_8]], %[[VAL_3]] : index
// C_HECK:             %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// C_HECK:             scf.for %[[VAL_12:.*]] = %[[VAL_9]] to %[[VAL_11]] step %[[VAL_3]] {
// C_HECK:               %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xf64>
// C_HECK:               "test.use"(%[[VAL_13]]) : (f64) -> ()
// C_HECK:             } {"Emitted from" = "sparse_tensor.foreach"}
// C_HECK:           } {"Emitted from" = "sparse_tensor.foreach"}
// C_HECK:           return
// C_HECK:         }
func.func @foreach_bcoo(%A: tensor<4x4x4xf64, #BCOO>) {
  sparse_tensor.foreach in %A : tensor<4x4x4xf64, #BCOO> do {
  ^bb0(%1: index, %2: index, %3: index,  %v: f64) :
    "test.use" (%v) : (f64) -> ()
  }
  return
}