// xref: /llvm-project/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir (revision 06a65ce500a632048db1058de9ca61072004a640)
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="parallelization-strategy=none" | \
// RUN:   FileCheck %s --check-prefix=CHECK-PAR0
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="parallelization-strategy=dense-outer-loop" | \
// RUN:   FileCheck %s --check-prefix=CHECK-PAR1
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="parallelization-strategy=any-storage-outer-loop" | \
// RUN:   FileCheck %s --check-prefix=CHECK-PAR2
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="parallelization-strategy=dense-any-loop" | \
// RUN:   FileCheck %s --check-prefix=CHECK-PAR3
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="parallelization-strategy=any-storage-any-loop" | \
// RUN:   FileCheck %s --check-prefix=CHECK-PAR4

// 2-D storage with both dimensions stored densely.
#DenseMatrix = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : dense)
}>

// 2-D storage with both dimensions compressed (doubly compressed).
#SparseMatrix = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed, d1 : compressed)
}>

// Compressed sparse row: dense row dimension, compressed column dimension.
#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

// Elementwise scaling trait: X(i,j) = A(i,j) * SCALE.
#trait_dd = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) * SCALE"
}

//
// CHECK-PAR0-LABEL: func @scale_dd
// CHECK-PAR0:         scf.for
// CHECK-PAR0:           scf.for
// CHECK-PAR0:         return
//
// CHECK-PAR1-LABEL: func @scale_dd
// CHECK-PAR1:         scf.parallel
// CHECK-PAR1:           scf.for
// CHECK-PAR1:         return
//
// CHECK-PAR2-LABEL: func @scale_dd
// CHECK-PAR2:         scf.parallel
// CHECK-PAR2:           scf.for
// CHECK-PAR2:         return
//
// CHECK-PAR3-LABEL: func @scale_dd
// CHECK-PAR3:         scf.parallel
// CHECK-PAR3:           scf.parallel
// CHECK-PAR3:         return
//
// CHECK-PAR4-LABEL: func @scale_dd
// CHECK-PAR4:         scf.parallel
// CHECK-PAR4:           scf.parallel
// CHECK-PAR4:         return
//
// Scales a dense-annotated matrix elementwise: X(i,j) = A(i,j) * scale.
func.func @scale_dd(%scale: f32,
                    %arga: tensor<?x?xf32, #DenseMatrix>,
                    %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.generic #trait_dd
     ins(%arga: tensor<?x?xf32, #DenseMatrix>)
    outs(%argx: tensor<?x?xf32>) {
      ^bb(%a: f32, %x: f32):
        %0 = arith.mulf %a, %scale : f32
        linalg.yield %0 : f32
  } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// Elementwise scaling trait (sparse operand variant): X(i,j) = A(i,j) * SCALE.
#trait_ss = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) * SCALE"
}

//
// CHECK-PAR0-LABEL: func @scale_ss
// CHECK-PAR0:         scf.for
// CHECK-PAR0:           scf.for
// CHECK-PAR0:         return
//
// CHECK-PAR1-LABEL: func @scale_ss
// CHECK-PAR1:         scf.for
// CHECK-PAR1:           scf.for
// CHECK-PAR1:         return
//
// CHECK-PAR2-LABEL: func @scale_ss
// CHECK-PAR2:         scf.parallel
// CHECK-PAR2:           scf.for
// CHECK-PAR2:         return
//
// CHECK-PAR3-LABEL: func @scale_ss
// CHECK-PAR3:         scf.for
// CHECK-PAR3:           scf.for
// CHECK-PAR3:         return
//
// CHECK-PAR4-LABEL: func @scale_ss
// CHECK-PAR4:         scf.parallel
// CHECK-PAR4:           scf.parallel
// CHECK-PAR4:         return
//
// Scales a doubly compressed sparse matrix elementwise: X(i,j) = A(i,j) * scale.
func.func @scale_ss(%scale: f32,
                    %arga: tensor<?x?xf32, #SparseMatrix>,
                    %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.generic #trait_ss
     ins(%arga: tensor<?x?xf32, #SparseMatrix>)
    outs(%argx: tensor<?x?xf32>) {
      ^bb(%a: f32, %x: f32):
        %0 = arith.mulf %a, %scale : f32
        linalg.yield %0 : f32
  } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// Matrix-vector multiply trait: x(i) += A(i,j) * b(j).
#trait_matvec = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (j)>,    // b
    affine_map<(i,j) -> (i)>     // x (out)
  ],
  iterator_types = ["parallel", "reduction"],
  doc = "x(i) += A(i,j) * b(j)"
}

//
// CHECK-PAR0-LABEL: func @matvec
// CHECK-PAR0:         scf.for
// CHECK-PAR0:           scf.for
// CHECK-PAR0:         return
//
// CHECK-PAR1-LABEL: func @matvec
// CHECK-PAR1:         scf.parallel
// CHECK-PAR1:           scf.for
// CHECK-PAR1:         return
//
// CHECK-PAR2-LABEL: func @matvec
// CHECK-PAR2:         scf.parallel
// CHECK-PAR2:           scf.for
// CHECK-PAR2:         return
//
// CHECK-PAR3-LABEL: func @matvec
// CHECK-PAR3:         scf.parallel
// CHECK-PAR3:           scf.for
// CHECK-PAR3:         return
//
// CHECK-PAR4-LABEL: func @matvec
// CHECK-PAR4:         scf.parallel
// CHECK-PAR4:           scf.parallel
// CHECK-PAR4:             scf.reduce
// CHECK-PAR4:         return
//
// Sparse matrix-vector multiply over a CSR matrix: x(i) += A(i,j) * b(j).
func.func @matvec(%arga: tensor<16x32xf32, #CSR>,
                  %argb: tensor<32xf32>,
                  %argx: tensor<16xf32>) -> tensor<16xf32> {
  %0 = linalg.generic #trait_matvec
      ins(%arga, %argb : tensor<16x32xf32, #CSR>, tensor<32xf32>)
     outs(%argx: tensor<16xf32>) {
    ^bb(%A: f32, %b: f32, %x: f32):
      %0 = arith.mulf %A, %b : f32
      %1 = arith.addf %0, %x : f32
      linalg.yield %1 : f32
  } -> tensor<16xf32>
  return %0 : tensor<16xf32>
}
