// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification -cse -sparse-vectorization="vl=8" -cse | \
// RUN:   FileCheck %s

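// Note: this test exercises the sparsifier's vectorizer (vl=8); each math/arith
// op in the scalar linalg.generic body below is expected to reappear as its
// 8-wide vector counterpart in the CHECK lines.
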
#DenseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }>

#trait = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a
    affine_map<(i) -> (i)>,  // b
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) ops b(i)"
}

// CHECK-LABEL: func.func @vops
// CHECK-DAG:       %[[C1:.*]] = arith.constant dense<2.000000e+00> : vector<8xf32>
// CHECK-DAG:       %[[C2:.*]] = arith.constant dense<1.000000e+00> : vector<8xf32>
// CHECK-DAG:       %[[C3:.*]] = arith.constant dense<255> : vector<8xi64>
// CHECK-DAG:       %[[C4:.*]] = arith.constant dense<4> : vector<8xi32>
// CHECK-DAG:       %[[C5:.*]] = arith.constant dense<1> : vector<8xi32>
// CHECK:           scf.for
// CHECK:             %[[VAL_14:.*]] = vector.load
// CHECK:             %[[VAL_15:.*]] = math.absf %[[VAL_14]] : vector<8xf32>
// CHECK:             %[[VAL_16:.*]] = math.ceil %[[VAL_15]] : vector<8xf32>
// CHECK:             %[[VAL_17:.*]] = math.floor %[[VAL_16]] : vector<8xf32>
// CHECK:             %[[VAL_18:.*]] = math.sqrt %[[VAL_17]] : vector<8xf32>
// CHECK:             %[[VAL_19:.*]] = math.expm1 %[[VAL_18]] : vector<8xf32>
// CHECK:             %[[VAL_20:.*]] = math.sin %[[VAL_19]] : vector<8xf32>
// CHECK:             %[[VAL_21:.*]] = math.tanh %[[VAL_20]] : vector<8xf32>
// CHECK:             %[[VAL_22:.*]] = arith.negf %[[VAL_21]] : vector<8xf32>
// CHECK:             %[[VAL_23:.*]] = vector.load
// CHECK:             %[[VAL_24:.*]] = arith.mulf %[[VAL_22]], %[[VAL_23]] : vector<8xf32>
// CHECK:             %[[VAL_25:.*]] = arith.divf %[[VAL_24]], %[[C1]] : vector<8xf32>
// CHECK:             %[[VAL_26:.*]] = arith.addf %[[VAL_25]], %[[C1]] : vector<8xf32>
// CHECK:             %[[VAL_27:.*]] = arith.subf %[[VAL_26]], %[[C2]] : vector<8xf32>
// CHECK:             %[[VAL_28:.*]] = arith.extf %[[VAL_27]] : vector<8xf32> to vector<8xf64>
// CHECK:             %[[VAL_29:.*]] = arith.bitcast %[[VAL_28]] : vector<8xf64> to vector<8xi64>
// CHECK:             %[[VAL_30:.*]] = arith.addi %[[VAL_29]], %[[VAL_29]] : vector<8xi64>
// CHECK:             %[[VAL_31:.*]] = arith.andi %[[VAL_30]], %[[C3]] : vector<8xi64>
// CHECK:             %[[VAL_32:.*]] = arith.trunci %[[VAL_31]] : vector<8xi64> to vector<8xi16>
// CHECK:             %[[VAL_33:.*]] = arith.extsi %[[VAL_32]] : vector<8xi16> to vector<8xi32>
// CHECK:             %[[VAL_34:.*]] = arith.shrsi %[[VAL_33]], %[[C4]] : vector<8xi32>
// CHECK:             %[[VAL_35:.*]] = arith.shrui %[[VAL_34]], %[[C4]] : vector<8xi32>
// CHECK:             %[[VAL_36:.*]] = arith.shli %[[VAL_35]], %[[C5]] : vector<8xi32>
// CHECK:             %[[VAL_37:.*]] = arith.uitofp %[[VAL_36]] : vector<8xi32> to vector<8xf32>
// CHECK:             vector.store %[[VAL_37]]
// CHECK:           }
func.func @vops(%arga: tensor<1024xf32, #DenseVector>,
                %argb: tensor<1024xf32, #DenseVector>) -> tensor<1024xf32> {
  %init = tensor.empty() : tensor<1024xf32>
  %o = arith.constant 1.0 : f32
  %c = arith.constant 2.0 : f32
  %i = arith.constant 255 : i64
  %s = arith.constant 4 : i32
  %t = arith.constant 1 : i32
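  // Elementwise kernel; the scalar constants above should materialize as the
  // splat vector constants matched by the CHECK-DAG lines.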
  %0 = linalg.generic #trait
    ins(%arga, %argb: tensor<1024xf32, #DenseVector>, tensor<1024xf32, #DenseVector>)
    outs(%init: tensor<1024xf32>) {
      ^bb(%a: f32, %b: f32, %x: f32):
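        // Scalar reference chain: unary math ops, binary fp arithmetic, integer
        // bit manipulation, and a final conversion back to f32.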
        %0 = math.absf %a : f32
        %1 = math.ceil %0 : f32
        %2 = math.floor %1 : f32
        %3 = math.sqrt %2 : f32
        %4 = math.expm1 %3 : f32
        %5 = math.sin %4 : f32
        %6 = math.tanh %5 : f32
        %7 = arith.negf %6 : f32
        %8 = arith.mulf %7, %b : f32
        %9 = arith.divf %8, %c : f32
        %10 = arith.addf %9, %c : f32
        %11 = arith.subf %10, %o : f32
        %12 = arith.extf %11 : f32 to f64
        %13 = arith.bitcast %12 : f64 to i64
        %14 = arith.addi %13, %13 : i64
        %15 = arith.andi %14, %i : i64
        %16 = arith.trunci %15 : i64 to i16
        %17 = arith.extsi %16 : i16 to i32
        %18 = arith.shrsi %17, %s : i32
        %19 = arith.shrui %18, %s : i32
        %20 = arith.shli %19, %t : i32
        %21 = arith.uitofp %20 : i32 to f32
        linalg.yield %21 : f32
  } -> tensor<1024xf32>
  return %0 : tensor<1024xf32>
}