/llvm-project/mlir/utils/tree-sitter-mlir/dialect/
  linalg.js
    'linalg.batch_matmul', 'linalg.batch_matmul_transpose_b', 'linalg.batch_matvec',
    'linalg.batch_reduce_matmul', 'linalg.broadcast',
    'linalg.conv_1d_ncw_fcw', 'linalg.conv_1d_nwc_wcf', 'linalg.conv_1d',
    'linalg.conv_2d_nchw_fchw', 'linalg.conv_2d_ngchw_fgchw', 'linalg.conv_2d_nhwc_fhwc',
    'linalg.conv_2d_nhwc_hwcf', 'linalg.conv_2d_nhwc_hwcf_q', 'linalg.conv_2d',
    [all …]
/llvm-project/mlir/utils/tree-sitter-mlir/queries/
  highlights.scm
    "linalg.batch_matmul" "linalg.batch_matmul_transpose_b" "linalg.batch_matvec"
    "linalg.batch_reduce_matmul" "linalg.broadcast" "linalg.conv_1d_ncw_fcw"
    "linalg.conv_1d_nwc_wcf" "linalg.conv_1d" "linalg.conv_2d_nchw_fchw"
    "linalg.conv_2d_ngchw_fgchw"
    [all …]
/llvm-project/mlir/include/mlir/Dialect/Linalg/
  Passes.td
    //===-- Passes.td - Linalg pass definition file ------------*- tablegen -*-===//
    def ConvertElementwiseToLinalgPass : Pass<"convert-elementwise-to-linalg", ""> {
    let summary = "Convert ElementwiseMappable ops to linalg";
    Convert ops with the `ElementwiseMappable` trait to linalg parallel loops.
    run on op which contains linalg ops (most commonly a
    let dependentDialects = ["linalg::LinalgDialect", "memref::MemRefDialect"];
    def ConvertLinalgToAffineLoopsPass : Pass<"convert-linalg-to-affine-loops"> {
    let summary = "Lower the operations from the linalg dialect into affine "
    "affine::AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
    def ConvertLinalgToLoopsPass : Pass<"convert-linalg-to-loops"> {
    [all …]
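For context, these pass entry points are exercised through `mlir-opt` with the flags declared above. A minimal, hedged sketch (the function body is invented for illustration; only the `-convert-linalg-to-loops` flag comes from the snippet):

```mlir
// RUN: mlir-opt %s -convert-linalg-to-loops
// linalg.fill on a memref lowers to an scf.for nest that stores %v into each element.
func.func @fill_to_loops(%buf: memref<16x32xf32>, %v: f32) {
  linalg.fill ins(%v : f32) outs(%buf : memref<16x32xf32>)
  return
}
```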
/llvm-project/mlir/test/Dialect/Linalg/
  invalid.mlir
    linalg.yield %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
    linalg.index 0 : index
    linalg.generic {
    linalg.index 2 : index
    linalg.yield %0 : f32
    linalg.generic {
    linalg.index -1 : index
    linalg.yield %0 : f32
    linalg.generic {
    linalg…
    [all …]
  named-ops-fail.mlir
    linalg.add ins(%arg0, %arg1 : memref<4x8x16xf32>, memref<4x8x16xf16>) outs(%arg2: memref<4x8x16xf3…
    linalg.add ins(%arg0, %arg1 : memref<8x16xf32>, memref<4x8x16xf32>) outs(%arg2: memref<4x8x16xf32>)
    linalg.sub ins(%arg0, %arg1 : memref<4x8x16xf32>, memref<4x8x16xf16>) outs(%arg2: memref<4x8x16xf3…
    linalg.sub ins(%arg0, %arg1 : memref<8x16xf32>, memref<4x8x16xf32>) outs(%arg2: memref<4x8x16xf32>)
    linalg.mul ins(%arg0, %arg1 : memref<4x8x16xf32>, memref<4x8x16xf16>) outs(%arg2: memref<4x8x16xf3…
    linalg.mul ins(%arg0, %arg1 : memref<8x16xf32>, memref<4x8x16xf32>) outs(%arg2: memref<4x8x16xf32>)
    linalg.div ins(%arg0, %arg1 : memref<4x8x16xf32>, memref<4x8x16xf16>) outs(%arg2: memref<4x8x16xf3…
    linalg.div ins(%arg0, %arg1 : memref<8x16xf32>, memref<4x8x16xf32>) outs(%arg2: memref<4x8x16xf32>)
    linalg.div_unsigned ins(%arg0, %arg1 : memref<4x8x16xi32>, memref<4x8x16xi16>) outs(%arg2: memref<…
    linalg.div_unsigned ins(%arg0, %arg1 : memref<8x16xi32>, memref<4x8x16xi32>) outs(%arg2: memref<4x…
    [all …]
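For contrast with the failing cases above (mismatched element types or ranks), a well-formed elementwise named op requires identical shapes and element types across `ins` and `outs`; a minimal sketch:

```mlir
func.func @add(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>, %arg2: memref<4x8x16xf32>) {
  // All operands share the same shape and element type, so this verifies.
  linalg.add ins(%arg0, %arg1 : memref<4x8x16xf32>, memref<4x8x16xf32>)
             outs(%arg2 : memref<4x8x16xf32>)
  return
}
```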
  one-shot-bufferize-analysis.mlir
    // CHECK: linalg.elemwise_binary
    // CHECK-SAME: {__inplace_operands_attr__ = ["true", "true", "true"], fun = #linalg.binary_fn<add>}
    %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
    // CHECK: linalg.elemwise_binary
    // CHECK-SAME: {__inplace_operands_attr__ = ["true", "true", "true"], fun = #linalg.binary_fn<add>}
    %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
    // CHECK: linalg.elemwise_binary
    // CHECK-SAME: {__inplace_operands_attr__ = ["true", "none", "true"], fun = #linalg.binary_fn<add>}
    %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
    // CHECK: linalg.generic
    [all …]
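Written out in full, the op these CHECK lines analyze looks roughly like the following (a hedged sketch on 1-D tensors; the `__inplace_operands_attr__` annotations above are what the analysis attaches to each operand):

```mlir
func.func @eltwise(%a: tensor<?xf32>, %b: tensor<?xf32>, %c: tensor<?xf32>) -> tensor<?xf32> {
  // After analysis, each operand is marked as bufferizable in place ("true") or not.
  %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
         ins(%a, %b : tensor<?xf32>, tensor<?xf32>)
         outs(%c : tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}
```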
  transform-pack-greedily.mlir
    // CHECK: linalg.generic
    %0 = linalg.matmul ins(%A, %B : !A_mk, !B_kn) outs(%C : !C_mn) -> !C_mn
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %module_op
        : (!transform.any_op) -> !transform.op<"linalg.matmul">
        : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
    // CHECK: linalg.generic
    %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
      linalg.yield %e : f32
    …c = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !tra…
        : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    [all …]
  roundtrip.mlir
    linalg.matmul ins(%arg0, %arg0 : memref<?x?xf32, strided<[?, 1], offset: ?>>,
    linalg.matvec ins(%arg0, %arg1: memref<?x?xf32, strided<[?, 1], offset: ?>>,
    linalg.dot ins(%arg1, %arg2: memref<?xf32, strided<[1], offset: ?>>,
    // CHECK: linalg.matmul
    // CHECK: linalg.matvec
    // CHECK: linalg.dot
    linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?xf32, strided<[1], offset: ?>>)
    // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%{{.*}} : memref<?xf32, strided<[1], offset: ?>>)
    linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?x?x?xf32, strided<[?, ?, 1], offset: ?>>)
    // CHECK: linalg…
    [all …]
  fusion-elementwise-ops.mlir
    // RUN: mlir-opt %s -linalg-fuse-elementwise-ops -split-input-file | FileCheck %s
    %3 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "paralle…
    linalg.yield %4 : f32
    // CHECK: linalg.generic {
    %4 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "paralle…
    // CHECK-NOT: linalg.yield
    // CHECK: linalg.yield
    linalg.yield %5 : f32
    %3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "paralle…
    linalg.yield %4 : f32
    [all …]
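The producers and consumers that `-linalg-fuse-elementwise-ops` fuses are plain elementwise `linalg.generic` ops with identity indexing maps; a minimal self-contained sketch of one such op:

```mlir
#map0 = affine_map<(d0, d1) -> (d0, d1)>
func.func @eltwise_add(%a: tensor<?x?xf32>, %b: tensor<?x?xf32>,
                       %init: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Identity maps on every operand and only "parallel" iterators: a pure
  // elementwise op, which is the shape the fusion pattern looks for.
  %0 = linalg.generic {indexing_maps = [#map0, #map0, #map0],
                       iterator_types = ["parallel", "parallel"]}
      ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>) outs(%init : tensor<?x?xf32>) {
  ^bb0(%in: f32, %in_0: f32, %out: f32):
    %sum = arith.addf %in, %in_0 : f32
    linalg.yield %sum : f32
  } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
```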
  named-ops.mlir
    %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
    %0 = linalg.depthwise_conv_1d_nwc_wcm {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
    %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8xf32>) -> tensor<1x10x8xf32>
    %0 = linalg.depthwise_conv_1d_nwc_wc {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
    %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x8x10xf32>) -> tensor<1x8x10xf32>
    %0 = linalg.depthwise_conv_1d_ncw_cw {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
    %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32>
    // CHECK: %{{.+}} = linalg.depthwise_conv_2d_nhwc_hwcm
    %0 = linalg.depthwise_conv_2d_nhwc_hwcm
    // CHECK: linalg…
    [all …]
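Filling in the truncated pieces by hand, a complete use of one of these depthwise convolutions might look like the sketch below. The shapes are invented, not taken from the test: a 1x12x8 NWC input with a 3x8 WC filter and unit stride/dilation yields the 1x10x8 output seen above.

```mlir
func.func @dw_conv(%input: tensor<1x12x8xf32>, %filter: tensor<3x8xf32>,
                   %zero: f32) -> tensor<1x10x8xf32> {
  %init = tensor.empty() : tensor<1x10x8xf32>
  %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8xf32>) -> tensor<1x10x8xf32>
  // Unit stride and dilation: output width = 12 - 3 + 1 = 10, channels unchanged.
  %0 = linalg.depthwise_conv_1d_nwc_wc {dilations = dense<1> : tensor<1xi64>,
                                        strides = dense<1> : tensor<1xi64>}
      ins(%input, %filter : tensor<1x12x8xf32>, tensor<3x8xf32>)
      outs(%fill : tensor<1x10x8xf32>) -> tensor<1x10x8xf32>
  return %0 : tensor<1x10x8xf32>
}
```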
  transform-op-gpu-map-copy-to-threads.mlir
    // CHECK: linalg.copy {{.*}} -> tensor<8xf16>
    %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
    // CHECK: linalg.copy {{.*}} -> tensor<8xf16>
    %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
    // CHECK: linalg.copy {{.*}} -> tensor<4xf16>
    %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
    [all …]
  int-narrowing.mlir
  roundtrip-linalg-named-ops.mlir
    // The following test examples of linalg named ops lowered to linalg.generic and then
    // RUN: mlir-opt %s -linalg-generalize-named-ops | mlir-opt --linalg-specialize-generic-ops | FileC…
    linalg.exp ins(%A : memref<7x14x21xf32>) outs(%Out : memref<7x14x21xf32>)
    // CHECK-NOT: linalg.generic
    // CHECK: linalg.exp ins(%[[A]] : memref<7x14x21xf32>) outs(%[[Out]] : memref<7x14x21xf32>)
    %0 = linalg.add ins(%A, %B : tensor<?x?xf32>, tensor<?x?xf32>) outs(%Out : tensor<?x?xf32>) -> ten…
    // CHECK-NOT: linalg.generic
    // CHECK: linalg.add ins(%[[A]], %[[B]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[Out]] : tensor<…
    %0 = linalg.matmul ins(%A, %B : tensor<?x?xf32>, tensor<?x?xf32>) outs(%Out : tensor<?x?xf32>) -> …
    // CHECK-NOT: linalg.generic
    [all …]
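Reassembled into a complete test, the first case above reads roughly as follows. This is a sketch: the RUN line is reconstructed from the truncated fragment and assumes the usual FileCheck tail.

```mlir
// RUN: mlir-opt %s -linalg-generalize-named-ops | mlir-opt --linalg-specialize-generic-ops | FileCheck %s
// linalg.exp is generalized to a linalg.generic and then recognized and
// specialized back, so no linalg.generic survives the round trip.
func.func @exp(%A: memref<7x14x21xf32>, %Out: memref<7x14x21xf32>) {
  linalg.exp ins(%A : memref<7x14x21xf32>) outs(%Out : memref<7x14x21xf32>)
  return
}
// CHECK-NOT: linalg.generic
// CHECK: linalg.exp
```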
  generalize-named-polymorphic-ops.mlir
    // RUN: mlir-opt %s -split-input-file -linalg-generalize-named-ops | FileCheck %s
    %0 = linalg.matmul ins(%A, %B: tensor<16x8xf16>, tensor<8x32xf64>)
    // CHECK-NEXT: linalg.yield %[[ADD]] : f32
    %0 = linalg.matmul ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
    // CHECK-NEXT: linalg.yield %[[ADD]] : i32
    %0 = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>}
    %0 = linalg.matmul ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
    %0 = linalg.matmul ins(%A, %B: tensor<16x8xf16>, tensor<8x32xf64>)
    %0 = linalg…
    [all …]
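For orientation, generalizing a plain f32 `linalg.matmul` yields a `linalg.generic` with the canonical matmul indexing maps; a hedged sketch of the shape of that output (static sizes invented, not the pass output verbatim):

```mlir
#mapA = affine_map<(m, n, k) -> (m, k)>
#mapB = affine_map<(m, n, k) -> (k, n)>
#mapC = affine_map<(m, n, k) -> (m, n)>
func.func @generalized_matmul(%A: tensor<16x8xf32>, %B: tensor<8x32xf32>,
                              %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
  // Two parallel dimensions (m, n) and one reduction dimension (k).
  %0 = linalg.generic {indexing_maps = [#mapA, #mapB, #mapC],
                       iterator_types = ["parallel", "parallel", "reduction"]}
      ins(%A, %B : tensor<16x8xf32>, tensor<8x32xf32>) outs(%C : tensor<16x32xf32>) {
  ^bb0(%a: f32, %b: f32, %c: f32):
    %mul = arith.mulf %a, %b : f32
    %add = arith.addf %c, %mul : f32
    linalg.yield %add : f32
  } -> tensor<16x32xf32>
  return %0 : tensor<16x32xf32>
}
```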
/llvm-project/mlir/test/python/dialects/linalg/
  ops.py
    from mlir.dialects import arith, func, linalg, tensor, memref
    from mlir.dialects.linalg.opdsl.lang import *
    # CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32>
    return linalg.fill(zero, outs=[out])
    # CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>)
    linalg.fill(zero, outs=[out])
    # CHECK: linalg.elemwise_unary
    # CHECK-SAME: cast = #linalg.type_fn<cast_signed>
    # CHECK-SAME: fun = #linalg.unary_fn<exp>
    unary_result = linalg…
    [all …]
/llvm-project/mlir/lib/Dialect/Linalg/Transforms/
  TransposeMatmul.cpp
    //===- TransposeMatmul.cpp - Convert Linalg matmul to transposed variants -===//
    #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
    #define DEBUG_TYPE "linalg-transpose-matmul"
    using namespace mlir::linalg;
    /// linalg.matmul(a, b)
    /// linalg.matmul_transpose_a(linalg.transpose(a), b)
    FailureOr<Operation *> mlir::linalg::transposeMatmul(RewriterBase &rewriter,
                                                         linalg::MatmulOp matmulOp,
    auto transposeOp = rewriter.create<linalg…
    [all …]
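In IR terms, the rewrite sketched in those comments looks roughly like this (a hand-written sketch with invented static sizes, not the pass output verbatim):

```mlir
func.func @transposed_variant(%A: tensor<16x8xf32>, %B: tensor<8x32xf32>,
                              %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
  // Materialize the transposed LHS, then use the transposed-A matmul variant.
  %empty = tensor.empty() : tensor<8x16xf32>
  %At = linalg.transpose ins(%A : tensor<16x8xf32>)
                         outs(%empty : tensor<8x16xf32>) permutation = [1, 0]
  %0 = linalg.matmul_transpose_a ins(%At, %B : tensor<8x16xf32>, tensor<8x32xf32>)
                                 outs(%C : tensor<16x32xf32>) -> tensor<16x32xf32>
  return %0 : tensor<16x32xf32>
}
```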
  BlockPackMatmul.cpp
    //===- BlockPackMatmul.cpp - Linalg matmul block packing ------------------===//
    #include "mlir/Dialect/Linalg/Passes.h"
    #include "mlir/Dialect/Linalg/IR/Linalg.h"
    #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
    #include "mlir/Dialect/Linalg/Utils/Utils.h"
    #include "mlir/Dialect/Linalg/Passes.h.inc"
    using namespace mlir::linalg;
    static bool validateFullTilesOnDims(linalg::LinalgOp linalgOp,
    transposePackedMatmul(RewriterBase &rewriter, linalg…
    [all …]
  TransposeConv2D.cpp
    #include "mlir/Dialect/Linalg/IR/Linalg.h"
    namespace linalg {
    /// %0 = linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : tensor<2xi64>,
    /// %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x2x6x8xf32>) -> tensor<2x2x6x8xf32>
    /// %transposed = linalg.transpose ins(%arg1 : tensor<8x2x2x6xf32>) outs(%1 : tensor<2x2x6x8xf32>)
    /// %2 = linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>}
    // Because linalg.transpose expects an "out" parameter we need to pass it a
    rewriter.create<linalg::TransposeOp>(loc, filter, input, filterPerm);
    linalg…
    [all …]
/llvm-project/mlir/test/lib/Dialect/Linalg/
  TestLinalgElementwiseFusion.cpp
    //===- TestLinalgElementwiseFusion.cpp - Test Linalg elementwise fusion ---===//
    // Linalg, mainly linalg options.
    #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
    .Case<linalg::LinalgOp>([&](linalg::LinalgOp linalgOp) {
    struct TestMultiUseProducerFusion : public OpRewritePattern<linalg::GenericOp> {
    using OpRewritePattern<linalg::GenericOp>::OpRewritePattern;
    LogicalResult matchAndRewrite(linalg::GenericOp genericOp,
    if (linalg…
    [all …]
/llvm-project/mlir/docs/Dialects/Linalg/
  _index.md
    # 'linalg' Dialect
    Linalg is designed to solve the High-level Hierarchical Optimization (HHO box)
    Linalg. They are all implemented in terms of the properties of the
    `linalg.generic` OpInterface and avoid the pitfall of relying on hardcoded
    Linalg IR and that have influenced its design:
    1. Partially Lower to Iterations Over a Finer-Grained Linalg Op.
    ## High-Level Description of Linalg Ops<a name="linalg_ops"></a>
    Linalg takes at least some inspiration from all previously
    [Linalg and Shapes](https://llvm.discourse.group/t/linalg…
    [all …]
/llvm-project/mlir/docs/Tutorials/transform/
  Ch1.md
    %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
    %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    %arg1: !transform.op<"linalg.matmul">,
    %arg2: !transform.op<"linalg.elemwise_binary">):
    %arg1: !transform.op<"linalg.matmul">,
    %arg2: !transform.op<"linalg.elemwise_binary">):
    : !transform.op<"linalg.matmul">
    : !transform.op<"linalg.elemwise_binary">
    debug-bind-trailing-args=linalg.matmul,linalg.elemwise_binary})"
    [all …]
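A minimal transform script in the spirit of that chapter might look like the sketch below. It is hedged: it uses the named-sequence form and only transform ops that appear in the snippets on this page, and it is not the tutorial's exact script.

```mlir
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
    // Match every linalg.matmul in the payload and attach a remark to it.
    %matmuls = transform.structured.match ops{["linalg.matmul"]} in %root
        : (!transform.any_op) -> !transform.any_op
    transform.debug.emit_remark_at %matmuls, "matmul" : !transform.any_op
    transform.yield
  }
}
```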
/llvm-project/mlir/test/Examples/transform/Ch1/
  invalidation-1.mlir
    // RUN:   debug-bind-trailing-args=linalg.matmul,linalg.elemwise_binary},\
    %arg1: !transform.op<"linalg.matmul">,
    %arg2: !transform.op<"linalg.elemwise_binary">) {
    : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)
    transform.debug.emit_remark_at %arg1, "remark" : !transform.op<"linalg.matmul">
    %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
    %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    %arg1: !transform.op<"linalg.matmul">,
    %arg2: !transform.op<"linalg.elemwise_binary">) {
    [all …]
/llvm-project/mlir/docs/Rationale/
  RationaleLinalgDialect.md
    # Linalg Dialect Rationale: The Case For Compiler-Friendly Custom Operations
    that led to the existing implementation of Linalg and aims at exposing
    Linalg is designed to solve the High-level Hierarchical Optimization
    this doc and the architecture of Linalg can help inform the community on a
    Linalg started as a pragmatic dialect to bootstrap code generation in MLIR, by
    into fast library implementations when available. Linalg **defines ops and
    However, as the design of Linalg co-evolved with the design of MLIR, it became
    The design and evolution of Linalg follow a *codegen-friendly* approach where
    necessary. For example, `linalg.matmul` remains `linalg.matmul` after tiling
    Furthermore, Linalg decouples transformation validity from profitability
    [all …]
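To make the "`linalg.matmul` remains `linalg.matmul` after tiling" point concrete, here is a hand-written sketch of a tiled matmul (tile size and shapes invented, not taken from the rationale document): the loop nest changes, but the computational payload is still a named `linalg.matmul` on slices.

```mlir
func.func @tiled_matmul(%A: tensor<128x64xf32>, %B: tensor<64x128xf32>,
                        %C: tensor<128x128xf32>) -> tensor<128x128xf32> {
  %c0 = arith.constant 0 : index
  %c32 = arith.constant 32 : index
  %c128 = arith.constant 128 : index
  // Tile the M dimension by 32; each iteration still contains a linalg.matmul.
  %res = scf.for %i = %c0 to %c128 step %c32 iter_args(%acc = %C) -> (tensor<128x128xf32>) {
    %a = tensor.extract_slice %A[%i, 0] [32, 64] [1, 1]
        : tensor<128x64xf32> to tensor<32x64xf32>
    %c = tensor.extract_slice %acc[%i, 0] [32, 128] [1, 1]
        : tensor<128x128xf32> to tensor<32x128xf32>
    %mm = linalg.matmul ins(%a, %B : tensor<32x64xf32>, tensor<64x128xf32>)
                        outs(%c : tensor<32x128xf32>) -> tensor<32x128xf32>
    %upd = tensor.insert_slice %mm into %acc[%i, 0] [32, 128] [1, 1]
        : tensor<32x128xf32> into tensor<128x128xf32>
    scf.yield %upd : tensor<128x128xf32>
  }
  return %res : tensor<128x128xf32>
}
```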
/llvm-project/mlir/lib/Dialect/Linalg/IR/
  LinalgDialect.cpp
    //===- Dialect.cpp - Implementation of the linalg dialect and types -------===//
    // This file implements the Linalg dialect types and dialect.
    #include "mlir/Dialect/Linalg/IR/Linalg.h"
    using namespace mlir::linalg;
    // Operations in Linalg dialect are always legal to inline.
    void mlir::linalg::LinalgDialect::initialize() {
    #include "mlir/Dialect/Linalg/IR/LinalgOpsAttrDefs.cpp.inc"
    #include "mlir/Dialect/Linalg/IR/LinalgOps.cpp.inc"
    #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
    // Fill the Linalg-specific OpName to RegionBuilder map.
    [all …]
/llvm-project/mlir/lib/CAPI/Dialect/
  Linalg.cpp
    //===- Linalg.cpp - C Interface for Linalg dialect ------------------------===//
    #include "mlir-c/Dialect/Linalg.h"
    #include "mlir/Dialect/Linalg/IR/Linalg.h"
    using namespace mlir::linalg;
    /// Apply the special region builder for the builtin named Linalg op.
    /// Assert that `op` is a builtin named Linalg op.
    assert(fun && "Expected a builtin named Linalg op.");
    assert(op->getNumRegions() == 1 && "Expected Linalg op with 1 region");
    "Expected Linalg op with 0 blocks");
    MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(Linalg, linalg, LinalgDialect)