//===- TensorTilingInterfaceImpl.h - --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Tiling interface for TensorOps with ExternalModel.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_TENSOR_IR_TENSORTILINGINTERFACEIMPL_H_
#define MLIR_DIALECT_TENSOR_IR_TENSORTILINGINTERFACEIMPL_H_

#include "mlir/IR/Dialect.h"

namespace mlir {

struct TilingResult;

namespace tensor {

class PadOp;

/// Bubbles up a slice of this pad by taking the slice first and then performing
/// the padding. `offsets` and `sizes` specify each dimension's start offset and
/// size for the slice. The slice has unit strides along all dimensions.
///
/// Specifically, this function converts:
/// ```
/// %0 = tensor.pad %source low[...] high[...] { linalg.yield %cst }
/// %1 = <extract-slice> %0 offsets=[...], sizes=[...]
/// ```
/// into
/// ```
/// %0 = tensor.extract_slice %source ...
/// %1 = tensor.pad %0 low[...] high[...] { linalg.yield %cst }
/// ```
///
/// If `generateZeroSliceGuard` is true, the generated IR will contain logic
/// to guard against the case that we might take a zero-sized slice from the
/// original source. For such cases, we use `tensor.generate` to generate the
/// full tensor.
FailureOr<TilingResult> bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp,
                                         ArrayRef<OpFoldResult> offsets,
                                         ArrayRef<OpFoldResult> sizes,
                                         bool generateZeroSliceGuard = true);

/// Registers external models for the Tiling interface for tensor ops.
/// Currently, it registers:
///
/// * TilingInterface for `tensor.pad`, `tensor.pack`, and `tensor.unpack`.
///
/// Unfortunately, a "normal" internal registration is not possible at the
/// moment, because of the dependency of the interface implementation for these
/// ops on `affine.apply` and Affine dialect already depends on TensorOps. In
/// order to break the cyclic dependency (TensorOps->AffineOps->TensorOps) the
/// implementation is moved to a separate library.
void registerTilingInterfaceExternalModels(mlir::DialectRegistry &registry);

/// Similar to the above registration, but it is only for `tensor.pack` and
/// `tensor.unpack` ops.
void registerTilingInterfaceExternalModelsForPackUnPackOps(
    DialectRegistry &registry);

} // namespace tensor
} // namespace mlir

#endif // MLIR_DIALECT_TENSOR_IR_TENSORTILINGINTERFACEIMPL_H_