//===- Promotion.cpp - Implementation of linalg Promotion ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/STLExtras.h"
#include "mlir/Transforms/FoldUtils.h"

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::loop;

using llvm::SetVector;

using folded_affine_min = folded::ValueBuilder<AffineMinOp>;
using folded_linalg_range = folded::ValueBuilder<linalg::RangeOp>;

#define DEBUG_TYPE "linalg-promotion"

// Allocates a 1-D i8 buffer large enough to hold `size` elements of
// `elementType`. When `dynamicBuffers` is false and `size` is a constant, a
// statically shaped buffer is allocated; otherwise the size is computed
// dynamically.
static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
  auto *ctx = size.getContext();
  auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
  if (!dynamicBuffers)
    if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size.getDefiningOp()))
      return std_alloc(
          MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
  Value mul = std_muli(std_constant_index(width), size);
  return std_alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView`, accounting for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
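// As an illustrative example (the sizes below are chosen for exposition only):
// tiling a 100x100 f32 view by 64x64 always allocates a full 64x64 tile
// (64*64*4 bytes, statically sized), while at the right/bottom boundary the
// partial local view only covers the remaining 36x64, 64x36 or 36x36
// elements; the `fill` initializes the whole tile and the `copy` transfers
// just that partial portion.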
static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
                                           SubViewOp subView,
                                           bool dynamicBuffers,
                                           OperationFolder *folder) {
  auto zero = folded_std_constant_index(folder, 0);
  auto one = folded_std_constant_index(folder, 1);

  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  Value allocSize = one;
  SmallVector<Value, 8> fullRanges, partialRanges;
  fullRanges.reserve(rank);
  partialRanges.reserve(rank);
  for (auto en : llvm::enumerate(subView.getRanges())) {
    auto rank = en.index();
    auto rangeValue = en.value();
    Value d = rangeValue.size;
    allocSize = folded_std_muli(folder, allocSize, d).getValue();
    fullRanges.push_back(d);
    partialRanges.push_back(
        folded_linalg_range(folder, zero, std_dim(subView, rank), one));
  }
  SmallVector<int64_t, 4> dynSizes(fullRanges.size(), -1);
  auto buffer =
      allocBuffer(viewType.getElementType(), allocSize, dynamicBuffers);
  auto fullLocalView = std_view(
      MemRefType::get(dynSizes, viewType.getElementType()), buffer, fullRanges);
  auto partialLocalView = linalg_slice(fullLocalView, partialRanges);
  return PromotionInfo{buffer, fullLocalView, partialLocalView};
}

SmallVector<PromotionInfo, 8>
mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
                              ArrayRef<Value> subViews, bool dynamicBuffers,
                              OperationFolder *folder) {
  if (subViews.empty())
    return {};

  ScopedContext scope(b, loc);
  SmallVector<PromotionInfo, 8> res;
  res.reserve(subViews.size());
  DenseMap<Value, PromotionInfo> promotionInfoMap;
  for (auto v : subViews) {
    SubViewOp subView = cast<SubViewOp>(v.getDefiningOp());
    auto promotionInfo =
        promoteFullTileBuffer(b, loc, subView, dynamicBuffers, folder);
    promotionInfoMap.insert(std::make_pair(subView.getResult(), promotionInfo));
    res.push_back(promotionInfo);
  }

  for (auto v : subViews) {
    SubViewOp subView = cast<SubViewOp>(v.getDefiningOp());
    auto info = promotionInfoMap.find(v);
    if (info == promotionInfoMap.end())
      continue;
    Value fillVal;
    if (auto t = subView.getType().getElementType().dyn_cast<FloatType>())
      fillVal = folded_std_constant(folder, FloatAttr::get(t, 0.0));
    else if (auto t =
                 subView.getType().getElementType().dyn_cast<IntegerType>())
      fillVal = folded_std_constant_int(folder, 0, t);
    // TODO(ntv): fill is only necessary if `promotionInfo` has a full local
    // view that is different from the partial local view and we are on the
    // boundary.
    linalg_fill(info->second.fullLocalView, fillVal);
  }

  for (auto v : subViews) {
    auto info = promotionInfoMap.find(v);
    if (info == promotionInfoMap.end())
      continue;
    linalg_copy(cast<SubViewOp>(v.getDefiningOp()),
                info->second.partialLocalView);
  }
  return res;
}

LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
                                              SetVector<Value> subViews,
                                              bool dynamicBuffers,
                                              OperationFolder *folder) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");

  if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
    // TODO(ntv): add a level of indirection to linalg.generic.
    if (convOp.padding())
      llvm_unreachable("Unexpected conv with padding");
  }

  // 1. Promote the specified views and use them in the new op.
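  // Note: the EDSC ScopedContext below makes `b` and `op`'s location the
  // implicit builder and location used by the `std_*` / `linalg_*` intrinsics
  // emitted by the helpers called in this function.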
  ScopedContext scope(b, op.getLoc());
  auto promotedBufferAndViews = promoteSubViews(
      b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder);
  SmallVector<Value, 8> opViews;
  opViews.reserve(op.getNumInputsAndOutputs());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(subViews.size());
  unsigned promotedIdx = 0;
  for (auto view : op.getInputsAndOutputBuffers()) {
    if (subViews.count(view) != 0) {
      opViews.push_back(promotedBufferAndViews[promotedIdx].fullLocalView);
      writebackViews.emplace_back(std::make_pair(
          view, promotedBufferAndViews[promotedIdx].partialLocalView));
      promotedIdx++;
    } else {
      opViews.push_back(view);
    }
  }

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars etc.
  auto operands = getAssumedNonViewOperands(op);
  opViews.append(operands.begin(), operands.end());
  LinalgOp res = op.clone(b, op.getLoc(), opViews);

  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    // WARNING: MUST use the old op to determine whether the operand view is an
    // output.
    bool isOutput =
        op.getIndexOfOutputBuffer(viewAndPartialLocalView.first).hasValue();
    if (isOutput)
      linalg_copy(viewAndPartialLocalView.second,
                  viewAndPartialLocalView.first);
  }

  // 4. Dealloc local buffers.
  for (const auto &pi : promotedBufferAndViews)
    std_dealloc(pi.buffer);

  return res;
}

static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
  SmallVector<LinalgOp, 8> toErase;
  OperationFolder folder(f.getContext());
  f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) {
    if (!op.hasBufferSemantics())
      return;

    // TODO(ntv): add a heuristic here to decide what to promote. At the moment
    // only float and integer buffers can be promoted.
    SetVector<Value> subViews;
    OpBuilder b(op);
    for (auto it : op.getInputsAndOutputBuffers())
      if (auto sv = dyn_cast_or_null<SubViewOp>(it.getDefiningOp()))
        if (sv.getType().getElementType().isSignlessIntOrFloat())
          subViews.insert(sv);
    if (!subViews.empty()) {
      promoteSubViewOperands(b, op, subViews, dynamicBuffers, &folder);
      toErase.push_back(op);
    }
  });
  for (auto op : toErase)
    op.erase();
}

namespace {
struct LinalgPromotionPass : public FunctionPass<LinalgPromotionPass> {
  LinalgPromotionPass() = default;
  LinalgPromotionPass(const LinalgPromotionPass &) {}
  LinalgPromotionPass(bool dynamicBuffers) {
    this->dynamicBuffers = dynamicBuffers;
  }

  void runOnFunction() override {
    promoteSubViews(getFunction(), dynamicBuffers);
  }

  Option<bool> dynamicBuffers{
      *this, "test-promote-dynamic",
      llvm::cl::desc("Test generation of dynamic promoted buffers"),
      llvm::cl::init(false)};
};
} // namespace

std::unique_ptr<OpPassBase<FuncOp>>
mlir::createLinalgPromotionPass(bool dynamicBuffers) {
  return std::make_unique<LinalgPromotionPass>(dynamicBuffers);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createLinalgPromotionPass() {
  return std::make_unique<LinalgPromotionPass>();
}
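
// Example usage (a sketch, not exercised by this file): since the pass created
// above is a function pass, it would typically be scheduled on a
// function-level pass manager, assuming a `module` and its context are
// already available:
//
//   PassManager pm(module.getContext());
//   pm.nest<FuncOp>().addPass(mlir::createLinalgPromotionPass());
//   if (failed(pm.run(module)))
//     /* handle the error */;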