//===- VectorTransforms.cpp - Conversion within the Vector dialect --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent rewrites as 1->N patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"

#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>
#include <type_traits>

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/VectorInterfaces.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "vector-to-vector"

using namespace mlir;
using namespace mlir::vector;

template <typename IntType>
static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(llvm::map_range(
      arrayAttr.getAsRange<IntegerAttr>(),
      [](IntegerAttr attr) { return static_cast<IntType>(attr.getInt()); }));
}

// Helper to find the index of `index` in the results of an affine map.
static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      return i;
  }
  return std::nullopt;
}

namespace {

/// ShapeCastOpFolder folds cancelling ShapeCastOps away.
//
// Example:
//
//  The following MLIR with cancelling ShapeCastOps:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = shape_cast %0 : vector<5x4x2xf32> to vector<20x2xf32>
//   %2 = shape_cast %1 : vector<20x2xf32> to vector<5x4x2xf32>
//   %3 = user %2 : vector<5x4x2xf32>
//
//  Should canonicalize to the following:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = user %0 : vector<5x4x2xf32>
//
struct ShapeCastOpFolder : public OpRewritePattern<vector::ShapeCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp shapeCastOp,
                                PatternRewriter &rewriter) const override {
    // Check if 'shapeCastOp' has vector source/result type.
    auto sourceVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getSource().getType());
    auto resultVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getResult().getType());
    if (!sourceVectorType || !resultVectorType)
      return failure();

    // Check if shape cast op source operand is also a shape cast op.
    auto sourceShapeCastOp = dyn_cast_or_null<vector::ShapeCastOp>(
        shapeCastOp.getSource().getDefiningOp());
    if (!sourceShapeCastOp)
      return failure();
    auto operandSourceVectorType =
        cast<VectorType>(sourceShapeCastOp.getSource().getType());
    auto operandResultVectorType = sourceShapeCastOp.getType();

    // Check if shape cast operations invert each other.
    if (operandSourceVectorType != resultVectorType ||
        operandResultVectorType != sourceVectorType)
      return failure();

    rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.getSource());
    return success();
  }
};

/// Convert MulIOp/MulFOp + MultiDimReductionOp<add> into ContractionOp.
/// Ex:
/// ```
///   %0 = arith.mulf %arg0, %arg1 : vector<8x32x16xf32>
///   %1 = vector.multi_reduction add, %0 [1]
///     : vector<8x32x16xf32> to vector<8x16xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d2)>],
///    iterator_types = ["parallel", "reduction", "parallel"],
///    kind = add} %arg0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x16xf32>
/// ```
struct MultiReduceToContract
    : public OpRewritePattern<vector::MultiDimReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
                                PatternRewriter &rewriter) const override {
    if (reduceOp.getKind() != vector::CombiningKind::ADD)
      return failure();
    Operation *mulOp = reduceOp.getSource().getDefiningOp();
    if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
      return failure();
    SmallVector<bool> reductionMask = reduceOp.getReductionMask();
    auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
    SmallVector<AffineExpr> exprs;
    SmallVector<vector::IteratorType> iteratorTypes;
    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
      if (!isReduceDim.value()) {
        iteratorTypes.push_back(vector::IteratorType::parallel);
        exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
      } else {
        iteratorTypes.push_back(vector::IteratorType::reduction);
      }
    }
    auto dstMap =
        AffineMap::get(/*dimCount=*/reductionMask.size(),
                       /*symbolCount=*/0, exprs, reduceOp.getContext());
    rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
        reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
        rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
        rewriter.getArrayAttr(llvm::to_vector(llvm::map_range(
            iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
              return IteratorTypeAttr::get(rewriter.getContext(), t);
            }))));
    return success();
  }
};

/// Merge LHS/RHS (A/B) TransposeOp into ContractionOp user.
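/// The transpose is absorbed by composing the inverse of its permutation into
/// the corresponding operand's indexing map, so the contraction reads the
/// untransposed value directly.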
/// Ex:
/// ```
///   %0 = vector.transpose %arg0, [2, 0, 1]
///     : vector<32x16x8xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2, d0)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///     : vector<32x16x8xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractABTranspose final
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto transposeOp = operand->getDefiningOp<vector::TransposeOp>();
      if (!transposeOp)
        continue;
      AffineMap permutationMap = AffineMap::getPermutationMap(
          transposeOp.getPermutation(), contractOp.getContext());
      map = inversePermutation(permutationMap).compose(map);
      *operand = transposeOp.getVector();
      changed = true;
    }
    if (!changed)
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merges accumulator and result transposes into contract.
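/// This only applies when the result transpose undoes the accumulator
/// transpose, i.e. their permutations are inverses of each other; the merged
/// result map is the result permutation composed with the original
/// accumulator/result map.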
///
/// For example:
/// ```mlir
/// %accT = vector.transpose %acc, [0, 2, 1]
///   : vector<2x8x4xf32> to vector<2x4x8xf32>
/// %contract = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %accT
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x4x8xf32>
/// %0 = vector.transpose %contract, [0, 2, 1]
///   : vector<2x4x8xf32> to vector<2x8x4xf32>
/// ```
/// Becomes:
/// ```mlir
/// %0 = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d2, d1)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %acc
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x8x4xf32>
/// ```
struct CombineContractResultTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp resTOp,
                                PatternRewriter &rewriter) const override {
    auto contractOp = resTOp.getVector().getDefiningOp<vector::ContractionOp>();
    if (!contractOp || !contractOp->hasOneUse())
      return failure();

    auto accTOp = contractOp.getAcc().getDefiningOp<vector::TransposeOp>();
    if (!accTOp)
      return failure();

    MLIRContext *context = contractOp.getContext();
    auto maps = llvm::to_vector<3>(contractOp.getIndexingMapsArray());
    AffineMap contractMap = maps.back();

    // Accumulator transpose performs f(A) -> B. Contract performs g(C) -> B.
    // To index into A in contract, we need revert(f)(g(C)) -> A.
    auto accTMap =
        AffineMap::getPermutationMap(accTOp.getPermutation(), context);

    // Contract performs g(C) -> D. Result transpose performs h(D) -> E.
    // To index into E in contract, we need h(g(C)) -> E.
    auto resTMap =
        AffineMap::getPermutationMap(resTOp.getPermutation(), context);
    auto combinedResMap = resTMap.compose(contractMap);

    // The accumulator and result share the same indexing map, so the two maps
    // must match for the merge to be valid. That is, combinedResMap must equal
    // inversePermutation(accTMap).compose(contractMap), which means resTMap
    // must be the inverse of accTMap.
    if (inversePermutation(accTMap) != resTMap)
      return failure();
    maps.back() = combinedResMap;

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        resTOp, contractOp.getLhs(), contractOp.getRhs(), accTOp.getVector(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merge BroadcastOp into ContractionOp user.
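/// The broadcast is absorbed by composing its dimension mapping into the
/// operand's indexing map; dims that existed only for the broadcast become
/// unused and are compressed away afterwards.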
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///     : vector<32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractBroadcast
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto broadcast = operand->getDefiningOp<vector::BroadcastOp>();
      if (!broadcast)
        continue;
      // vector.contract can only take vectors as operands.
      auto srcType = dyn_cast<VectorType>(broadcast.getSourceType());
      if (!srcType ||
          srcType.getRank() == broadcast.getResultVectorType().getRank())
        continue;
      int64_t rankDiff =
          broadcast.getResultVectorType().getRank() - srcType.getRank();
      bool innerDimBroadcast = false;
      SmallVector<AffineExpr> originalDims;
      for (const auto &dim : llvm::enumerate(srcType.getShape())) {
        if (dim.value() != broadcast.getResultVectorType().getDimSize(
                               rankDiff + dim.index())) {
          innerDimBroadcast = true;
          break;
        }
        originalDims.push_back(
            rewriter.getAffineDimExpr(dim.index() + rankDiff));
      }
      // Contract doesn't support inner dimension broadcast. Once this is
      // relaxed we can remove this case.
      if (innerDimBroadcast)
        continue;

      // It would be incorrect to fold a broadcast onto a reduction dimension
      // of non-unit size.
      bool nonUnitDimReductionBroadcast = false;
      for (int64_t i = 0; i < rankDiff; ++i) {
        if (broadcast.getResultVectorType().getDimSize(i) != 1 &&
            isReductionIterator(contractOp.getIteratorTypes()
                                    .getValue()[map.getDimPosition(i)])) {
          nonUnitDimReductionBroadcast = true;
          break;
        }
      }
      if (nonUnitDimReductionBroadcast)
        continue;

      AffineMap broadcastMap =
          AffineMap::get(broadcast.getResultVectorType().getRank(), 0,
                         originalDims, contractOp.getContext());
      map = broadcastMap.compose(map);
      *operand = broadcast.getSource();
      changed = true;
    }

    if (!changed)
      return failure();

    // Determine which dims are unused, now that the maps have been composed
    // with the broadcast maps.
    llvm::SmallBitVector unusedDimsBitVector = getUnusedDimsBitVector(maps);
    // Compress unused dims.
    for (auto &m : maps)
      m = compressDims(m, unusedDimsBitVector);
    // Compute the combined iterators.
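    // Keep only the iterator types of dimensions that remain in use after the
    // unused (broadcast-only) dimensions have been compressed away.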
    SmallVector<Attribute> iterators;
    for (unsigned i = 0; i < unusedDimsBitVector.size(); ++i) {
      if (!unusedDimsBitVector.test(i))
        iterators.push_back(contractOp.getIteratorTypes().getValue()[i]);
    }
    // Check that compressing unused dims isn't removing all reduction
    // dimension pairs. For example, if the vector.contract had only one
    // reduction iterator and that was a unit-dimension created by a broadcast,
    // then we should bail here, otherwise we would create a contract without
    // a reduction dimension pair.
    bool hasReductionIteratorApplyingOnBothSides = false;
    for (unsigned i = 0; i < iterators.size(); ++i) {
      if (!isReductionIterator(iterators[i]))
        continue;
      if (getResultIndex(maps[0], i) && getResultIndex(maps[1], i)) {
        hasReductionIteratorApplyingOnBothSides = true;
        break;
      }
    }
    if (!hasReductionIteratorApplyingOnBothSides)
      return failure();

    // If the compressed maps have a dimension that is not used by either LHS
    // or RHS then the ContractionOp verifier would fail.
    if (getUnusedDimsBitVector({maps[0], maps[1]}).any())
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), rewriter.getArrayAttr(iterators));
    return success();
  }
};

/// Reorders cast(broadcast) to broadcast(cast). This brings broadcast ops and
/// contraction ops closer together, which enables the CombineContractBroadcast
/// pattern when cast ops sit between these operations.
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xi8> to vector<8x32x16xi8>
///   %1 = arith.extsi %0 : vector<8x32x16xi8> to vector<8x32x16xi32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.extsi %arg0 : vector<32x16xi8> to vector<32x16xi32>
///   %1 = vector.broadcast %0 : vector<32x16xi32> to vector<8x32x16xi32>
/// ```
struct ReorderCastOpsOnBroadcast
    : public OpInterfaceRewritePattern<CastOpInterface> {
  using OpInterfaceRewritePattern<CastOpInterface>::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(CastOpInterface op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumOperands() != 1)
      return failure();
    auto bcastOp = op->getOperand(0).getDefiningOp<vector::BroadcastOp>();
    if (!bcastOp)
      return failure();

    Type castResTy = getElementTypeOrSelf(op->getResult(0));
    if (auto vecTy = dyn_cast<VectorType>(bcastOp.getSourceType()))
      castResTy = vecTy.clone(castResTy);
    auto *castOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(),
                        bcastOp.getSource(), castResTy, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, op->getResult(0).getType(), castOp->getResult(0));
    return success();
  }
};

/// Reorders elementwise(transpose) to transpose(elementwise). This brings
/// transpose ops and contraction ops closer together, which enables the
/// CombineContractABTranspose pattern when elementwise ops sit between these
/// operations. Ex:
/// ```
///   %at = vector.transpose %a, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %bt = vector.transpose %b, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %r = arith.addf %at, %bt : vector<2x4xf32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.addf %a, %b : vector<4x2xf32>
///   %r = vector.transpose %0, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// ```
struct ReorderElementwiseOpsOnTranspose final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    // Make sure all operands are transpose/constant ops and collect their
    // transposition maps.
    SmallVector<ArrayRef<int64_t>> transposeMaps;
    transposeMaps.reserve(op->getNumOperands());
    // Record the initial type before transposition. We'll use its shape later.
    // Any type will do here as we will check all transpose maps are the same.
    VectorType srcType;
    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        transposeMaps.push_back(transposeOp.getPermutation());
        srcType = transposeOp.getSourceVectorType();
      } else if (!matchPattern(operand, m_Constant())) {
        return failure();
      }
    }
    if (transposeMaps.empty())
      return failure();
    // This is an elementwise op, so all transposed operands should have the
    // same type. We need to additionally check that all transposes use the
    // same permutation.
    if (!llvm::all_equal(transposeMaps))
      return rewriter.notifyMatchFailure(op, "different transpose map");

    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());

    // If there are constant operands, we need to insert inverse transposes for
    // them. Calculate the inverse order first.
    auto order = transposeMaps.front();
    SmallVector<int64_t> invOrder(order.size());
    for (int i = 0, e = order.size(); i < e; ++i)
      invOrder[order[i]] = i;

    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        srcValues.push_back(transposeOp.getVector());
      } else {
        // This is a constant. Create a reverse transpose op for it.
        auto vectorType =
            srcType.clone(cast<VectorType>(operand.getType()).getElementType());
        srcValues.push_back(rewriter.create<vector::TransposeOp>(
            operand.getLoc(), vectorType, operand, invOrder));
      }
    }

    auto vectorType = srcType.clone(
        cast<VectorType>(op->getResultTypes()[0]).getElementType());
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        vectorType, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::TransposeOp>(
        op, op->getResultTypes()[0], elementwiseOp->getResult(0),
        transposeMaps.front());
    return success();
  }
};

// Returns the values in `arrayAttr` as an integer vector.
static SmallVector<int64_t> getIntValueVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(
      llvm::map_range(arrayAttr.getAsRange<IntegerAttr>(),
                      [](IntegerAttr attr) { return attr.getInt(); }));
}

// Shuffles vector.bitcast op after vector.extract op.
//
// This transforms IR like:
//   %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %1 = vector.extract %0[3] : f16 from vector<8xf16>
// Into:
//   %0 = vector.extract %src[1] : f32 from vector<4xf32>
//   %1 = vector.bitcast %0 : vector<1xf32> to vector<2xf16>
//   %2 = vector.extract %1[1] : f16 from vector<2xf16>
struct BubbleDownVectorBitCastForExtract
    : public OpRewritePattern<vector::ExtractOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
                                PatternRewriter &rewriter) const override {
    // Only support extracting scalars for now.
    if (extractOp.getSourceVectorType().getRank() != 1)
      return failure();

    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Fail to match if we only have one element in the cast op source.
    // This is to avoid infinite loop given that this pattern can generate
    // such cases.
    if (castSrcType.getNumElements() == 1)
      return failure();

    // Only support casting to a larger number of elements for now.
    // E.g., vector<4xf32> -> vector<8xf16>.
    if (castSrcType.getNumElements() > castDstType.getNumElements())
      return failure();

    unsigned expandRatio =
        castDstType.getNumElements() / castSrcType.getNumElements();

    // Get the first element of the mixed position as an integer.
    auto mixedPos = extractOp.getMixedPosition();
    if (mixedPos.size() > 0 && !isa<Attribute>(mixedPos[0]))
      return failure();
    uint64_t index = cast<IntegerAttr>(cast<Attribute>(mixedPos[0])).getInt();

    // Get the single scalar (as a vector) in the source value that packs the
    // desired scalar. E.g. extract vector<1xf32> from vector<4xf32>.
    Location loc = extractOp.getLoc();
    Value packedValue = rewriter.create<vector::ExtractOp>(
        loc, castOp.getSource(), index / expandRatio);
    Type packedVecType = VectorType::get(/*shape=*/{1}, packedValue.getType());
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, packedVecType, rewriter.getZeroAttr(packedVecType));
    packedValue = rewriter.create<vector::InsertOp>(loc, packedValue, zero,
                                                    /*position=*/0);

    // Cast it to a vector with the desired scalar's type.
    // E.g. f32 -> vector<2xf16>.
    VectorType packedType =
        VectorType::get({expandRatio}, castDstType.getElementType());
    Value castedValue =
        rewriter.create<vector::BitCastOp>(loc, packedType, packedValue);

    // Finally extract the desired scalar.
    rewriter.replaceOpWithNewOp<vector::ExtractOp>(extractOp, castedValue,
                                                   index % expandRatio);
    return success();
  }
};

// Shuffles vector.bitcast op after vector.extract_strided_slice op.
//
// This transforms IR like:
//   %cast = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %0 = vector.extract_strided_slice %cast {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
// Into:
//   %0 = vector.extract_strided_slice %src {
//          offsets = [2], sizes = [2], strides = [1]
//        } : vector<4xf32> to vector<2xf32>
//   %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
    : public OpRewritePattern<vector::ExtractStridedSliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to more elements for now; other cases to be implemented.
    if (castSrcLastDim > castDstLastDim)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(extractOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = extractOp.getSourceVectorType().getRank();
    assert(castDstLastDim % castSrcLastDim == 0);
    int64_t expandRatio = castDstLastDim / castSrcLastDim;

    // If we have fewer offsets than the rank, then implicitly we are selecting
    // the full range for the last bitcasted dimension; other dimensions aren't
    // affected. Otherwise, we need to scale down the last dimension's offset
    // given we are extracting from fewer elements now.
    ArrayAttr newOffsets = extractOp.getOffsets();
    if (newOffsets.size() == rank) {
      SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
      if (offsets.back() % expandRatio != 0)
        return failure();
      offsets.back() = offsets.back() / expandRatio;
      newOffsets = rewriter.getI64ArrayAttr(offsets);
    }

    // Similarly for sizes.
    ArrayAttr newSizes = extractOp.getSizes();
    if (newSizes.size() == rank) {
      SmallVector<int64_t> sizes = getIntValueVector(newSizes);
      if (sizes.back() % expandRatio != 0)
        return failure();
      sizes.back() = sizes.back() / expandRatio;
      newSizes = rewriter.getI64ArrayAttr(sizes);
    }

    SmallVector<int64_t> dims =
        llvm::to_vector<4>(cast<VectorType>(extractOp.getType()).getShape());
    dims.back() = dims.back() / expandRatio;
    VectorType newExtractType =
        VectorType::get(dims, castSrcType.getElementType());

    auto newExtractOp = rewriter.create<vector::ExtractStridedSliceOp>(
        extractOp.getLoc(), newExtractType, castOp.getSource(), newOffsets,
        newSizes, extractOp.getStrides());

    rewriter.replaceOpWithNewOp<vector::BitCastOp>(
        extractOp, extractOp.getType(), newExtractOp);

    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert op.
//
// This transforms IR like:
//   %0 = vector.insert %val, %dst[4] : vector<32xi4> into vector<8x32xi4>
//   %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
// Into:
//   %0 = vector.bitcast %val : vector<32xi4> to vector<16xi8>
//   %1 = vector.bitcast %dst : vector<8x32xi4> to vector<8x16xi8>
//   %2 = vector.insert %0, %1[4] : vector<16xi8> into vector<8x16xi8>
//
struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();

    // 0-D and scalable vectors are not supported yet.
    if (castSrcType.getRank() == 0 || castSrcType.isScalable() ||
        castDstType.isScalable())
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    bool isNumElemsShrink = castSrcLastDim >= castDstLastDim;
    int64_t ratio;
    if (isNumElemsShrink) {
      assert(castSrcLastDim % castDstLastDim == 0);
      ratio = castSrcLastDim / castDstLastDim;
    } else {
      assert(castDstLastDim % castSrcLastDim == 0);
      ratio = castDstLastDim / castSrcLastDim;
    }

    auto insertOp = bitcastOp.getSource().getDefiningOp<vector::InsertOp>();
    if (!insertOp)
      return failure();

    // Only vector sources are supported for now.
    auto insertSrcType = dyn_cast<VectorType>(insertOp.getSourceType());
    if (!insertSrcType)
      return failure();

    // Bitcast the source.
    SmallVector<int64_t> srcDims(insertSrcType.getShape());
    srcDims.back() =
        isNumElemsShrink ? srcDims.back() / ratio : srcDims.back() * ratio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());
    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims(insertOp.getDestVectorType().getShape());
    dstDims.back() =
        isNumElemsShrink ? dstDims.back() / ratio : dstDims.back() * ratio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    // Bitcast the destination.
    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    // Generate new insert.
    rewriter.replaceOpWithNewOp<vector::InsertOp>(
        bitcastOp, newCastSrcOp, newCastDstOp, insertOp.getMixedPosition());
    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
//   %0 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %0 = vector.bitcast %src : vector<4xf16> to vector<2xf32>
//   %1 = vector.bitcast %dst : vector<8xf16> to vector<4xf32>
//   %2 = vector.insert_strided_slice %0, %1 {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
    : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());
    // Skip 0-D vectors, which cannot result from InsertStridedSliceOp.
    if (castSrcType.getRank() == 0)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;

    auto insertOp =
        bitcastOp.getSource().getDefiningOp<vector::InsertStridedSliceOp>();
    if (!insertOp)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(insertOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = insertOp.getSourceVectorType().getRank();
    // Require insert op to have the same rank for the source and destination
    // vector; other cases to be implemented.
    if (rank != insertOp.getDestVectorType().getRank())
      return failure();

    // Requires that the shape of the insert op src is castable to dstType.
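    // E.g., when bitcasting f16 to f32 (numElements = 2), the inserted slice
    // must contain an even number of f16 elements so that it maps onto a
    // whole number of f32 elements.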
    unsigned sourceWidth = castSrcType.getElementType().getIntOrFloatBitWidth();
    unsigned destinationWidth =
        castDstType.getElementType().getIntOrFloatBitWidth();
    unsigned numElements = destinationWidth / sourceWidth;
    if (insertOp.getSourceVectorType().getNumElements() % numElements != 0)
      return failure();

    ArrayAttr newOffsets = insertOp.getOffsets();
    assert(newOffsets.size() == rank);
    SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
    if (offsets.back() % shrinkRatio != 0)
      return failure();
    offsets.back() = offsets.back() / shrinkRatio;
    newOffsets = rewriter.getI64ArrayAttr(offsets);

    SmallVector<int64_t> srcDims =
        llvm::to_vector<4>(insertOp.getSourceVectorType().getShape());
    srcDims.back() = srcDims.back() / shrinkRatio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());

    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims =
        llvm::to_vector<4>(insertOp.getDestVectorType().getShape());
    dstDims.back() = dstDims.back() / shrinkRatio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    rewriter.replaceOpWithNewOp<vector::InsertStridedSliceOp>(
        bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets,
        insertOp.getStrides());

    return success();
  }
};

// Breaks down vector.bitcast op.
//
// This transforms IR like:
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %cst = vector.splat %c0_f32 : vector<4xf32>
//   %1 = vector.extract_strided_slice %0 {
//          offsets = [0], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %2 = vector.bitcast %1 : vector<4xf16> to vector<2xf32>
//   %4 = vector.insert_strided_slice %2, %cst {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
//   %5 = vector.extract_strided_slice %0 {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %6 = vector.bitcast %5 : vector<4xf16> to vector<2xf32>
//   %7 = vector.insert_strided_slice %6, %4 {
//          offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

public:
  BreakDownVectorBitCast(MLIRContext *context,
                         std::function<bool(vector::BitCastOp)> controlFn,
                         PatternBenefit benefit)
      : OpRewritePattern(context, benefit), controlFn(std::move(controlFn)) {}

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {

    if (controlFn && !controlFn(bitcastOp))
      return failure();

    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Only support rank 1 case for now.
    if (castSrcType.getRank() != 1)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
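    // E.g., bitcasting vector<8xf16> to vector<4xf32> halves the element
    // count (shrinkRatio = 2), as in the example above.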
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;
    // Nothing to do if it is already bitcasting to a single element.
    if (castSrcLastDim == shrinkRatio)
      return failure();

    Location loc = bitcastOp.getLoc();
    Type elemType = castDstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, castDstType, zero);

    SmallVector<int64_t> sliceShape = {castDstLastDim};
    SmallVector<int64_t> strides = {1};
    VectorType newCastDstType =
        VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
                        castDstType.getElementType());

    for (int i = 0, e = shrinkRatio; i < e; ++i) {
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, bitcastOp.getSource(), ArrayRef<int64_t>{i * castDstLastDim},
          sliceShape, strides);
      Value bitcast =
          rewriter.create<BitCastOp>(loc, newCastDstType, extracted);
      res = rewriter.create<InsertStridedSliceOp>(
          loc, bitcast, res,
          ArrayRef<int64_t>{i * castDstLastDim / shrinkRatio}, strides);
    }
    rewriter.replaceOp(bitcastOp, res);
    return success();
  }

private:
  std::function<bool(BitCastOp)> controlFn;
};

/// Reorders elementwise(broadcast/splat) to broadcast(elementwise). Ex:
/// ```
///   %a = vector.broadcast %arg1 : index to vector<1x4xindex>
///   %b = vector.broadcast %arg2 : index to vector<1x4xindex>
///   %r = arith.addi %a, %b : vector<1x4xindex>
/// ```
/// Gets converted to:
/// ```
///   %r = arith.addi %arg1, %arg2 : index
///   %b = vector.broadcast %r : index to vector<1x4xindex>
/// ```
///
/// Both `vector.broadcast` and `vector.splat` are supported as broadcasting
/// ops.
struct ReorderElementwiseOpsOnBroadcast final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1)
      return failure();
    if (!llvm::isa<ShapedType>(op->getResults()[0].getType()))
      return failure();
    if (!OpTrait::hasElementwiseMappableTraits(op))
      return rewriter.notifyMatchFailure(
          op, "Op doesn't have ElementwiseMappableTraits");
    if (op->getNumOperands() == 0)
      return failure();
    if (op->getResults()[0].getType() != op->getOperand(0).getType())
      return rewriter.notifyMatchFailure(op,
                                         "result and operand type mismatch");
    if (isa<vector::FMAOp>(op)) {
      return rewriter.notifyMatchFailure(
          op,
          "Op only accepts vector types - not supported as broadcast source "
          "might be a scalar");
    }

    // Get the type of the lhs operand.
    auto *lhsBcastOrSplat = op->getOperand(0).getDefiningOp();
    if (!lhsBcastOrSplat ||
        !isa<vector::BroadcastOp, vector::SplatOp>(*lhsBcastOrSplat))
      return failure();
    auto lhsBcastOrSplatType = lhsBcastOrSplat->getOperand(0).getType();

    // Make sure that all operands are broadcast from identical types:
    //  * scalar (`vector.broadcast` + `vector.splat`), or
    //  * vector (`vector.broadcast`).
    // Otherwise the re-ordering wouldn't be safe.
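    // E.g., an operand broadcast from f32 cannot be mixed with an operand
    // broadcast from vector<4xf32>: the rewritten elementwise op on the
    // broadcast sources would have mismatched operand types.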
    if (!llvm::all_of(op->getOperands(), [&lhsBcastOrSplatType](Value val) {
          auto bcast = val.getDefiningOp<vector::BroadcastOp>();
          if (bcast)
            return (bcast.getOperand().getType() == lhsBcastOrSplatType);
          auto splat = val.getDefiningOp<vector::SplatOp>();
          if (splat)
            return (splat.getOperand().getType() == lhsBcastOrSplatType);
          return false;
        })) {
      return failure();
    }

    // Collect the source values before broadcasting.
    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());
    for (Value operand : op->getOperands()) {
      srcValues.push_back(operand.getDefiningOp()->getOperand(0));
    }

    // Create the "elementwise" Op.
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        lhsBcastOrSplatType, op->getAttrs());

    // Replace the original Op with the elementwise Op.
    auto vectorType = op->getResultTypes()[0];
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, vectorType, elementwiseOp->getResults());

    return success();
  }
};

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// If `dim == 0` then the result will be a 0-D vector.
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
                                   bool force32BitVectorIndices, int64_t dim,
                                   Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Type idxType =
      force32BitVectorIndices ? rewriter.getI32Type() : rewriter.getI64Type();
  DenseIntElementsAttr indicesAttr;
  if (dim == 0 && force32BitVectorIndices) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int32_t>{0});
  } else if (dim == 0) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int64_t>{0});
  } else if (force32BitVectorIndices) {
    indicesAttr = rewriter.getI32VectorAttr(
        llvm::to_vector<4>(llvm::seq<int32_t>(0, dim)));
  } else {
    indicesAttr = rewriter.getI64VectorAttr(
        llvm::to_vector<4>(llvm::seq<int64_t>(0, dim)));
  }
  Value indices = rewriter.create<arith::ConstantOp>(loc, indicesAttr);
  // Add in an offset if requested.
  if (off) {
    Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = rewriter.create<vector::SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<arith::AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
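  // E.g., with dim = 4, no offset, and b = 3, this computes
  // [0, 1, 2, 3] < [3, 3, 3, 3] and produces the mask [1, 1, 1, 0].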
  Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds =
      rewriter.create<vector::SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, indices,
                                        bounds);
}

template <typename ConcreteOp>
struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
public:
  explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt,
                                   PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<ConcreteOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                PatternRewriter &rewriter) const override {
    if (!xferOp.hasOutOfBoundsDim())
      return failure();

    if (xferOp.getVectorType().getRank() > 1 || xferOp.getIndices().empty())
      return failure();

    Location loc = xferOp->getLoc();
    VectorType vtp = xferOp.getVectorType();

    // Create the in-bounds mask with all elements between [0 .. dim - offset)
    // set and [dim - offset .. vector_length) unset.
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
    unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
    Value off = xferOp.getIndices()[lastIndex];
    Value dim =
        vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex);
    Value b = rewriter.create<arith::SubIOp>(loc, dim.getType(), dim, off);
    Value mask = rewriter.create<vector::CreateMaskOp>(
        loc,
        VectorType::get(vtp.getShape(), rewriter.getI1Type(),
                        vtp.getScalableDims()),
        b);
    if (xferOp.getMask()) {
      // Intersect the in-bounds with the mask specified as an op parameter.
      mask = rewriter.create<arith::AndIOp>(loc, mask, xferOp.getMask());
    }

    rewriter.modifyOpInPlace(xferOp, [&]() {
      xferOp.getMaskMutable().assign(mask);
      xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true}));
    });

    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Conversion pattern for a `vector.create_mask` (0-D and 1-D only).
class VectorCreateMaskOpConversion
    : public OpRewritePattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        bool enableIndexOpt,
                                        PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<vector::CreateMaskOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    if (cast<VectorType>(dstType).isScalable())
      return failure();
    int64_t rank = dstType.getRank();
    if (rank > 1)
      return failure();
    rewriter.replaceOp(
        op, buildVectorComparison(rewriter, op, force32BitVectorIndices,
                                  rank == 0 ? 0 : dstType.getDimSize(0),
                                  op.getOperand(0)));
    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Returns true if all the `i1` elements of `constantOp` are set to `value`.
static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) {
  auto denseAttr = dyn_cast<DenseIntElementsAttr>(constantOp.getValue());
  // TODO: Support non-dense constant.
  if (!denseAttr)
    return false;

  assert(denseAttr.getElementType().isInteger(1) && "Unexpected type");
  return denseAttr.isSplat() && denseAttr.getSplatValue<bool>() == value;
}

/// Folds a select operation between an all-true and all-false vector. For now,
/// only single element vectors (i.e., vector<1xi1>) are supported. That is:
///
///   %true = arith.constant dense<true> : vector<1xi1>
///   %false = arith.constant dense<false> : vector<1xi1>
///   %result = arith.select %cond, %true, %false : i1, vector<1xi1>
///   =>
///   %result = vector.broadcast %cond : i1 to vector<1xi1>
///
/// InstCombine seems to handle vectors with multiple elements but not the
/// single element ones.
struct FoldI1Select : public OpRewritePattern<arith::SelectOp> {
  using OpRewritePattern<arith::SelectOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::SelectOp selectOp,
                                PatternRewriter &rewriter) const override {
    auto vecType = dyn_cast<VectorType>(selectOp.getType());
    if (!vecType || !vecType.getElementType().isInteger(1))
      return failure();

    // Only scalar conditions can be folded.
    Value cond = selectOp.getCondition();
    if (isa<VectorType>(cond.getType()))
      return failure();

    // TODO: Support n-D and scalable vectors.
    if (vecType.getRank() != 1 || vecType.isScalable())
      return failure();

    // TODO: Support vectors with multiple elements.
    if (vecType.getShape()[0] != 1)
      return failure();

    auto trueConst = selectOp.getTrueValue().getDefiningOp<arith::ConstantOp>();
    if (!trueConst || !allI1ConstantValuesSetTo(trueConst, true))
      return failure();

    auto falseConst =
        selectOp.getFalseValue().getDefiningOp<arith::ConstantOp>();
    if (!falseConst || !allI1ConstantValuesSetTo(falseConst, false))
      return failure();

    // Replace the select with its condition broadcasted to a single-element
    // vector.
    auto elemType = rewriter.getIntegerType(vecType.getNumElements());
    auto bcastType = VectorType::get(/*shape=*/{1}, elemType);
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(selectOp, bcastType, cond);
    return success();
  }
};

/// Returns the number of dims that can be folded away from transfer ops. It
/// returns failure if it cannot determine the number of dims to be folded.
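/// A trailing dim is foldable only when the memref dim size, its stride, and
/// the corresponding vector dim are all unit (and the vector dim is not
/// scalable).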
///
/// Ex 1: returns "2" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x1xf32>
///       (the two innermost dims can be dropped by memref.subview ops)
///
/// Ex 2: returns "1" if `srcType` is memref<512x16x1x1xf32> with
///       [8192, 16, 8, 1] strides and `vectorType` is vector<16x16x1x1xf32>
///       (only the innermost unit dim of `srcType` can be dropped)
///
/// Ex 3: returns "0" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x[1]xf32>
///       (the innermost dim in `vectorType` is not a unit dim; it's a
///       "scalable unit" dim)
static FailureOr<size_t>
getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
  SmallVector<int64_t> srcStrides;
  int64_t srcOffset;
  if (failed(srcType.getStridesAndOffset(srcStrides, srcOffset)))
    return failure();

  auto isUnitDim = [](VectorType type, int dim) {
    return type.getDimSize(dim) == 1 && !type.getScalableDims()[dim];
  };

  // According to vector.transfer_read/write semantics, the vector can be a
  // slice. Thus, we have to offset the check index with `rankDiff` in
  // `srcStrides` and source dim sizes.
  size_t result = 0;
  int rankDiff = srcType.getRank() - vectorType.getRank();
  for (int64_t i = 0, e = vectorType.getRank(); i < e; ++i) {
    // Check that the inner dim size is 1 for both the memref type and the
    // vector slice. It can be folded only if they are 1 and the stride is 1.
    int dim = vectorType.getRank() - i - 1;
    if (srcStrides[dim + rankDiff] != 1 ||
        srcType.getDimSize(dim + rankDiff) != 1 || !isUnitDim(vectorType, dim))
      break;
    result++;
  }
  return result;
}

/// Drop innermost contiguous unit dimensions from transfer_read operand.
class DropInnerMostUnitDimsTransferRead
    : public OpRewritePattern<vector::TransferReadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (readOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (readOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(readOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!readOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = readOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    auto inBounds = readOp.getInBoundsValues();
    auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
    if (llvm::is_contained(droppedInBounds, false))
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    auto loc = readOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, readOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
        readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
    Value result = rewriter.create<vector::TransferReadOp>(
        loc, resultTargetVecType, rankedReducedView,
        readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        readOp.getPadding(),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(readOp, targetType,
                                                     result);
    return success();
  }
};

/// Drop innermost contiguous unit dimensions from transfer_write operand.
/// E.g.,
///    vector.transfer_write %arg1, %arg0[%c0, %arg2, %c0, %c0, %c0]
///      {in_bounds = [true, true, true, true, true]}
///      : vector<1x16x16x1x1xf32>, memref<1x512x16x1x1xf32>
///
/// will be replaced with
///
///    %subview = memref.subview %arg0
///      [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
///      : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
///    %0 = vector.shape_cast %arg1 : vector<1x16x16x1x1xf32>
///      to vector<1x16x16xf32>
///    vector.transfer_write %0, %subview[%c0, %arg2, %c0]
///      {in_bounds = [true, true, true]}
///      : vector<1x16x16xf32>, memref<1x512x16xf32>
///
/// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`).
class DropInnerMostUnitDimsTransferWrite
    : public OpRewritePattern<vector::TransferWriteOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (writeOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (writeOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(writeOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!writeOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = writeOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    auto inBounds = writeOp.getInBoundsValues();
    auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
    if (llvm::is_contained(droppedInBounds, false))
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    Location loc = writeOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, writeOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
        writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));

    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, writeOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);

    auto shapeCast = rewriter.createOrFold<vector::ShapeCastOp>(
        loc, resultTargetVecType, writeOp.getVector());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        writeOp, shapeCast, rankedReducedView,
        writeOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    return success();
  }
};

/// Canonicalization of a `vector.contraction %a, %b, %c` with row-major matmul
/// semantics to a contraction suitable for MMT (matrix matrix multiplication
/// with the RHS transposed) lowering.
struct CanonicalizeContractMatmulToMMT final
    : OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  using FilterConstraintType =
      std::function<LogicalResult(vector::ContractionOp op)>;

  CanonicalizeContractMatmulToMMT(MLIRContext *context, PatternBenefit benefit,
                                  FilterConstraintType constraint)
      : OpRewritePattern<vector::ContractionOp>(context, benefit),
        filter(std::move(constraint)) {}

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    if (failed(filter(op)))
      return failure();

    Location loc = op.getLoc();
    Value lhs = op.getLhs();
    Value rhs = op.getRhs();
    Value res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
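    // Below, m, n, and k bind to dims d0, d1, and d2; a plain gemm must have
    // exactly the iterators [parallel, parallel, reduction] over (m, n, k).
    // Every recognized variant is then rewritten into the canonical
    // A: (m, k), B: (n, k), C: (m, n) form by transposing operands as needed.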
struct CanonicalizeContractMatmulToMMT final
    : OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  using FilterConstraintType =
      std::function<LogicalResult(vector::ContractionOp op)>;

  CanonicalizeContractMatmulToMMT(MLIRContext *context, PatternBenefit benefit,
                                  FilterConstraintType constraint)
      : OpRewritePattern<vector::ContractionOp>(context, benefit),
        filter(std::move(constraint)) {}

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    if (failed(filter(op)))
      return failure();

    Location loc = op.getLoc();
    Value lhs = op.getLhs();
    Value rhs = op.getRhs();
    Value res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [&](MapList m) {
      return AffineMap::inferFromExprList(m, op.getContext());
    };
    AffineExpr m;
    AffineExpr n;
    AffineExpr k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.getIteratorTypes().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
    if (iteratorTypes.size() != 3 ||
        !vector::isParallelIterator(iteratorTypes[0]) ||
        !vector::isParallelIterator(iteratorTypes[1]) ||
        !vector::isReductionIterator(iteratorTypes[2]))
      return rewriter.notifyMatchFailure(op, "contraction is not a gemm");

    // The canonical form is "TNT" = A row-major, B col-major, C row-major.
    const auto canonicalForm = infer({{m, k}, {n, k}, {m, n}});
    if (maps == canonicalForm)
      return rewriter.notifyMatchFailure(op, "already in the canonical form");

    // Create a vector transpose making sure to emit zero/sign-extend at the
    // end.
    auto createTranspose = [&rewriter, loc](Value mat) -> Value {
      if (auto sext = mat.getDefiningOp<arith::ExtSIOp>()) {
        Value trans =
            rewriter.create<vector::TransposeOp>(loc, sext.getIn(), perm);
        VectorType newType =
            cast<VectorType>(trans.getType())
                .clone(cast<VectorType>(mat.getType()).getElementType());
        return rewriter.create<arith::ExtSIOp>(loc, newType, trans);
      }
      if (auto zext = mat.getDefiningOp<arith::ExtUIOp>()) {
        Value trans =
            rewriter.create<vector::TransposeOp>(loc, zext.getIn(), perm);
        VectorType newType =
            VectorType::get(cast<VectorType>(trans.getType()).getShape(),
                            cast<VectorType>(mat.getType()).getElementType());
        return rewriter.create<arith::ExtUIOp>(loc, newType, trans);
      }
      return rewriter.create<vector::TransposeOp>(loc, mat, perm);
    };

    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      rhs = createTranspose(rhs);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      rhs = createTranspose(rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = createTranspose(rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = createTranspose(rhs);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return rewriter.notifyMatchFailure(op, "unhandled contraction form");
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res, rewriter.getAffineMapArrayAttr(canonicalForm),
        op.getIteratorTypes());
    return success();
  }

private:
  FilterConstraintType filter;
};

/// Pattern to fold arithmetic extensions on floating point data types into
/// vector contraction operations. linalg.matmul introduces arithmetic
/// extensions on its operands; see the MLIR snippet below for more details.
/// ```mlir
///   "linalg.matmul"(%lhs, %rhs, %acc) ({
///     ^bb0(%arg1: f16, %arg2: f16, %arg3: f32):
///       %lhs_f32 = "arith.extf"(%arg1) : (f16) -> f32
///       %rhs_f32 = "arith.extf"(%arg2) : (f16) -> f32
///       %mul = "arith.mulf"(%lhs_f32, %rhs_f32) : (f32, f32) -> f32
///       %acc = "arith.addf"(%arg3, %mul) : (f32, f32) -> f32
///       "linalg.yield"(%acc) : (f32) -> ()
///   })
/// ```
/// This restricts the native usage of mixed precision NVIDIA Ampere Tensor
/// Cores, i.e., `mma.sync.*.f32.f16.f16.f32` and
/// `mma.sync.*.f32.bf16.bf16.f32`. This pattern folds the arithmetic
/// extensions into the vector contraction and enables the usage of native
/// mixed precision Tensor Core instructions.
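///
/// At the vector level the fold looks as follows (an illustrative sketch;
/// the shapes and value names are made up):
///
///   %lhs_f32 = arith.extf %lhs : vector<4x8xf16> to vector<4x8xf32>
///   %rhs_f32 = arith.extf %rhs : vector<8x4xf16> to vector<8x4xf32>
///   %res = vector.contract {...} %lhs_f32, %rhs_f32, %acc
///     : vector<4x8xf32>, vector<8x4xf32> into vector<4x4xf32>
///
/// becomes:
///
///   %res = vector.contract {...} %lhs, %rhs, %acc
///     : vector<4x8xf16>, vector<8x4xf16> into vector<4x4xf32>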
template <typename ExtOp>
struct FoldArithExtIntoContractionOp
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    auto lhsDefOp = contractOp.getLhs().getDefiningOp<ExtOp>();
    auto rhsDefOp = contractOp.getRhs().getDefiningOp<ExtOp>();

    if (!lhsDefOp || !rhsDefOp) {
      return rewriter.notifyMatchFailure(
          contractOp, "no defining op on contract operands");
    }

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhsDefOp->getOperand(0), rhsDefOp->getOperand(0),
        contractOp.getAcc(), contractOp.getIndexingMapsAttr(),
        contractOp.getIteratorTypesAttr());

    return success();
  }
};

/// Pattern to fold a chained reduction to a series of vector additions and a
/// final reduction. This form should require fewer subgroup operations.
///
/// ```mlir
/// %a = vector.reduction <add> %x, %acc
/// %b = vector.reduction <add> %y, %a
/// ==>
/// %a = arith.addf %x, %y
/// %b = vector.reduction <add> %a, %acc
/// ```
struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    // TODO: Handle other combining kinds.
    if (op.getKind() != vector::CombiningKind::ADD)
      return failure();

    // Accumulator is optional.
    Value acc = op.getAcc();
    if (!acc)
      return failure();

    if (!acc.getType().isIntOrFloat())
      return failure();

    auto parentReduction = acc.getDefiningOp<vector::ReductionOp>();
    if (!parentReduction)
      return failure();

    Location loc = op.getLoc();
    Value vAdd;
    if (isa<IntegerType>(acc.getType())) {
      vAdd = rewriter.createOrFold<arith::AddIOp>(
          loc, parentReduction.getVector(), op.getVector());
    } else {
      vAdd = rewriter.create<arith::AddFOp>(loc, parentReduction.getVector(),
                                            op.getVector());
    }
    rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), vAdd,
                                                     parentReduction.getAcc());
    return success();
  }
};

// Helper function that drops all non-scalable unit dimensions from a
// VectorType, keeping at least one dimension to avoid generating 0-D vectors.
// Scalable unit dimensions are not dropped. Folding such dimensions would
// require "shifting" the scalable flag onto some other fixed-width dim (e.g.
// vector<[1]x4xf32> -> vector<[4]xf32>). This could be implemented in the
// future.
static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
  auto inVecShape = inVecTy.getShape();
  SmallVector<int64_t> newShape;
  SmallVector<bool> newScalableDims;
  for (auto [dim, isScalable] :
       llvm::zip_equal(inVecShape, inVecTy.getScalableDims())) {
    if (dim == 1 && !isScalable)
      continue;

    newShape.push_back(dim);
    newScalableDims.push_back(isScalable);
  }
  // All dims have been dropped; return vector<1xeType>.
  if (newShape.empty()) {
    newShape.push_back(1);
    newScalableDims.push_back(false);
  }

  return VectorType::get(newShape, inVecTy.getElementType(), newScalableDims);
}
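// For example (illustrative mappings, derived from the rules above):
//   vector<1x4x1xf32> -> vector<4xf32>
//   vector<[4]x1xf32> -> vector<[4]xf32>
//   vector<1x[1]xf32> -> vector<[1]xf32>  (scalable unit dim is kept)
//   vector<1x1xf32>   -> vector<1xf32>    (never folds to a 0-D vector)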
/// For vectors with at least one unit dim, replaces:
///   elementwise(a, b)
/// with:
///   sc_a = shape_cast(a)
///   sc_b = shape_cast(b)
///   res = elementwise(sc_a, sc_b)
///   return shape_cast(res)
/// The newly inserted shape_cast Ops fold (before the elementwise Op) and then
/// restore (after the elementwise Op) the unit dim. Vectors `a` and `b` are
/// required to be rank > 1.
///
/// Ex:
///  %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
///  %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
///
/// gets converted to:
///
///  %B_row_sc = vector.shape_cast %B_row
///    : vector<1x[4]xf32> to vector<[4]xf32>
///  %A_row_sc = vector.shape_cast %A_row
///    : vector<1x[4]xf32> to vector<[4]xf32>
///  %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
///  %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
///  %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
///
/// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
/// `%cast`.
struct DropUnitDimFromElementwiseOps final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    auto resultVectorType = dyn_cast<VectorType>(op->getResult(0).getType());
    if (!resultVectorType)
      return failure();

    // Check the operand pre-conditions. For `Elementwise` ops all operands are
    // guaranteed to have identical shapes (with some exceptions such as
    // `arith.select`) and it suffices to only check one of them.
    auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
    if (!sourceVectorType)
      return failure();
    if (sourceVectorType.getRank() < 2)
      return failure();

    SmallVector<Value> newOperands;
    auto loc = op->getLoc();
    for (auto operand : op->getOperands()) {
      auto opVectorType = cast<VectorType>(operand.getType());
      auto newVType = dropNonScalableUnitDimFromType(opVectorType);
      if (newVType == opVectorType)
        return rewriter.notifyMatchFailure(op, "No unit dimension to remove.");

      auto opSC = rewriter.create<vector::ShapeCastOp>(loc, newVType, operand);
      newOperands.push_back(opSC);
    }

    VectorType newResultVectorType =
        dropNonScalableUnitDimFromType(resultVectorType);
    // Create an updated elementwise Op without unit dims.
    Operation *elementwiseOp =
        rewriter.create(loc, op->getName().getIdentifier(), newOperands,
                        newResultVectorType, op->getAttrs());

    // Restore the unit dims by applying vector.shape_cast to the result.
    rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
                                             elementwiseOp->getResult(0));

    return success();
  }
};

/// A pattern to drop unit dims from vector.transpose.
///
/// Example:
///
/// BEFORE:
/// ```mlir
/// %transpose = vector.transpose %vector, [3, 0, 1, 2]
///   : vector<1x1x4x[4]xf32> to vector<[4]x1x1x4xf32>
/// ```
///
/// AFTER:
/// ```mlir
/// %dropDims = vector.shape_cast %vector
///   : vector<1x1x4x[4]xf32> to vector<4x[4]xf32>
/// %transpose = vector.transpose %dropDims, [1, 0]
///   : vector<4x[4]xf32> to vector<[4]x4xf32>
/// %restoreDims = vector.shape_cast %transpose
///   : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
/// ```
struct DropUnitDimsFromTransposeOp final
    : OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    VectorType sourceType = op.getSourceVectorType();
    VectorType sourceTypeWithoutUnitDims =
        dropNonScalableUnitDimFromType(sourceType);

    if (sourceType == sourceTypeWithoutUnitDims)
      return failure();

    // Construct a map from dimIdx -> number of dims dropped before dimIdx.
    auto sourceDims = llvm::to_vector(vector::getDims(sourceType));
    SmallVector<int64_t> droppedDimsBefore(sourceType.getRank());
    int64_t droppedDims = 0;
    for (auto [i, dim] : llvm::enumerate(sourceDims)) {
      droppedDimsBefore[i] = droppedDims;
      if (dim == std::make_tuple(1, false))
        ++droppedDims;
    }

    // Drop unit dims from the transpose permutation.
    ArrayRef<int64_t> perm = op.getPermutation();
    SmallVector<int64_t> newPerm;
    for (int64_t idx : perm) {
      if (sourceDims[idx] == std::make_tuple(1, false))
        continue;
      newPerm.push_back(idx - droppedDimsBefore[idx]);
    }

    // Fixup for `newPerm`: when all dimensions are unit dimensions,
    // `sourceTypeWithoutUnitDims` is `vector<1xT>` and `newPerm` must be [0].
    if (newPerm.empty()) {
      newPerm.push_back(0);
    }

    Location loc = op.getLoc();
    // Drop the unit dims via shape_cast.
    auto dropDimsShapeCast = rewriter.create<vector::ShapeCastOp>(
        loc, sourceTypeWithoutUnitDims, op.getVector());
    // Create the new transpose.
    auto transposeWithoutUnitDims =
        rewriter.create<vector::TransposeOp>(loc, dropDimsShapeCast, newPerm);
    // Restore the unit dims via shape cast.
    rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(
        op, op.getResultVectorType(), transposeWithoutUnitDims);

    return success();
  }
};

/// A pattern to drop unit dims from the iter_args of an scf.for.
///
/// Example:
///
/// BEFORE:
/// ```mlir
/// %res = scf.for ... iter_args(%iter = %init) -> vector<[4]x1x1x4xf32> {
///   ...
///   scf.yield ...
/// }
/// ```
///
/// AFTER:
/// ```mlir
/// %drop = vector.shape_cast %init
///   : vector<[4]x1x1x4xf32> to vector<[4]x4xf32>
/// %new_loop = scf.for ... iter_args(%iter = %drop) -> vector<[4]x4xf32> {
///   %new_iter = vector.shape_cast %iter
///     : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
///   ...
/// }
/// %res = vector.shape_cast %new_loop
///   : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
/// ```
struct DropUnitDimsFromScfForOp final : OpRewritePattern<scf::ForOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(scf::ForOp forOp,
                                PatternRewriter &rewriter) const override {
    /// Find the first iter_arg with droppable unit dims. Further applications
    /// of this pattern will apply to later arguments.
    for (OpOperand &operand : forOp.getInitArgsMutable()) {
      auto vectorType = dyn_cast<VectorType>(operand.get().getType());
      if (!vectorType)
        continue;

      VectorType newVectorType = dropNonScalableUnitDimFromType(vectorType);
      if (vectorType == newVectorType)
        continue;

      // Create a new ForOp with that iter operand replaced.
      auto castFn = [](OpBuilder &b, Location loc, Type type, Value source) {
        return b.create<vector::ShapeCastOp>(loc, type, source);
      };

      Value replacement =
          castFn(rewriter, forOp.getLoc(), newVectorType, operand.get());
      rewriter.replaceOp(forOp,
                         replaceAndCastForOpIterArg(rewriter, forOp, operand,
                                                    replacement, castFn));
      return success();
    }
    return failure();
  }
};

/// Pattern to eliminate redundant zero-constants added to reduction operands.
/// It's enough for there to be one initial zero value, so we can eliminate the
/// extra ones that feed into `vector.reduction <add>`. These get created by
/// the `ChainedReduction` pattern.
///
/// ```mlir
/// %a = arith.addf %x, %zero
/// %b = arith.addf %a, %y
/// %c = vector.reduction <add> %b, %acc
/// ==>
/// %b = arith.addf %x, %y
/// %c = vector.reduction <add> %b, %acc
/// ```
struct ReduceRedundantZero final : OpRewritePattern<vector::ReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    // TODO: Handle other reduction kinds and their identity values.
    if (op.getKind() != vector::CombiningKind::ADD)
      return failure();

    Type elemType = op.getSourceVectorType().getElementType();
    // The integer case should be handled by `arith.addi` folders, only check
    // for floats here.
    if (!isa<FloatType>(elemType))
      return failure();

    auto vAdd = op.getVector().getDefiningOp<arith::AddFOp>();
    if (!vAdd)
      return failure();
    auto addLhs = vAdd.getLhs().getDefiningOp<arith::AddFOp>();
    if (!addLhs)
      return failure();

    if (!matchPattern(addLhs.getRhs(), m_AnyZeroFloat()))
      return failure();

    auto newAdd = rewriter.create<arith::AddFOp>(vAdd.getLoc(),
                                                 addLhs.getLhs(),
                                                 vAdd.getRhs());
    rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), newAdd,
                                                     op.getAcc());
    return success();
  }
};

/// Example:
/// ```
/// %a = vector.reduction <add> %x : vector<2xf32> into f32
/// ```
/// is transformed into:
/// ```
/// %y = vector.extract %x[0] : f32 from vector<2xf32>
/// %z = vector.extract %x[1] : f32 from vector<2xf32>
/// %a = arith.addf %y, %z : f32
/// ```
struct BreakDownVectorReduction final : OpRewritePattern<vector::ReductionOp> {
  BreakDownVectorReduction(MLIRContext *context,
                           unsigned maxNumElementsToExtract,
                           PatternBenefit benefit)
      : OpRewritePattern(context, benefit),
        maxNumElementsToExtract(maxNumElementsToExtract) {}

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    VectorType type = op.getSourceVectorType();
    if (type.isScalable() || op.isMasked())
      return failure();
    assert(type.getRank() == 1 && "Expected a 1-d vector");

    int64_t numElems = type.getNumElements();
    if (numElems > maxNumElementsToExtract) {
      return rewriter.notifyMatchFailure(
          op, llvm::formatv("has too many vector elements ({0}) to break down "
                            "(max allowed: {1})",
                            numElems, maxNumElementsToExtract));
    }

    Location loc = op.getLoc();
    SmallVector<Value> extracted(numElems, nullptr);
    for (auto [idx, extractedElem] : llvm::enumerate(extracted))
      extractedElem = rewriter.create<vector::ExtractOp>(
          loc, op.getVector(), static_cast<int64_t>(idx));

    Value res = extracted.front();
    for (auto extractedElem : llvm::drop_begin(extracted))
      res = vector::makeArithReduction(rewriter, loc, op.getKind(), res,
                                       extractedElem, op.getFastmathAttr());
    if (Value acc = op.getAcc())
      res = vector::makeArithReduction(rewriter, loc, op.getKind(), res, acc,
                                       op.getFastmathAttr());

    rewriter.replaceOp(op, res);
    return success();
  }

private:
  unsigned maxNumElementsToExtract = 0;
};

/// Fold `mulf(tr(broadcast(A)), broadcast(B))` into `vector.outerproduct(A,
/// B)`.
/// Example:
///  %lhsBcast = vector.broadcast %lhs : vector<4xi32> to vector<4x4xi32>
///  %lhsT = vector.transpose %lhsBcast, [1, 0]
///    : vector<4x4xi32> to vector<4x4xi32>
///  %rhsBcast = vector.broadcast %rhs : vector<4xi32> to vector<4x4xi32>
///  %mul = arith.muli %lhsT, %rhsBcast : vector<4x4xi32>
///
/// Becomes:
///
///  %res = vector.outerproduct %lhs, %rhs : vector<4xi32>, vector<4xi32>
///
/// Supports only 1D-to-2D broadcasts. The following cases are not supported:
///  %ex1 = vector.broadcast %lhsCast : vector<1x4xf32> to vector<4x4xf32>
///  %ex2 = vector.broadcast %lhsCast : f32 to vector<4x4xf32>
///  %ex3 = vector.broadcast %lhsCast : vector<1x1xf32> to vector<4x4xf32>
template <typename MulOpType>
struct FoldArithToVectorOuterProduct : public OpRewritePattern<MulOpType> {
  using OpRewritePattern<MulOpType>::OpRewritePattern;
  // Returns whether a vector.broadcast matches the requirements for an
  // outerproduct pattern, i.e., whether it is a 1D-to-2D broadcast without
  // broadcasted unit dimensions.
  bool isValidBroadcastSource(vector::BroadcastOp broadcastOp) const {
    // Fail unless this is a 1D-to-2D broadcast, to avoid generating
    // shape_casts/broadcasts which do not belong in this pattern.
    if (!broadcastOp.computeBroadcastedUnitDims().empty())
      return false;
    // Avoid broadcasts like f32 or vector<f32> -> ResType.
    auto srcType = dyn_cast<VectorType>(broadcastOp.getSourceType());
    return srcType && srcType.getRank() == 1;
  }

  LogicalResult matchAndRewrite(MulOpType mulOp,
                                PatternRewriter &rewriter) const override {
    // Use dyn_cast so that scalar multiplications bail out instead of
    // asserting.
    auto resType = llvm::dyn_cast<VectorType>(mulOp.getResult().getType());
    if (!resType)
      return failure();
    if (resType.getRank() != 2)
      return failure();
    /// If operandA can be written as tr(broadcast(A)) and operandB as
    /// broadcast(B), where the broadcasts are 1D-to-2D, create and return
    /// vector.outerproduct(A, B). Returns failure() otherwise.
    auto matchOuterProduct =
        [&](Value operandA,
            Value operandB) -> FailureOr<vector::OuterProductOp> {
      auto transposedLhs = operandA.getDefiningOp<vector::TransposeOp>();
      if (!transposedLhs)
        return failure();
      // Fail unless this is a true 2-D matrix transpose.
      ArrayRef<int64_t> permutation = transposedLhs.getPermutation();
      if (permutation.size() != 2 || permutation[0] != 1 ||
          permutation[1] != 0)
        return failure();

      auto broadcastedLhs =
          transposedLhs.getVector().getDefiningOp<vector::BroadcastOp>();
      if (!broadcastedLhs || !isValidBroadcastSource(broadcastedLhs))
        return failure();

      auto broadcastedRhs = operandB.getDefiningOp<vector::BroadcastOp>();
      if (!broadcastedRhs || !isValidBroadcastSource(broadcastedRhs))
        return failure();

      return rewriter.create<vector::OuterProductOp>(
          mulOp->getLoc(), resType, broadcastedLhs.getSource(),
          broadcastedRhs.getSource(), Value(), vector::CombiningKind::ADD);
    };

    Value lhs = mulOp->getOperand(0), rhs = mulOp->getOperand(1);
    auto maybeOuterP = matchOuterProduct(lhs, rhs);
    // Handle commutativity; the transposed op is the outerproduct LHS.
    if (failed(maybeOuterP))
      maybeOuterP = matchOuterProduct(rhs, lhs);
    if (failed(maybeOuterP))
      return failure();
    rewriter.replaceOp(mulOp, maybeOuterP->getResult());
    return success();
  }
};

} // namespace

void mlir::vector::populateFoldArithExtensionPatterns(
    RewritePatternSet &patterns) {
  patterns.add<FoldArithExtIntoContractionOp<arith::ExtFOp>,
               FoldArithExtIntoContractionOp<arith::ExtSIOp>>(
      patterns.getContext());
}

void mlir::vector::populateVectorMaskMaterializationPatterns(
    RewritePatternSet &patterns, bool force32BitVectorIndices,
    PatternBenefit benefit) {
  patterns.add<VectorCreateMaskOpConversion,
               MaterializeTransferMask<vector::TransferReadOp>,
               MaterializeTransferMask<vector::TransferWriteOp>>(
      patterns.getContext(), force32BitVectorIndices, benefit);
  patterns.add<FoldI1Select>(patterns.getContext(), benefit);
}

void mlir::vector::populateShapeCastFoldingPatterns(RewritePatternSet &patterns,
                                                    PatternBenefit benefit) {
  patterns.add<ShapeCastOpFolder>(patterns.getContext(), benefit);
}

void mlir::vector::populateDropUnitDimWithShapeCastPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  // TODO: Consider either:
  //  * including DropInnerMostUnitDimsTransferRead and
  //    DropInnerMostUnitDimsTransferWrite, or
  //  * better naming to distinguish this from
  //    populateVectorTransferCollapseInnerMostContiguousDimsPatterns.
  patterns.add<DropUnitDimFromElementwiseOps, DropUnitDimsFromScfForOp,
               DropUnitDimsFromTransposeOp, ShapeCastOpFolder>(
      patterns.getContext(), benefit);
}

void mlir::vector::populateBubbleVectorBitCastOpPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<BubbleDownVectorBitCastForExtract,
               BubbleDownBitCastForStridedSliceExtract,
               BubbleUpBitCastForInsert, BubbleUpBitCastForStridedSliceInsert>(
      patterns.getContext(), benefit);
}

void mlir::vector::populateBreakDownVectorBitCastOpPatterns(
    RewritePatternSet &patterns,
    std::function<bool(vector::BitCastOp)> controlFn, PatternBenefit benefit) {
  patterns.add<BreakDownVectorBitCast>(patterns.getContext(),
                                       std::move(controlFn), benefit);
}

void mlir::vector::populateVectorContractCanonicalizeMatmulToMMT(
    RewritePatternSet &patterns,
    std::function<LogicalResult(vector::ContractionOp)> constraint,
    PatternBenefit benefit) {
  patterns.add<CanonicalizeContractMatmulToMMT>(patterns.getContext(), benefit,
                                                std::move(constraint));
}

void mlir::vector::populateVectorReductionToContractPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<MultiReduceToContract, CombineContractBroadcast,
               CombineContractABTranspose, CombineContractResultTranspose>(
      patterns.getContext(), benefit);
}

void mlir::vector::
    populateVectorTransferCollapseInnerMostContiguousDimsPatterns(
        RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<DropInnerMostUnitDimsTransferRead,
               DropInnerMostUnitDimsTransferWrite>(patterns.getContext(),
                                                   benefit);
}
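// A typical way to drive the pattern sets registered by the populate*
// functions in this file (an illustrative sketch, not an in-tree pass;
// `funcOp` stands for whatever op the caller wants to rewrite):
//
//   RewritePatternSet patterns(funcOp.getContext());
//   vector::populateVectorTransferCollapseInnerMostContiguousDimsPatterns(
//       patterns);
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));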
void mlir::vector::populateSinkVectorOpsPatterns(RewritePatternSet &patterns,
                                                 PatternBenefit benefit) {
  patterns.add<ReorderElementwiseOpsOnTranspose, ReorderCastOpsOnBroadcast,
               ReorderElementwiseOpsOnBroadcast>(patterns.getContext(),
                                                 benefit);
}

void mlir::vector::populateChainedVectorReductionFoldingPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<ChainedReduction>(patterns.getContext(), benefit);
  patterns.add<ReduceRedundantZero>(patterns.getContext(),
                                    PatternBenefit(benefit.getBenefit() + 1));
}

void mlir::vector::populateBreakDownVectorReductionPatterns(
    RewritePatternSet &patterns, unsigned maxNumElementsToExtract,
    PatternBenefit benefit) {
  patterns.add<BreakDownVectorReduction>(patterns.getContext(),
                                         maxNumElementsToExtract, benefit);
}

void mlir::vector::populateElementwiseToVectorOpsPatterns(
    RewritePatternSet &patterns) {
  patterns.add<FoldArithToVectorOuterProduct<arith::MulFOp>,
               FoldArithToVectorOuterProduct<arith::MulIOp>>(
      patterns.getContext());
}

//===----------------------------------------------------------------------===//
// TableGen'd enum attribute definitions
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransformsEnums.cpp.inc"