//===- VectorTransforms.cpp - Conversion within the Vector dialect --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent rewrites as 1->N patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"

#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>
#include <type_traits>

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/VectorInterfaces.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "vector-to-vector"

using namespace mlir;
using namespace mlir::vector;

// Helper to convert the integer values of `arrayAttr` into a vector of
// `IntType`.
template <typename IntType>
static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(llvm::map_range(
      arrayAttr.getAsRange<IntegerAttr>(),
      [](IntegerAttr attr) { return static_cast<IntType>(attr.getInt()); }));
}

// Helper to find the position at which dimension `index` appears in the
// results of `map`. Returns std::nullopt if the dimension is not used.
static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      return i;
  }
  return std::nullopt;
}

namespace {

/// ShapeCastOpFolder folds cancelling ShapeCastOps away.
//
// Example:
//
//  The following MLIR with cancelling ShapeCastOps:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = shape_cast %0 : vector<5x4x2xf32> to vector<20x2xf32>
//   %2 = shape_cast %1 : vector<20x2xf32> to vector<5x4x2xf32>
//   %3 = user %2 : vector<5x4x2xf32>
//
//  Should canonicalize to the following:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = user %0 : vector<5x4x2xf32>
//
struct ShapeCastOpFolder : public OpRewritePattern<vector::ShapeCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp shapeCastOp,
                                PatternRewriter &rewriter) const override {
    // Check if 'shapeCastOp' has vector source/result type.
    auto sourceVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getSource().getType());
    auto resultVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getResult().getType());
    if (!sourceVectorType || !resultVectorType)
      return failure();

    // Check if shape cast op source operand is also a shape cast op.
    auto sourceShapeCastOp = dyn_cast_or_null<vector::ShapeCastOp>(
        shapeCastOp.getSource().getDefiningOp());
    if (!sourceShapeCastOp)
      return failure();
    auto operandSourceVectorType =
        cast<VectorType>(sourceShapeCastOp.getSource().getType());
    auto operandResultVectorType = sourceShapeCastOp.getType();

    // Check if shape cast operations invert each other.
    if (operandSourceVectorType != resultVectorType ||
        operandResultVectorType != sourceVectorType)
      return failure();

    rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.getSource());
    return success();
  }
};

/// Convert MulIOp/MulFOp + MultiDimReductionOp<add> into ContractionOp.
/// Ex:
/// ```
///   %0 = arith.mulf %arg0, %arg1 : vector<8x32x16xf32>
///   %1 = vector.multi_reduction add, %0 [1]
///     : vector<8x32x16xf32> to vector<8x16xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d2)>],
///    iterator_types = ["parallel", "reduction", "parallel"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x16xf32>
/// ```
struct MultiReduceToContract
    : public OpRewritePattern<vector::MultiDimReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
                                PatternRewriter &rewriter) const override {
    if (reduceOp.getKind() != vector::CombiningKind::ADD)
      return failure();
    Operation *mulOp = reduceOp.getSource().getDefiningOp();
    if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
      return failure();
    SmallVector<bool> reductionMask = reduceOp.getReductionMask();
    auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
    SmallVector<AffineExpr> exprs;
    SmallVector<vector::IteratorType> iteratorTypes;
    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
      if (!isReduceDim.value()) {
        iteratorTypes.push_back(vector::IteratorType::parallel);
        exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
      } else {
        iteratorTypes.push_back(vector::IteratorType::reduction);
      }
    }
    auto dstMap =
        AffineMap::get(/*dimCount=*/reductionMask.size(),
                       /*symbolCount=*/0, exprs, reduceOp.getContext());
    rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
        reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
        rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
        rewriter.getArrayAttr(llvm::to_vector(llvm::map_range(
            iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
              return IteratorTypeAttr::get(rewriter.getContext(), t);
            }))));
    return success();
  }
};

/// Merge LHS/RHS (A/B) TransposeOp into ContractionOp user.
/// Ex:
/// ```
///   %0 = vector.transpose %arg0, [2, 0, 1]
///     : vector<32x16x8xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2, d0)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16x8xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractABTranspose final
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto transposeOp = operand->getDefiningOp<vector::TransposeOp>();
      if (!transposeOp)
        continue;
      AffineMap permutationMap = AffineMap::getPermutationMap(
          transposeOp.getPermutation(), contractOp.getContext());
      map = inversePermutation(permutationMap).compose(map);
      *operand = transposeOp.getVector();
      changed = true;
    }
    if (!changed)
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merges accumulator and result transposes into contract.
///
/// For example:
/// ```mlir
/// %accT = vector.transpose %acc, [0, 2, 1]
///   : vector<2x8x4xf32> to vector<2x4x8xf32>
/// %contract = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %accT
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x4x8xf32>
/// %0 = vector.transpose %contract, [0, 2, 1]
///   : vector<2x4x8xf32> to vector<2x8x4xf32>
/// ```
/// Becomes:
/// ```mlir
/// %0 = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d2, d1)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %acc
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x8x4xf32>
/// ```
struct CombineContractResultTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp resTOp,
                                PatternRewriter &rewriter) const override {
    auto contractOp = resTOp.getVector().getDefiningOp<vector::ContractionOp>();
    if (!contractOp || !contractOp->hasOneUse())
      return failure();

    auto accTOp = contractOp.getAcc().getDefiningOp<vector::TransposeOp>();
    if (!accTOp)
      return failure();

    MLIRContext *context = contractOp.getContext();
    auto maps = llvm::to_vector<3>(contractOp.getIndexingMapsArray());
    AffineMap contractMap = maps.back();

    // The accumulator transpose performs f(A) -> B. The contract performs
    // g(C) -> B. To index into A in the new contract, we need
    // inverse(f)(g(C)) -> A.
    auto accTMap =
        AffineMap::getPermutationMap(accTOp.getPermutation(), context);

    // The contract performs g(C) -> D. The result transpose performs
    // h(D) -> E. To index into E in the new contract, we need h(g(C)) -> E.
    auto resTMap =
        AffineMap::getPermutationMap(resTOp.getPermutation(), context);
    auto combinedResMap = resTMap.compose(contractMap);

    // The accumulator and result share the same indexing map, so the two
    // transposes must cancel out for the merge to be valid. That is,
    // combinedResMap must equal
    // inversePermutation(accTMap).compose(contractMap), i.e. resTMap must
    // equal inversePermutation(accTMap).
    if (inversePermutation(accTMap) != resTMap)
      return failure();
    maps.back() = combinedResMap;

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        resTOp, contractOp.getLhs(), contractOp.getRhs(), accTOp.getVector(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merge BroadcastOp into ContractionOp user.
/// Ex:
/// ```
/// %0 = vector.broadcast %arg0 : vector<32x16xf32> to vector<8x32x16xf32>
/// %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
/// %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractBroadcast
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto broadcast = operand->getDefiningOp<vector::BroadcastOp>();
      if (!broadcast)
        continue;
      // ContractionOp can only take vectors as operands.
      auto srcType = dyn_cast<VectorType>(broadcast.getSourceType());
      if (!srcType ||
          srcType.getRank() == broadcast.getResultVectorType().getRank())
        continue;
      int64_t rankDiff =
          broadcast.getResultVectorType().getRank() - srcType.getRank();
      bool innerDimBroadcast = false;
      SmallVector<AffineExpr> originalDims;
      for (const auto &dim : llvm::enumerate(srcType.getShape())) {
        if (dim.value() != broadcast.getResultVectorType().getDimSize(
                               rankDiff + dim.index())) {
          innerDimBroadcast = true;
          break;
        }
        originalDims.push_back(
            rewriter.getAffineDimExpr(dim.index() + rankDiff));
      }
      // Contract doesn't support inner dimension broadcast. Once this is
      // relaxed we can remove this case.
      if (innerDimBroadcast)
        continue;

      // It would be incorrect to fold a broadcast onto a reduction dimension
      // of non-unit size.
      bool nonUnitDimReductionBroadcast = false;
      for (int64_t i = 0; i < rankDiff; ++i) {
        if (broadcast.getResultVectorType().getDimSize(i) != 1 &&
            isReductionIterator(contractOp.getIteratorTypes()
                                    .getValue()[map.getDimPosition(i)])) {
          nonUnitDimReductionBroadcast = true;
          break;
        }
      }
      if (nonUnitDimReductionBroadcast)
        continue;

      AffineMap broadcastMap =
          AffineMap::get(broadcast.getResultVectorType().getRank(), 0,
                         originalDims, contractOp.getContext());
      map = broadcastMap.compose(map);
      *operand = broadcast.getSource();
      changed = true;
    }

    if (!changed)
      return failure();

    // Determine which dims are unused, now that the maps have been composed
    // with the broadcast maps.
    llvm::SmallBitVector unusedDimsBitVector = getUnusedDimsBitVector(maps);
    // Compress unused dims.
    for (auto &m : maps)
      m = compressDims(m, unusedDimsBitVector);
    // Compute the combined iterators.
    SmallVector<Attribute> iterators;
    for (unsigned i = 0; i < unusedDimsBitVector.size(); ++i) {
      if (!unusedDimsBitVector.test(i))
        iterators.push_back(contractOp.getIteratorTypes().getValue()[i]);
    }
    // Check that compressing unused dims isn't removing all reduction
    // dimension pairs. For example, if the vector.contract had only one
    // reduction iterator and that was a unit-dimension created by a
    // broadcast, then we should bail here, otherwise we would create a
    // contract without a reduction dimension pair.
    bool hasReductionIteratorApplyingOnBothSides = false;
    for (unsigned i = 0; i < iterators.size(); ++i) {
      if (!isReductionIterator(iterators[i]))
        continue;
      if (getResultIndex(maps[0], i) && getResultIndex(maps[1], i)) {
        hasReductionIteratorApplyingOnBothSides = true;
        break;
      }
    }
    if (!hasReductionIteratorApplyingOnBothSides)
      return failure();

    // If the compressed maps have a dimension that is not used by either LHS
    // or RHS then the ContractionOp verifier would fail.
    if (getUnusedDimsBitVector({maps[0], maps[1]}).any())
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), rewriter.getArrayAttr(iterators));
    return success();
  }
};

/// Reorders cast(broadcast) to broadcast(cast). This brings broadcast ops and
/// contraction ops closer together, which lets the CombineContractBroadcast
/// pattern kick in when casting ops sit between these operations.
/// Ex:
/// ```
/// %0 = vector.broadcast %arg0 : vector<32x16xi8> to vector<8x32x16xi8>
/// %1 = arith.extsi %0 : vector<8x32x16xi8> to vector<8x32x16xi32>
/// ```
/// Gets converted to:
/// ```
/// %0 = arith.extsi %arg0 : vector<32x16xi8> to vector<32x16xi32>
/// %1 = vector.broadcast %0 : vector<32x16xi32> to vector<8x32x16xi32>
/// ```
struct ReorderCastOpsOnBroadcast
    : public OpInterfaceRewritePattern<CastOpInterface> {
  using OpInterfaceRewritePattern<CastOpInterface>::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(CastOpInterface op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumOperands() != 1)
      return failure();
    auto bcastOp = op->getOperand(0).getDefiningOp<vector::BroadcastOp>();
    if (!bcastOp)
      return failure();

    Type castResTy = getElementTypeOrSelf(op->getResult(0));
    if (auto vecTy = dyn_cast<VectorType>(bcastOp.getSourceType()))
      castResTy = vecTy.clone(castResTy);
    auto *castOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(),
                        bcastOp.getSource(), castResTy, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, op->getResult(0).getType(), castOp->getResult(0));
    return success();
  }
};

/// Reorders elementwise(transpose) to transpose(elementwise). This brings
/// transpose ops and contraction ops closer together, which lets the
/// CombineContractABTranspose pattern kick in when elementwise ops sit
/// between these operations. Ex:
/// ```
/// %at = vector.transpose %a, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// %bt = vector.transpose %b, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// %r = arith.addf %at, %bt : vector<2x4xf32>
/// ```
/// Gets converted to:
/// ```
/// %0 = arith.addf %a, %b : vector<4x2xf32>
/// %r = vector.transpose %0, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// ```
struct ReorderElementwiseOpsOnTranspose final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    // Make sure all operands are transpose/constant ops and collect their
    // transposition maps.
    SmallVector<ArrayRef<int64_t>> transposeMaps;
    transposeMaps.reserve(op->getNumOperands());
    // Record the initial type before transposition. We'll use its shape later.
    // Any type will do here as we will check all transpose maps are the same.
    VectorType srcType;
    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        transposeMaps.push_back(transposeOp.getPermutation());
        srcType = transposeOp.getSourceVectorType();
      } else if (!matchPattern(operand, m_Constant())) {
        return failure();
      }
    }
    if (transposeMaps.empty())
      return failure();
    // This is an elementwise op, so all transposed operands should have the
    // same type. We need to additionally check that all transposes use the
    // same map.
    if (!llvm::all_equal(transposeMaps))
      return rewriter.notifyMatchFailure(op, "different transpose map");

    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());

    // If there are constant operands, we need to insert inverse transposes for
    // them. Calculate the inverse order first.
    auto order = transposeMaps.front();
    SmallVector<int64_t> invOrder(order.size());
    for (int i = 0, e = order.size(); i < e; ++i)
      invOrder[order[i]] = i;

    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        srcValues.push_back(transposeOp.getVector());
      } else {
        // This is a constant. Create a reverse transpose op for it.
        auto vectorType =
            srcType.clone(cast<VectorType>(operand.getType()).getElementType());
        srcValues.push_back(rewriter.create<vector::TransposeOp>(
            operand.getLoc(), vectorType, operand, invOrder));
      }
    }

    auto vectorType = srcType.clone(
        cast<VectorType>(op->getResultTypes()[0]).getElementType());
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        vectorType, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::TransposeOp>(
        op, op->getResultTypes()[0], elementwiseOp->getResult(0),
        transposeMaps.front());
    return success();
  }
};

// Returns the values in `arrayAttr` as an integer vector.
static SmallVector<int64_t> getIntValueVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(
      llvm::map_range(arrayAttr.getAsRange<IntegerAttr>(),
                      [](IntegerAttr attr) { return attr.getInt(); }));
}

// Shuffles vector.bitcast op after vector.extract op.
//
// This transforms IR like:
//   %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %1 = vector.extract %0[3] : f16 from vector<8xf16>
// Into:
//   %0 = vector.extract %src[1] : f32 from vector<4xf32>
//   %1 = vector.bitcast %0 : vector<1xf32> to vector<2xf16>
//   %2 = vector.extract %1[1] : f16 from vector<2xf16>
struct BubbleDownVectorBitCastForExtract
    : public OpRewritePattern<vector::ExtractOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
                                PatternRewriter &rewriter) const override {
    // Only support extracting scalars for now.
    if (extractOp.getSourceVectorType().getRank() != 1)
      return failure();

    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Fail to match if we only have one element in the cast op source.
    // This is to avoid infinite loop given that this pattern can generate
    // such cases.
    if (castSrcType.getNumElements() == 1)
      return failure();

    // Only support casting to a larger number of elements for now.
    // E.g., vector<4xf32> -> vector<8xf16>.
    if (castSrcType.getNumElements() > castDstType.getNumElements())
      return failure();

    unsigned expandRatio =
        castDstType.getNumElements() / castSrcType.getNumElements();

    auto getFirstIntValue = [](ArrayRef<OpFoldResult> values) -> uint64_t {
      assert(values[0].is<Attribute>() && "Unexpected non-constant index");
      return cast<IntegerAttr>(values[0].get<Attribute>()).getInt();
    };

    uint64_t index = getFirstIntValue(extractOp.getMixedPosition());

    // Get the single scalar (as a vector) in the source value that packs the
    // desired scalar. E.g. extract vector<1xf32> from vector<4xf32>.
    Location loc = extractOp.getLoc();
    Value packedValue = rewriter.create<vector::ExtractOp>(
        loc, castOp.getSource(), index / expandRatio);
    Type packedVecType = VectorType::get(/*shape=*/{1}, packedValue.getType());
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, packedVecType, rewriter.getZeroAttr(packedVecType));
    packedValue = rewriter.create<vector::InsertOp>(loc, packedValue, zero,
                                                    /*position=*/0);

    // Cast it to a vector with the desired scalar's type.
    // E.g. f32 -> vector<2xf16>.
    VectorType packedType =
        VectorType::get({expandRatio}, castDstType.getElementType());
    Value castedValue =
        rewriter.create<vector::BitCastOp>(loc, packedType, packedValue);

    // Finally extract the desired scalar.
    rewriter.replaceOpWithNewOp<vector::ExtractOp>(extractOp, castedValue,
                                                   index % expandRatio);
    return success();
  }
};

// Shuffles vector.bitcast op after vector.extract_strided_slice op.
//
// This transforms IR like:
//   %cast = vector.bitcast %arg0 : vector<4xf32> to vector<8xf16>
//   %0 = vector.extract_strided_slice %cast {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
// Into:
//   %0 = vector.extract_strided_slice %src {
//          offsets = [2], sizes = [2], strides = [1]
//        } : vector<4xf32> to vector<2xf32>
//   %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
    : public OpRewritePattern<vector::ExtractStridedSliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to more elements for now; other cases to be implemented.
    if (castSrcLastDim > castDstLastDim)
      return failure();

    // Only accept all one strides for now.
    if (llvm::any_of(extractOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = extractOp.getSourceVectorType().getRank();
    assert(castDstLastDim % castSrcLastDim == 0);
    int64_t expandRatio = castDstLastDim / castSrcLastDim;

    // If we have fewer offsets than the rank, then implicitly we are selecting
    // the full range for the last bitcasted dimension; other dimensions aren't
    // affected. Otherwise, we need to scale down the last dimension's offset
    // given we are extracting from fewer elements now.
    ArrayAttr newOffsets = extractOp.getOffsets();
    if (newOffsets.size() == rank) {
      SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
      if (offsets.back() % expandRatio != 0)
        return failure();
      offsets.back() = offsets.back() / expandRatio;
      newOffsets = rewriter.getI64ArrayAttr(offsets);
    }

    // Similarly for sizes.
    ArrayAttr newSizes = extractOp.getSizes();
    if (newSizes.size() == rank) {
      SmallVector<int64_t> sizes = getIntValueVector(newSizes);
      if (sizes.back() % expandRatio != 0)
        return failure();
      sizes.back() = sizes.back() / expandRatio;
      newSizes = rewriter.getI64ArrayAttr(sizes);
    }

    SmallVector<int64_t> dims =
        llvm::to_vector<4>(cast<VectorType>(extractOp.getType()).getShape());
    dims.back() = dims.back() / expandRatio;
    VectorType newExtractType =
        VectorType::get(dims, castSrcType.getElementType());

    auto newExtractOp = rewriter.create<vector::ExtractStridedSliceOp>(
        extractOp.getLoc(), newExtractType, castOp.getSource(), newOffsets,
        newSizes, extractOp.getStrides());

    rewriter.replaceOpWithNewOp<vector::BitCastOp>(
        extractOp, extractOp.getType(), newExtractOp);

    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert op.
//
// This transforms IR like:
//   %0 = vector.insert %val, %dst[4] : vector<32xi4> into vector<8x32xi4>
//   %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
// Into:
//   %0 = vector.bitcast %val : vector<32xi4> to vector<16xi8>
//   %1 = vector.bitcast %dst : vector<8x32xi4> to vector<8x16xi8>
//   %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8>
//
struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();

    // 0-D and scalable vectors are not supported yet.
    if (castSrcType.getRank() == 0 || castSrcType.isScalable() ||
        castDstType.isScalable())
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    bool isNumElemsShrink = castSrcLastDim >= castDstLastDim;
    int64_t ratio;
    if (isNumElemsShrink) {
      assert(castSrcLastDim % castDstLastDim == 0);
      ratio = castSrcLastDim / castDstLastDim;
    } else {
      assert(castDstLastDim % castSrcLastDim == 0);
      ratio = castDstLastDim / castSrcLastDim;
    }

    auto insertOp = bitcastOp.getSource().getDefiningOp<vector::InsertOp>();
    if (!insertOp)
      return failure();

    // Only vector sources are supported for now.
    auto insertSrcType = dyn_cast<VectorType>(insertOp.getSourceType());
    if (!insertSrcType)
      return failure();

    // Bitcast the source.
    SmallVector<int64_t> srcDims(insertSrcType.getShape());
    srcDims.back() =
        isNumElemsShrink ? srcDims.back() / ratio : srcDims.back() * ratio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());
    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims(insertOp.getDestVectorType().getShape());
    dstDims.back() =
        isNumElemsShrink ? dstDims.back() / ratio : dstDims.back() * ratio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    // Bitcast the destination.
    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    // Generate new insert.
    rewriter.replaceOpWithNewOp<vector::InsertOp>(
        bitcastOp, newCastSrcOp, newCastDstOp, insertOp.getMixedPosition());
    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
//   %0 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %0 = vector.bitcast %src : vector<4xf16> to vector<2xf32>
//   %1 = vector.bitcast %dst : vector<8xf16> to vector<4xf32>
//   %2 = vector.insert_strided_slice %0, %1 {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
    : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());
    // Skip 0-D vectors, which cannot come from InsertStridedSliceOp.
    if (castSrcType.getRank() == 0)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;

    auto insertOp =
        bitcastOp.getSource().getDefiningOp<vector::InsertStridedSliceOp>();
    if (!insertOp)
      return failure();

    // Only accept all one strides for now.
    if (llvm::any_of(insertOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = insertOp.getSourceVectorType().getRank();
    // Require insert op to have the same rank for the source and destination
    // vector; other cases to be implemented.
    if (rank != insertOp.getDestVectorType().getRank())
      return failure();

    // Requires that the shape of the insert op source is castable to dstType.
    unsigned sourceWidth = castSrcType.getElementType().getIntOrFloatBitWidth();
    unsigned destinationWidth =
        castDstType.getElementType().getIntOrFloatBitWidth();
    unsigned numElements = destinationWidth / sourceWidth;
    if (insertOp.getSourceVectorType().getNumElements() % numElements != 0)
      return failure();

    ArrayAttr newOffsets = insertOp.getOffsets();
    assert(newOffsets.size() == rank);
    SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
    if (offsets.back() % shrinkRatio != 0)
      return failure();
    offsets.back() = offsets.back() / shrinkRatio;
    newOffsets = rewriter.getI64ArrayAttr(offsets);

    SmallVector<int64_t> srcDims =
        llvm::to_vector<4>(insertOp.getSourceVectorType().getShape());
    srcDims.back() = srcDims.back() / shrinkRatio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());

    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims =
        llvm::to_vector<4>(insertOp.getDestVectorType().getShape());
    dstDims.back() = dstDims.back() / shrinkRatio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    rewriter.replaceOpWithNewOp<vector::InsertStridedSliceOp>(
        bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets,
        insertOp.getStrides());

    return success();
  }
};

// Breaks down vector.bitcast op.
//
// This transforms IR like:
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %cst = vector.splat %c0_f32 : vector<4xf32>
//   %1 = vector.extract_strided_slice %0 {
//          offsets = [0], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %2 = vector.bitcast %1 : vector<4xf16> to vector<2xf32>
//   %4 = vector.insert_strided_slice %2, %cst {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
//   %5 = vector.extract_strided_slice %0 {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %6 = vector.bitcast %5 : vector<4xf16> to vector<2xf32>
//   %7 = vector.insert_strided_slice %6, %cst {
//          offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

public:
  BreakDownVectorBitCast(MLIRContext *context,
                         std::function<bool(vector::BitCastOp)> controlFn,
                         PatternBenefit benefit)
      : OpRewritePattern(context, benefit), controlFn(std::move(controlFn)) {}

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {

    if (controlFn && !controlFn(bitcastOp))
      return failure();

    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Only support rank 1 case for now.
    if (castSrcType.getRank() != 1)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;
    // Nothing to do if it is already bitcasting to a single element.
    if (castSrcLastDim == shrinkRatio)
      return failure();

    Location loc = bitcastOp.getLoc();
    Type elemType = castDstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, castDstType, zero);

    SmallVector<int64_t> sliceShape{castDstLastDim};
    SmallVector<int64_t> strides{1};
    VectorType newCastDstType =
        VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
                        castDstType.getElementType());

    for (int i = 0, e = shrinkRatio; i < e; ++i) {
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, bitcastOp.getSource(), ArrayRef<int64_t>{i * castDstLastDim},
          sliceShape, strides);
      Value bitcast =
          rewriter.create<BitCastOp>(loc, newCastDstType, extracted);
      res = rewriter.create<InsertStridedSliceOp>(
          loc, bitcast, res,
          ArrayRef<int64_t>{i * castDstLastDim / shrinkRatio}, strides);
    }
    rewriter.replaceOp(bitcastOp, res);
    return success();
  }

private:
  std::function<bool(BitCastOp)> controlFn;
};

/// Reorders elementwise(broadcast/splat) to broadcast(elementwise). Ex:
/// ```
/// %a = vector.broadcast %arg0 : index to vector<1x4xindex>
/// %b = vector.broadcast %arg1 : index to vector<1x4xindex>
/// %r = arith.addi %a, %b : vector<1x4xindex>
/// ```
/// Gets converted to:
/// ```
/// %r = arith.addi %arg0, %arg1 : index
/// %b = vector.broadcast %r : index to vector<1x4xindex>
/// ```
///
/// Both `vector.broadcast` and `vector.splat` are supported as broadcasting
/// ops.
struct ReorderElementwiseOpsOnBroadcast final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1)
      return failure();
    if (!llvm::isa<ShapedType>(op->getResults()[0].getType()))
      return failure();
    if (!OpTrait::hasElementwiseMappableTraits(op))
      return failure();
    if (op->getNumOperands() == 0 ||
        op->getResults()[0].getType() != op->getOperand(0).getType()) {
      return failure();
    }
    // Avoid operations that only accept vector types, since broadcast
    // source might be scalar types.
    if (isa<vector::FMAOp>(op)) {
      return failure();
    }

    // Get the type of the lhs operand.
    auto *lhsBcastOrSplat = op->getOperand(0).getDefiningOp();
    if (!lhsBcastOrSplat ||
        !isa<vector::BroadcastOp, vector::SplatOp>(*lhsBcastOrSplat))
      return failure();
    auto lhsBcastOrSplatType = lhsBcastOrSplat->getOperand(0).getType();

    // Make sure that all operands are broadcast from identical types:
    //  * scalar (`vector.broadcast` + `vector.splat`), or
    //  * vector (`vector.broadcast`).
    // Otherwise the re-ordering wouldn't be safe.
    if (!llvm::all_of(op->getOperands(), [&lhsBcastOrSplatType](Value val) {
          auto bcast = val.getDefiningOp<vector::BroadcastOp>();
          if (bcast)
            return (bcast.getOperand().getType() == lhsBcastOrSplatType);
          auto splat = val.getDefiningOp<vector::SplatOp>();
          if (splat)
            return (splat.getOperand().getType() == lhsBcastOrSplatType);
          return false;
        })) {
      return failure();
    }

    // Collect the source values before broadcasting.
    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());
    for (Value operand : op->getOperands()) {
      srcValues.push_back(operand.getDefiningOp()->getOperand(0));
    }

    // Create the "elementwise" Op.
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        lhsBcastOrSplatType, op->getAttrs());

    // Replace the original Op with the elementwise Op.
    auto vectorType = op->getResultTypes()[0];
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, vectorType, elementwiseOp->getResults());

    return success();
  }
};

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// If `dim == 0` then the result will be a 0-D vector.
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
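//
// For example, with n = 4, offset o = 2, and bound b = 5, this computes
//     [0,1,2,3] + [2,2,2,2] = [2,3,4,5] < [5,5,5,5]
// yielding the mask [1,1,1,0].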
static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
                                   bool force32BitVectorIndices, int64_t dim,
                                   Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Type idxType =
      force32BitVectorIndices ? rewriter.getI32Type() : rewriter.getI64Type();
  DenseIntElementsAttr indicesAttr;
  if (dim == 0 && force32BitVectorIndices) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int32_t>{0});
  } else if (dim == 0) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int64_t>{0});
  } else if (force32BitVectorIndices) {
    indicesAttr = rewriter.getI32VectorAttr(
        llvm::to_vector<4>(llvm::seq<int32_t>(0, dim)));
  } else {
    indicesAttr = rewriter.getI64VectorAttr(
        llvm::to_vector<4>(llvm::seq<int64_t>(0, dim)));
  }
  Value indices = rewriter.create<arith::ConstantOp>(loc, indicesAttr);
  // Add in an offset if requested.
  if (off) {
    Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = rewriter.create<vector::SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<arith::AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds =
      rewriter.create<vector::SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, indices,
                                        bounds);
}
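
/// Materializes an in-bounds mask for a rank-1 vector transfer op that may
/// access out-of-bounds elements, and flips the transfer to in_bounds. An
/// illustrative 1-D sketch (the exact IR depends on the matched transfer op):
/// ```
/// %v = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<8xf32>
/// ```
/// becomes
/// ```
/// %d = memref.dim %A, %c0 : memref<?xf32>
/// %b = arith.subi %d, %i : index
/// %mask = vector.create_mask %b : vector<8xi1>
/// %v = vector.transfer_read %A[%i], %pad, %mask {in_bounds = [true]}
///   : memref<?xf32>, vector<8xf32>
/// ```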
template <typename ConcreteOp>
struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
public:
  explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt,
                                   PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<ConcreteOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                PatternRewriter &rewriter) const override {
    if (!xferOp.hasOutOfBoundsDim())
      return failure();

    if (xferOp.getVectorType().getRank() > 1 || xferOp.getIndices().empty())
      return failure();

    Location loc = xferOp->getLoc();
    VectorType vtp = xferOp.getVectorType();

    // Create the in-bounds mask with all elements between [0 .. dim - offset)
    // set and [dim - offset .. vector_length) unset.
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
    unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
    Value off = xferOp.getIndices()[lastIndex];
    Value dim =
        vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex);
    Value b = rewriter.create<arith::SubIOp>(loc, dim.getType(), dim, off);
    Value mask = rewriter.create<vector::CreateMaskOp>(
        loc,
        VectorType::get(vtp.getShape(), rewriter.getI1Type(),
                        vtp.getScalableDims()),
        b);
    if (xferOp.getMask()) {
      // Intersect the in-bounds with the mask specified as an op parameter.
      mask = rewriter.create<arith::AndIOp>(loc, mask, xferOp.getMask());
    }

    rewriter.modifyOpInPlace(xferOp, [&]() {
      xferOp.getMaskMutable().assign(mask);
      xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true}));
    });

    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Conversion pattern for a `vector.create_mask` (0-D and 1-D only).
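/// An illustrative sketch of the lowering, assuming a fixed-size 1-D mask and
/// force32BitVectorIndices (%b_i32 stands for %b cast to i32):
/// ```
/// %m = vector.create_mask %b : vector<4xi1>
/// ```
/// becomes
/// ```
/// %cst = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32>
/// %bs = vector.splat %b_i32 : vector<4xi32>
/// %m = arith.cmpi slt, %cst, %bs : vector<4xi32>
/// ```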
class VectorCreateMaskOpConversion
    : public OpRewritePattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        bool enableIndexOpt,
                                        PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<vector::CreateMaskOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    if (cast<VectorType>(dstType).isScalable())
      return failure();
    int64_t rank = dstType.getRank();
    if (rank > 1)
      return failure();
    rewriter.replaceOp(
        op, buildVectorComparison(rewriter, op, force32BitVectorIndices,
                                  rank == 0 ? 0 : dstType.getDimSize(0),
                                  op.getOperand(0)));
    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Returns true if all the `i1` elements of `constantOp` are set to `value`.
static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) {
  auto denseAttr = dyn_cast<DenseIntElementsAttr>(constantOp.getValue());
  // TODO: Support non-dense constant.
  if (!denseAttr)
    return false;

  assert(denseAttr.getElementType().isInteger(1) && "Unexpected type");
  return denseAttr.isSplat() && denseAttr.getSplatValue<bool>() == value;
}

/// Folds a select operation between an all-true and all-false vector. For now,
/// only single element vectors (i.e., vector<1xi1>) are supported. That is:
///
///   %true = arith.constant dense<true> : vector<1xi1>
///   %false = arith.constant dense<false> : vector<1xi1>
///   %result = arith.select %cond, %true, %false : i1, vector<1xi1>
///   =>
///   %result = vector.broadcast %cond : i1 to vector<1xi1>
///
/// InstCombine seems to handle vectors with multiple elements but not the
/// single element ones.
struct FoldI1Select : public OpRewritePattern<arith::SelectOp> {
  using OpRewritePattern<arith::SelectOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::SelectOp selectOp,
                                PatternRewriter &rewriter) const override {
    auto vecType = dyn_cast<VectorType>(selectOp.getType());
    if (!vecType || !vecType.getElementType().isInteger(1))
      return failure();

    // Only scalar conditions can be folded.
    Value cond = selectOp.getCondition();
    if (isa<VectorType>(cond.getType()))
      return failure();

    // TODO: Support n-D and scalable vectors.
    if (vecType.getRank() != 1 || vecType.isScalable())
      return failure();

    // TODO: Support vectors with multiple elements.
    if (vecType.getShape()[0] != 1)
      return failure();

    auto trueConst = selectOp.getTrueValue().getDefiningOp<arith::ConstantOp>();
    if (!trueConst || !allI1ConstantValuesSetTo(trueConst, true))
      return failure();

    auto falseConst =
        selectOp.getFalseValue().getDefiningOp<arith::ConstantOp>();
    if (!falseConst || !allI1ConstantValuesSetTo(falseConst, false))
      return failure();

    // Replace select with its condition broadcasted to a single element
    // vector.
    auto elemType = rewriter.getIntegerType(vecType.getNumElements());
    auto bcastType = VectorType::get(/*shape=*/{1}, elemType);
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(selectOp, bcastType, cond);
    return success();
  }
};

/// Returns the number of dims that can be folded away from transfer ops. It
/// returns a failure if it cannot determine the number of dims to be folded.
///
/// Ex 1: returns "2" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x1xf32>
///       (the two innermost dims can be dropped by memref.subview ops)
///
/// Ex 2: returns "1" if `srcType` is memref<512x16x1x1xf32> with
///       [8192, 16, 8, 1] strides and `vectorType` is vector<16x16x1x1xf32>
///       (only the innermost unit dim of `srcType` can be dropped)
///
/// Ex 3: returns "0" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x[1]xf32>
///       (the innermost dim in `vectorType` is not a unit dim; it's a
///       "scalable unit" dim)
static FailureOr<size_t>
getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
  SmallVector<int64_t> srcStrides;
  int64_t srcOffset;
  if (failed(getStridesAndOffset(srcType, srcStrides, srcOffset)))
    return failure();

  auto isUnitDim = [](VectorType type, int dim) {
    return type.getDimSize(dim) == 1 && !type.getScalableDims()[dim];
  };

  // According to vector.transfer_read/write semantics, the vector can be a
  // slice. Thus, we have to offset the check index with `rankDiff` in
  // `srcStrides` and source dim sizes.
  size_t result = 0;
  int rankDiff = srcType.getRank() - vectorType.getRank();
  for (int64_t i = 0, e = vectorType.getRank(); i < e; ++i) {
    // Check that the inner dim size is 1 for both the memref type and the
    // vector slice. It can be folded only if they are 1 and the stride is 1.
    int dim = vectorType.getRank() - i - 1;
    if (srcStrides[dim + rankDiff] != 1 ||
        srcType.getDimSize(dim + rankDiff) != 1 || !isUnitDim(vectorType, dim))
      break;
    result++;
  }
  return result;
}

/// Drop innermost contiguous unit dimensions from transfer_read operand.
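/// E.g. (an illustrative sketch, analogous to the transfer_write example
/// below):
///
///    %1 = vector.transfer_read %0[%c0, %arg2, %c0, %c0, %c0], %pad
///      {in_bounds = [true, true, true, true, true]}
///      : memref<1x512x16x1x1xf32>, vector<1x16x16x1x1xf32>
///
/// is replaced with
///
///    %subview = memref.subview %0
///      [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
///      : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
///    %1 = vector.transfer_read %subview[%c0, %arg2, %c0], %pad
///      {in_bounds = [true, true, true]}
///      : memref<1x512x16xf32>, vector<1x16x16xf32>
///    %2 = vector.shape_cast %1
///      : vector<1x16x16xf32> to vector<1x16x16x1x1xf32>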
class DropInnerMostUnitDimsTransferRead
    : public OpRewritePattern<vector::TransferReadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (readOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (readOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(readOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!readOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = readOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    // Make sure that the indices to be dropped are equal to 0.
    // TODO: Deal with cases when the indices are not 0.
    if (!llvm::all_of(readOp.getIndices().take_back(dimsToDrop), isZeroIndex))
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    auto loc = readOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, readOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr =
        readOp.getInBounds()
            ? rewriter.getArrayAttr(
                  readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop))
            : ArrayAttr();
    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
    Value result = rewriter.create<vector::TransferReadOp>(
        loc, resultTargetVecType, rankedReducedView,
        readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        readOp.getPadding(),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(readOp, targetType,
                                                     result);
    return success();
  }
};

/// Drop innermost contiguous unit dimensions from transfer_write operand.
/// E.g.,
///    vector.transfer_write %arg1, %arg0[%c0, %arg2, %c0, %c0, %c0]
///      {in_bounds = [true, true, true, true, true]}
///      : vector<1x16x16x1x1xf32>, memref<1x512x16x1x1xf32>
///
/// will be replaced with
///
///    %subview = memref.subview %arg0
///      [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
///      : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
///    %0 = vector.shape_cast %arg1 : vector<1x16x16x1x1xf32>
///      to vector<1x16x16xf32>
///    vector.transfer_write %0, %subview[%c0, %arg2, %c0]
///      {in_bounds = [true, true, true]}
///      : vector<1x16x16xf32>, memref<1x512x16xf32>
///
/// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`).
class DropInnerMostUnitDimsTransferWrite
    : public OpRewritePattern<vector::TransferWriteOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (writeOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (writeOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(writeOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!writeOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = writeOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    Location loc = writeOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, writeOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr =
        writeOp.getInBounds()
            ? rewriter.getArrayAttr(
                  writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop))
            : ArrayAttr();

    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, writeOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);

    auto shapeCast = rewriter.createOrFold<vector::ShapeCastOp>(
        loc, resultTargetVecType, writeOp.getVector());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        writeOp, shapeCast, rankedReducedView,
        writeOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    return success();
  }
};

/// Canonicalization of a `vector.contraction %a, %b, %c` with row-major matmul
/// semantics to a contraction suitable for MMT (matrix matrix multiplication
/// with the RHS transposed) lowering.
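/// For example (an illustrative sketch), a plain row-major matmul
/// ```
///   %0 = vector.contract {indexing_maps = [
///          affine_map<(m, n, k) -> (m, k)>,
///          affine_map<(m, n, k) -> (k, n)>,
///          affine_map<(m, n, k) -> (m, n)>], ...} %a, %b, %c
/// ```
/// gets its RHS transposed to reach the canonical "TNT" form:
/// ```
///   %bt = vector.transpose %b, [1, 0]
///   %0 = vector.contract {indexing_maps = [
///          affine_map<(m, n, k) -> (m, k)>,
///          affine_map<(m, n, k) -> (n, k)>,
///          affine_map<(m, n, k) -> (m, n)>], ...} %a, %bt, %c
/// ```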
struct CanonicalizeContractMatmulToMMT final
    : OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  using FilterConstraintType =
      std::function<LogicalResult(vector::ContractionOp op)>;

  CanonicalizeContractMatmulToMMT(MLIRContext *context, PatternBenefit benefit,
                                  FilterConstraintType constraint)
      : OpRewritePattern<vector::ContractionOp>(context, benefit),
        filter(std::move(constraint)) {}

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    if (failed(filter(op)))
      return failure();

    Location loc = op.getLoc();
    Value lhs = op.getLhs();
    Value rhs = op.getRhs();
    Value res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [&](MapList m) {
      return AffineMap::inferFromExprList(m, op.getContext());
    };
    AffineExpr m;
    AffineExpr n;
    AffineExpr k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.getIteratorTypes().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
    if (iteratorTypes.size() != 3 ||
        !vector::isParallelIterator(iteratorTypes[0]) ||
        !vector::isParallelIterator(iteratorTypes[1]) ||
        !vector::isReductionIterator(iteratorTypes[2]))
      return rewriter.notifyMatchFailure(op, "contraction is not a gemm");

    // The canonical form is "TNT" = A row-major, B col-major, C row-major.
    const auto canonicalForm = infer({{m, k}, {n, k}, {m, n}});
    if (maps == canonicalForm)
      return rewriter.notifyMatchFailure(op, "already in the canonical form");

    // Create a vector transpose, making sure to re-emit any zero/sign
    // extension after the transpose.
    auto createTranspose = [&rewriter, loc](Value mat) -> Value {
      if (auto sext = mat.getDefiningOp<arith::ExtSIOp>()) {
        Value trans =
            rewriter.create<vector::TransposeOp>(loc, sext.getIn(), perm);
        VectorType newType =
            cast<VectorType>(trans.getType())
                .clone(cast<VectorType>(mat.getType()).getElementType());
        return rewriter.create<arith::ExtSIOp>(loc, newType, trans);
      }
      if (auto zext = mat.getDefiningOp<arith::ExtUIOp>()) {
        Value trans =
            rewriter.create<vector::TransposeOp>(loc, zext.getIn(), perm);
        VectorType newType =
            VectorType::get(cast<VectorType>(trans.getType()).getShape(),
                            cast<VectorType>(mat.getType()).getElementType());
        return rewriter.create<arith::ExtUIOp>(loc, newType, trans);
      }
      return rewriter.create<vector::TransposeOp>(loc, mat, perm);
    };

    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      rhs = createTranspose(rhs);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      rhs = createTranspose(rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = createTranspose(rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = createTranspose(rhs);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = createTranspose(lhs);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return rewriter.notifyMatchFailure(op, "unhandled contraction form");
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res, rewriter.getAffineMapArrayAttr(canonicalForm),
        op.getIteratorTypes());
    return success();
  }

private:
  FilterConstraintType filter;
};

/// Pattern to fold arithmetic extensions on floating point data types into
/// vector contraction operations. linalg.matmul introduces arithmetic
/// extensions on its operands; see the MLIR snippet below for details.
/// ```mlir
/// "linalg.matmul"(%lhs, %rhs, %acc) ({
///   ^bb0(%arg1: f16, %arg2: f16, %arg3: f32):
///     %lhs_f32 = "arith.extf"(%arg1) : (f16) -> f32
///     %rhs_f32 = "arith.extf"(%arg2) : (f16) -> f32
///     %mul = "arith.mulf"(%lhs_f32, %rhs_f32) : (f32, f32) -> f32
///     %acc = "arith.addf"(%arg3, %mul) : (f32, f32) -> f32
///     "linalg.yield"(%acc) : (f32) -> ()
/// })
/// ```
/// Keeping the extensions outside the contraction blocks the native use of
/// mixed-precision NVIDIA Ampere Tensor Cores, i.e.,
/// `mma.sync.*.f32.f16.f16.f32` and `mma.sync.*.f32.bf16.bf16.f32`. This
/// pattern folds the arithmetic extensions into the vector contraction and
/// enables the use of native mixed-precision Tensor Core instructions.
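///
/// For example (an illustrative sketch; `vector.contract` attributes elided,
/// and the same folding applies to `arith.extsi`):
/// ```mlir
/// %lhs_f32 = arith.extf %lhs : vector<16x8xf16> to vector<16x8xf32>
/// %rhs_f32 = arith.extf %rhs : vector<8x16xf16> to vector<8x16xf32>
/// %res = vector.contract {...} %lhs_f32, %rhs_f32, %acc
///   : vector<16x8xf32>, vector<8x16xf32> into vector<16x16xf32>
/// ==>
/// %res = vector.contract {...} %lhs, %rhs, %acc
///   : vector<16x8xf16>, vector<8x16xf16> into vector<16x16xf32>
/// ```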
template <typename ExtOp>
struct FoldArithExtIntoContractionOp
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    auto lhsDefOp = contractOp.getLhs().getDefiningOp<ExtOp>();
    auto rhsDefOp = contractOp.getRhs().getDefiningOp<ExtOp>();

    if (!lhsDefOp || !rhsDefOp) {
      return rewriter.notifyMatchFailure(contractOp,
                                         "no defining op on contract operands");
    }

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhsDefOp->getOperand(0), rhsDefOp->getOperand(0),
        contractOp.getAcc(), contractOp.getIndexingMapsAttr(),
        contractOp.getIteratorTypesAttr());

    return success();
  }
};

/// Pattern to fold a chained reduction into a series of vector additions and
/// a final reduction. This form should require fewer subgroup operations.
///
/// ```mlir
/// %a = vector.reduction <add> %x, %acc
/// %b = vector.reduction <add> %y, %a
/// ==>
/// %a = arith.addf %x, %y
/// %b = vector.reduction <add> %a, %acc
/// ```
struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    // TODO: Handle other combining kinds.
    if (op.getKind() != vector::CombiningKind::ADD)
      return failure();

    // The accumulator is optional.
    Value acc = op.getAcc();
    if (!acc)
      return failure();

    if (!acc.getType().isIntOrFloat())
      return failure();

    auto parentReduction = acc.getDefiningOp<vector::ReductionOp>();
    if (!parentReduction)
      return failure();
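
    // Combine the two reduced vectors with a single vector add and keep the
    // grandparent accumulator on the remaining reduction:
    //   reduction(y, reduction(x, acc)) ==> reduction(x + y, acc).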
    Location loc = op.getLoc();
    Value vAdd;
    if (isa<IntegerType>(acc.getType())) {
      vAdd = rewriter.createOrFold<arith::AddIOp>(
          loc, parentReduction.getVector(), op.getVector());
    } else {
      vAdd = rewriter.create<arith::AddFOp>(loc, parentReduction.getVector(),
                                            op.getVector());
    }
    rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), vAdd,
                                                     parentReduction.getAcc());
    return success();
  }
};

/// For vectors with either a leading or a trailing unit dim, replaces:
///   elementwise(a, b)
/// with:
///   sc_a = shape_cast(a)
///   sc_b = shape_cast(b)
///   res = elementwise(sc_a, sc_b)
///   return shape_cast(res)
/// The newly inserted shape_cast Ops drop (before the elementwise Op) and then
/// restore (after the elementwise Op) the unit dim. Vectors `a` and `b` are
/// required to be rank > 1.
///
/// Ex:
/// ```
/// %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
/// %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
/// ```
///
/// gets converted to:
///
/// ```
/// %B_row_sc = vector.shape_cast %B_row : vector<1x[4]xf32> to vector<[4]xf32>
/// %A_row_sc = vector.shape_cast %A_row : vector<1x[4]xf32> to vector<[4]xf32>
/// %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
/// %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
/// %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
/// ```
///
/// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
/// `%cast`.
struct DropUnitDimFromElementwiseOps final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    auto resultVectorType = dyn_cast<VectorType>(op->getResult(0).getType());
    if (!resultVectorType)
      return failure();

    // Check the operand pre-conditions. For `Elementwise` ops all operands are
    // guaranteed to have identical shapes (with some exceptions such as
    // `arith.select`), so it suffices to only check one of them.
    auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
    if (!sourceVectorType)
      return failure();
    if (sourceVectorType.getRank() < 2)
      return failure();

    bool hasTrailingDimUnitFixed =
        ((sourceVectorType.getShape().back() == 1) &&
         (!sourceVectorType.getScalableDims().back()));
    bool hasLeadingDimUnitFixed =
        ((sourceVectorType.getShape().front() == 1) &&
         (!sourceVectorType.getScalableDims().front()));
    if (!hasLeadingDimUnitFixed && !hasTrailingDimUnitFixed)
      return failure();

    // Drop the leading/trailing unit dim by applying vector.shape_cast to all
    // operands.
    int64_t dim = hasLeadingDimUnitFixed ? 0 : sourceVectorType.getRank() - 1;

    SmallVector<Value> newOperands;
    auto loc = op->getLoc();
    for (auto operand : op->getOperands()) {
      auto opVectorType = cast<VectorType>(operand.getType());
      VectorType newVType = VectorType::Builder(opVectorType).dropDim(dim);
      auto opSC = rewriter.create<vector::ShapeCastOp>(loc, newVType, operand);
      newOperands.push_back(opSC);
    }

    VectorType newResultVectorType =
        VectorType::Builder(resultVectorType).dropDim(dim);
    // Create an updated elementwise Op without the leading/trailing unit dim.
    Operation *elementwiseOp =
        rewriter.create(loc, op->getName().getIdentifier(), newOperands,
                        newResultVectorType, op->getAttrs());

    // Restore the leading/trailing unit dim by applying vector.shape_cast
    // to the result.
    rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
                                             elementwiseOp->getResult(0));

    return success();
  }
};

/// Pattern to eliminate redundant zero-constants added to reduction operands.
/// It's enough for there to be one initial zero value, so we can eliminate the
/// extra ones that feed into `vector.reduction <add>`. These get created by the
/// `ChainedReduction` pattern.
///
/// ```mlir
/// %a = arith.addf %x, %zero
/// %b = arith.addf %a, %y
/// %c = vector.reduction <add> %b, %acc
/// ==>
/// %b = arith.addf %x, %y
/// %c = vector.reduction <add> %b, %acc
/// ```
struct ReduceRedundantZero final : OpRewritePattern<vector::ReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    // TODO: Handle other reduction kinds and their identity values.
    if (op.getKind() != vector::CombiningKind::ADD)
      return failure();

    Type elemType = op.getSourceVectorType().getElementType();
    // The integer case should be handled by `arith.addi` folders, only check
    // for floats here.
    if (!isa<FloatType>(elemType))
      return failure();
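
    // Match reduction(addf(addf(x, zero), y), acc) and rewrite it to
    // reduction(addf(x, y), acc), dropping the redundant zero term.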
    auto vAdd = op.getVector().getDefiningOp<arith::AddFOp>();
    if (!vAdd)
      return failure();
    auto addLhs = vAdd.getLhs().getDefiningOp<arith::AddFOp>();
    if (!addLhs)
      return failure();

    if (!matchPattern(addLhs.getRhs(), m_AnyZeroFloat()))
      return failure();

    auto newAdd = rewriter.create<arith::AddFOp>(vAdd.getLoc(),
                                                 addLhs.getLhs(),
                                                 vAdd.getRhs());
    rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), newAdd,
                                                     op.getAcc());
    return success();
  }
};

/// Example:
/// ```
/// %a = vector.reduction <add> %x : vector<2xf32> into f32
/// ```
/// is transformed into:
/// ```
/// %y = vector.extract %x[0] : f32 from vector<2xf32>
/// %z = vector.extract %x[1] : f32 from vector<2xf32>
/// %a = arith.addf %y, %z : f32
/// ```
struct BreakDownVectorReduction final : OpRewritePattern<vector::ReductionOp> {
  BreakDownVectorReduction(MLIRContext *context,
                           unsigned maxNumElementsToExtract,
                           PatternBenefit benefit)
      : OpRewritePattern(context, benefit),
        maxNumElementsToExtract(maxNumElementsToExtract) {}

  LogicalResult matchAndRewrite(vector::ReductionOp op,
                                PatternRewriter &rewriter) const override {
    VectorType type = op.getSourceVectorType();
    if (type.isScalable() || op.isMasked())
      return failure();
    assert(type.getRank() == 1 && "Expected a 1-d vector");

    int64_t numElems = type.getNumElements();
    if (numElems > maxNumElementsToExtract) {
      return rewriter.notifyMatchFailure(
          op, llvm::formatv("has too many vector elements ({0}) to break down "
                            "(max allowed: {1})",
                            numElems, maxNumElementsToExtract));
    }
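
    // Extract every element and combine them with a chain of scalar arith
    // reductions, folding the optional accumulator in last.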
    Location loc = op.getLoc();
    SmallVector<Value> extracted(numElems, nullptr);
    for (auto [idx, extractedElem] : llvm::enumerate(extracted))
      extractedElem = rewriter.create<vector::ExtractOp>(
          loc, op.getVector(), static_cast<int64_t>(idx));

    Value res = extracted.front();
    for (auto extractedElem : llvm::drop_begin(extracted))
      res = vector::makeArithReduction(rewriter, loc, op.getKind(), res,
                                       extractedElem, op.getFastmathAttr());
    if (Value acc = op.getAcc())
      res = vector::makeArithReduction(rewriter, loc, op.getKind(), res, acc,
                                       op.getFastmathAttr());

    rewriter.replaceOp(op, res);
    return success();
  }

private:
  unsigned maxNumElementsToExtract = 0;
};

/// Fold `mulf(tr(broadcast(A)), broadcast(B))` into `vector.outerproduct(A,
/// B)`.
/// Example:
///   %lhsBcast = vector.broadcast %lhs : vector<4xi32> to vector<4x4xi32>
///   %lhsT = vector.transpose %lhsBcast, [1, 0]
///     : vector<4x4xi32> to vector<4x4xi32>
///   %rhsBcast = vector.broadcast %rhs : vector<4xi32> to vector<4x4xi32>
///   %mul = arith.muli %lhsT, %rhsBcast : vector<4x4xi32>
///
/// Becomes:
///
///   %res = vector.outerproduct %lhs, %rhs : vector<4xi32>, vector<4xi32>
///
/// Supports only 1D-to-2D broadcasts. The following cases are not supported:
///   %ex1 = vector.broadcast %lhsCast : vector<1x4xf32> to vector<4x4xf32>
///   %ex2 = vector.broadcast %lhsCast : f32 to vector<4x4xf32>
///   %ex3 = vector.broadcast %lhsCast : vector<1x1xf32> to vector<4x4xf32>
template <typename MulOpType>
struct FoldArithToVectorOuterProduct : public OpRewritePattern<MulOpType> {
  using OpRewritePattern<MulOpType>::OpRewritePattern;
  // Returns whether a vector.broadcast matches the requirements for an
  // outerproduct pattern, i.e. a 1D-to-2D broadcastOp without broadcasted
  // unit dimensions.
  bool isValidBroadcastSource(vector::BroadcastOp broadcastOp) const {
    // Fail if this is not a 1D-to-2D broadcast, to avoid generating
    // shape_casts/broadcasts that do not belong in this pattern.
    if (!broadcastOp.computeBroadcastedUnitDims().empty())
      return false;
    // The source must be a rank-1 vector; this rejects scalar-like sources
    // such as f32 or vector<f32>.
    auto srcType = dyn_cast<VectorType>(broadcastOp.getSourceType());
    return srcType && srcType.getRank() == 1;
  }

  LogicalResult matchAndRewrite(MulOpType mulOp,
                                PatternRewriter &rewriter) const override {
    auto resType = llvm::dyn_cast<VectorType>(mulOp.getResult().getType());
    if (!resType)
      return failure();
    if (resType.getRank() != 2)
      return failure();
    /// If operandA can be written as tr(broadcast(A)) and operandB as
    /// broadcast(B), where the broadcasts are 1D-to-2D, create and return
    /// vector.outerproduct(A, B). Returns failure() otherwise.
    auto matchOuterProduct =
        [&](Value operandA,
            Value operandB) -> FailureOr<vector::OuterProductOp> {
      auto transposedLhs = operandA.getDefiningOp<vector::TransposeOp>();
      if (!transposedLhs)
        return failure();
      // Fail unless this is a true 2-D matrix transpose.
      ArrayRef<int64_t> permutation = transposedLhs.getPermutation();
      if (permutation.size() != 2 || permutation[0] != 1 || permutation[1] != 0)
        return failure();

      auto broadcastedLhs =
          transposedLhs.getVector().getDefiningOp<vector::BroadcastOp>();
      if (!broadcastedLhs || !isValidBroadcastSource(broadcastedLhs))
        return failure();

      auto broadcastedRhs = operandB.getDefiningOp<vector::BroadcastOp>();
      if (!broadcastedRhs || !isValidBroadcastSource(broadcastedRhs))
        return failure();

      return rewriter.create<vector::OuterProductOp>(
          mulOp->getLoc(), resType, broadcastedLhs.getSource(),
          broadcastedRhs.getSource(), Value(), vector::CombiningKind::ADD);
    };

    Value lhs = mulOp->getOperand(0), rhs = mulOp->getOperand(1);
    auto maybeOuterP = matchOuterProduct(lhs, rhs);
    // Handle commutativity: the transposed op must be the outerproduct LHS.
    if (failed(maybeOuterP))
      maybeOuterP = matchOuterProduct(rhs, lhs);
    if (failed(maybeOuterP))
      return failure();
    rewriter.replaceOp(mulOp, maybeOuterP->getResult());
    return success();
  }
};

} // namespace
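
// Note: the `populate*` entry points below only collect patterns; running
// them is up to the caller. A minimal sketch (assuming a typical
// `OperationPass` and the greedy rewrite driver from
// "mlir/Transforms/GreedyPatternRewriteDriver.h"):
//
//   RewritePatternSet patterns(&getContext());
//   vector::populateFoldArithExtensionPatterns(patterns);
//   if (failed(applyPatternsAndFoldGreedily(getOperation(),
//                                           std::move(patterns))))
//     signalPassFailure();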

void mlir::vector::populateFoldArithExtensionPatterns(
    RewritePatternSet &patterns) {
  patterns.add<FoldArithExtIntoContractionOp<arith::ExtFOp>,
               FoldArithExtIntoContractionOp<arith::ExtSIOp>>(
      patterns.getContext());
}

void mlir::vector::populateVectorMaskMaterializationPatterns(
    RewritePatternSet &patterns, bool force32BitVectorIndices,
    PatternBenefit benefit) {
  patterns.add<VectorCreateMaskOpConversion,
               MaterializeTransferMask<vector::TransferReadOp>,
               MaterializeTransferMask<vector::TransferWriteOp>>(
      patterns.getContext(), force32BitVectorIndices, benefit);
  patterns.add<FoldI1Select>(patterns.getContext(), benefit);
}

void mlir::vector::populateShapeCastFoldingPatterns(RewritePatternSet &patterns,
                                                    PatternBenefit benefit) {
  patterns.add<ShapeCastOpFolder>(patterns.getContext(), benefit);
}

void mlir::vector::populateDropUnitDimWithShapeCastPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<DropUnitDimFromElementwiseOps, ShapeCastOpFolder>(
      patterns.getContext(), benefit);
}

void mlir::vector::populateBubbleVectorBitCastOpPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<BubbleDownVectorBitCastForExtract,
               BubbleDownBitCastForStridedSliceExtract,
               BubbleUpBitCastForInsert, BubbleUpBitCastForStridedSliceInsert>(
      patterns.getContext(), benefit);
}

void mlir::vector::populateBreakDownVectorBitCastOpPatterns(
    RewritePatternSet &patterns,
    std::function<bool(vector::BitCastOp)> controlFn, PatternBenefit benefit) {
  patterns.add<BreakDownVectorBitCast>(patterns.getContext(),
                                       std::move(controlFn), benefit);
}

void mlir::vector::populateVectorContractCanonicalizeMatmulToMMT(
    RewritePatternSet &patterns,
    std::function<LogicalResult(vector::ContractionOp)> constraint,
    PatternBenefit benefit) {
  patterns.add<CanonicalizeContractMatmulToMMT>(patterns.getContext(), benefit,
                                                std::move(constraint));
}

void mlir::vector::populateVectorReductionToContractPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<MultiReduceToContract, CombineContractBroadcast,
               CombineContractABTranspose, CombineContractResultTranspose,
               ReorderCastOpsOnBroadcast, ReorderElementwiseOpsOnTranspose>(
      patterns.getContext(), benefit);
}

void mlir::vector::
    populateVectorTransferCollapseInnerMostContiguousDimsPatterns(
        RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<DropInnerMostUnitDimsTransferRead,
               DropInnerMostUnitDimsTransferWrite>(patterns.getContext(),
                                                   benefit);
}

void mlir::vector::populateSinkVectorBroadcastPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<ReorderCastOpsOnBroadcast, ReorderElementwiseOpsOnBroadcast>(
      patterns.getContext(), benefit);
}

void mlir::vector::populateChainedVectorReductionFoldingPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<ChainedReduction>(patterns.getContext(), benefit);
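  // Run `ReduceRedundantZero` at a slightly higher benefit so that the
  // redundant zero-adds left behind by `ChainedReduction` are cleaned up
  // eagerly.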
  patterns.add<ReduceRedundantZero>(patterns.getContext(),
                                    PatternBenefit(benefit.getBenefit() + 1));
}

void mlir::vector::populateBreakDownVectorReductionPatterns(
    RewritePatternSet &patterns, unsigned maxNumElementsToExtract,
    PatternBenefit benefit) {
  patterns.add<BreakDownVectorReduction>(patterns.getContext(),
                                         maxNumElementsToExtract, benefit);
}

void mlir::vector::populateElementwiseToVectorOpsPatterns(
    RewritePatternSet &patterns) {
  patterns.add<FoldArithToVectorOuterProduct<arith::MulFOp>,
               FoldArithToVectorOuterProduct<arith::MulIOp>>(
      patterns.getContext());
}

//===----------------------------------------------------------------------===//
// TableGen'd enum attribute definitions
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransformsEnums.cpp.inc"