//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

template <typename T>
static LLVM::LLVMType getPtrToElementType(T containerType,
                                          LLVMTypeConverter &lowering) {
  return lowering.convertType(containerType.getElementType())
      .template cast<LLVM::LLVMType>()
      .getPointerTo();
}

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &lowering, Location loc, Value val1,
                       Value val2, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, lowering.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
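// For a rank-1 value (an LLVM vector) this emits an extractelement with a
// constant index; for higher ranks, which lower to LLVM arrays of vectors,
// it emits an extractvalue with a position attribute instead.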
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &lowering, Location loc, Value val,
                        Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, lowering.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO(rriddle): Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

namespace {
class VectorBroadcastOpConversion : public LLVMOpLowering {
public:
  explicit VectorBroadcastOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::BroadcastOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto broadcastOp = cast<vector::BroadcastOp>(op);
    VectorType dstVectorType = broadcastOp.getVectorType();
    if (lowering.convertType(dstVectorType) == nullptr)
      return matchFailure();
    // Rewrite when the full vector type can be lowered (which
    // implies all 'reduced' types can be lowered too).
    auto adaptor = vector::BroadcastOpOperandAdaptor(operands);
    VectorType srcVectorType =
        broadcastOp.getSourceType().dyn_cast<VectorType>();
    rewriter.replaceOp(
        op, expandRanks(adaptor.source(), // source value to be expanded
                        op->getLoc(),     // location of original broadcast
                        srcVectorType, dstVectorType, rewriter));
    return matchSuccess();
  }

private:
  // Expands the given source value over all the ranks, as defined
  // by the source and destination type (a null source type denotes
  // expansion from a scalar value into a vector).
  //
  // TODO(ajcbik): consider replacing this one-pattern lowering
  //               with a two-pattern lowering using other vector
  //               ops once all insert/extract/shuffle operations
  //               are available with lowering implementation.
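  //
  // The expansion recurses rank by rank: while the source rank is still
  // smaller than the destination rank, duplicateOneRank broadcasts into the
  // leading dimension; once the ranks match, any mismatched dimension is
  // handled by stretchOneRank, and otherwise the value passes through
  // unchanged.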
  //
  Value expandRanks(Value value, Location loc, VectorType srcVectorType,
                    VectorType dstVectorType,
                    ConversionPatternRewriter &rewriter) const {
    assert((dstVectorType != nullptr) && "invalid result type in broadcast");
    // Determine rank of source and destination.
    int64_t srcRank = srcVectorType ? srcVectorType.getRank() : 0;
    int64_t dstRank = dstVectorType.getRank();
    int64_t curDim = dstVectorType.getDimSize(0);
    if (srcRank < dstRank)
      // Duplicate this rank.
      return duplicateOneRank(value, loc, srcVectorType, dstVectorType,
                              dstRank, curDim, rewriter);
    // If all trailing dimensions are the same, the broadcast consists of
    // simply passing through the source value and we are done. Otherwise,
    // any non-matching dimension forces a stretch along this rank.
    assert((srcVectorType != nullptr) && (srcRank > 0) &&
           (srcRank == dstRank) && "invalid rank in broadcast");
    for (int64_t r = 0; r < dstRank; r++) {
      if (srcVectorType.getDimSize(r) != dstVectorType.getDimSize(r)) {
        return stretchOneRank(value, loc, srcVectorType, dstVectorType,
                              dstRank, curDim, rewriter);
      }
    }
    return value;
  }

  // Picks the best way to duplicate a single rank. For the 1-D case, a
  // single insert-elt/shuffle is the most efficient expansion. For higher
  // dimensions, however, we need dim x insert-values on a new broadcast
  // with one less leading dimension, which will be lowered "recursively"
  // to matching LLVM IR.
  // For example:
  //   v = broadcast s : f32 to vector<4x2xf32>
  // becomes:
  //   x = broadcast s : f32 to vector<2xf32>
  //   v = [x,x,x,x]
  // becomes:
  //   x = [s,s]
  //   v = [x,x,x,x]
  Value duplicateOneRank(Value value, Location loc, VectorType srcVectorType,
                         VectorType dstVectorType, int64_t rank, int64_t dim,
                         ConversionPatternRewriter &rewriter) const {
    Type llvmType = lowering.convertType(dstVectorType);
    assert((llvmType != nullptr) && "unlowerable vector type");
    if (rank == 1) {
      Value undef = rewriter.create<LLVM::UndefOp>(loc, llvmType);
      Value expand =
          insertOne(rewriter, lowering, loc, undef, value, llvmType, rank, 0);
      SmallVector<int32_t, 4> zeroValues(dim, 0);
      return rewriter.create<LLVM::ShuffleVectorOp>(
          loc, expand, undef, rewriter.getI32ArrayAttr(zeroValues));
    }
    Value expand = expandRanks(value, loc, srcVectorType,
                               reducedVectorTypeFront(dstVectorType), rewriter);
    Value result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    for (int64_t d = 0; d < dim; ++d) {
      result =
          insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
    }
    return result;
  }

  // Picks the best way to stretch a single rank. For the 1-D case, a
  // single insert-elt/shuffle is the most efficient expansion when at
  // a stretch. Otherwise, every dimension needs to be expanded
  // individually and inserted into the resulting vector.
  // For example:
  //   v = broadcast w : vector<4x1x2xf32> to vector<4x2x2xf32>
  // becomes:
  //   a = broadcast w[0] : vector<1x2xf32> to vector<2x2xf32>
  //   b = broadcast w[1] : vector<1x2xf32> to vector<2x2xf32>
  //   c = broadcast w[2] : vector<1x2xf32> to vector<2x2xf32>
  //   d = broadcast w[3] : vector<1x2xf32> to vector<2x2xf32>
  //   v = [a,b,c,d]
  // becomes:
  //   x = broadcast w[0][0] : vector<2xf32> to vector<2x2xf32>
  //   y = broadcast w[1][0] : vector<2xf32> to vector<2x2xf32>
  //   a = [x, y]
  //   etc.
  Value stretchOneRank(Value value, Location loc, VectorType srcVectorType,
                       VectorType dstVectorType, int64_t rank, int64_t dim,
                       ConversionPatternRewriter &rewriter) const {
    Type llvmType = lowering.convertType(dstVectorType);
    assert((llvmType != nullptr) && "unlowerable vector type");
    Value result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    bool atStretch = dim != srcVectorType.getDimSize(0);
    if (rank == 1) {
      assert(atStretch);
      Type redLlvmType = lowering.convertType(dstVectorType.getElementType());
      Value one =
          extractOne(rewriter, lowering, loc, value, redLlvmType, rank, 0);
      Value expand =
          insertOne(rewriter, lowering, loc, result, one, llvmType, rank, 0);
      SmallVector<int32_t, 4> zeroValues(dim, 0);
      return rewriter.create<LLVM::ShuffleVectorOp>(
          loc, expand, result, rewriter.getI32ArrayAttr(zeroValues));
    }
    VectorType redSrcType = reducedVectorTypeFront(srcVectorType);
    VectorType redDstType = reducedVectorTypeFront(dstVectorType);
    Type redLlvmType = lowering.convertType(redSrcType);
    for (int64_t d = 0; d < dim; ++d) {
      int64_t pos = atStretch ? 0 : d;
      Value one =
          extractOne(rewriter, lowering, loc, value, redLlvmType, rank, pos);
      Value expand = expandRanks(one, loc, redSrcType, redDstType, rewriter);
      result =
          insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
    }
    return result;
  }
};

class VectorShuffleOpConversion : public LLVMOpLowering {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ShuffleOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpOperandAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = lowering.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return matchSuccess();
    }

    // For all other cases, insert the individual values one by one.
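    // Each mask entry selects position `extPos` from v1 when it is below
    // v1's leading dimension size, or from v2 (after rebasing the index)
    // otherwise; the extracted value is then inserted at the next position
    // of the result.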
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract =
          extractOne(rewriter, lowering, loc, value, llvmType, rank, extPos);
      insert = insertOne(rewriter, lowering, loc, insert, extract, llvmType,
                         rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return matchSuccess();
  }
};

class VectorExtractElementOpConversion : public LLVMOpLowering {
public:
  explicit VectorExtractElementOpConversion(MLIRContext *context,
                                            LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ExtractElementOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpOperandAdaptor(operands);
    auto extractEltOp = cast<vector::ExtractElementOp>(op);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = lowering.convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        op, llvmType, adaptor.vector(), adaptor.position());
    return matchSuccess();
  }
};

class VectorExtractOpConversion : public LLVMOpLowering {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ExtractOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpOperandAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = lowering.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return matchFailure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return matchSuccess();
    }

    // Potential extraction of 1-D vector from array.
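    // When the position has more than one index, a single extractvalue with
    // all but the last index peels off the innermost 1-D vector; the last
    // index is then handled by an extractelement below.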
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, lowering.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return matchSuccess();
  }
};

class VectorInsertElementOpConversion : public LLVMOpLowering {
public:
  explicit VectorInsertElementOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::InsertElementOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpOperandAdaptor(operands);
    auto insertEltOp = cast<vector::InsertElementOp>(op);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = lowering.convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
    return matchSuccess();
  }
};

class VectorInsertOpConversion : public LLVMOpLowering {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::InsertOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpOperandAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = lowering.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return matchFailure();

    // One-shot insertion of a vector into an array (only requires
    // insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return matchSuccess();
    }

    // Potential extraction of 1-D vector from array.
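    // For a scalar source the insertion proceeds in three steps: extract the
    // innermost 1-D vector from the destination (when the position is deeper
    // than one level), insertelement the scalar into it, and insert the
    // updated 1-D vector back into the destination array.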
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, lowering.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, lowering.convertType(oneDVectorType), extracted, adaptor.source(),
        constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return matchSuccess();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(InsertStridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return matchFailure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return matchFailure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
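    // E.g. for a vector<4xf32> inserted into a vector<8x16xf32> at offsets
    // [2, 4]: dest[2] : vector<16xf32> was extracted above, the source is
    // inserted into it at offset [4] below, and the updated subvector is
    // re-inserted at dest[2].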
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return matchSuccess();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, the op is unrolled along its leading
// dimension:
//   1. each subvector (or element) of the source is extracted
//   2. for a subvector, the corresponding subvector of the destination is
//      extracted and a new InsertStridedSlice op of smaller rank inserts the
//      source subvector into it
//   3. the resulting value is inserted into the running result at the proper
//      offset
//   4. the op is replaced by the final result.
// The reduced-rank InsertStridedSlice from step 2. is lowered by recursively
// invoking this same pattern.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(InsertStridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return matchFailure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return matchFailure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return matchSuccess();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the outermost dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from the
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        InsertStridedSliceOp insertStridedSliceOp =
            rewriter.create<InsertStridedSliceOp>(
                loc, extractedSource, extractedDest,
                getI64SubArray(op.offsets(), /*dropFront=*/1),
                getI64SubArray(op.strides(), /*dropFront=*/1));
        // Call matchAndRewrite recursively from within the pattern. This
        // circumvents the current limitation that a given pattern cannot
        // be called multiple times by the PatternRewrite infrastructure (to
        // avoid infinite recursion, but in this case, infinite recursion
        // cannot happen because the rank is strictly decreasing).
        // TODO(rriddle, nicolasvasilache) Implement something like a hook for
        // a potential function that must decrease and allow the same pattern
        // multiple times.
        auto success = matchAndRewrite(insertStridedSliceOp, rewriter);
        (void)success;
        assert(success && "Unexpected failure");
        extractedSource = insertStridedSliceOp;
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return matchSuccess();
  }
};

class VectorOuterProductOpConversion : public LLVMOpLowering {
public:
  explicit VectorOuterProductOpConversion(MLIRContext *context,
                                          LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::OuterProductOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::OuterProductOpOperandAdaptor(operands);
    auto *ctx = op->getContext();
    auto vLHS = adaptor.lhs().getType().cast<LLVM::LLVMType>();
    auto vRHS = adaptor.rhs().getType().cast<LLVM::LLVMType>();
    auto rankLHS = vLHS.getUnderlyingType()->getVectorNumElements();
    auto rankRHS = vRHS.getUnderlyingType()->getVectorNumElements();
    auto llvmArrayOfVectType = lowering.convertType(
        cast<vector::OuterProductOp>(op).getResult().getType());
    Value desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayOfVectType);
    Value a = adaptor.lhs(), b = adaptor.rhs();
    Value acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front();
    SmallVector<Value, 8> lhs, accs;
    lhs.reserve(rankLHS);
    accs.reserve(rankLHS);
    for (unsigned d = 0, e = rankLHS; d < e; ++d) {
      // shufflevector explicitly requires i32.
      auto attr = rewriter.getI32IntegerAttr(d);
      SmallVector<Attribute, 4> bcastAttr(rankRHS, attr);
      auto bcastArrayAttr = ArrayAttr::get(bcastAttr, ctx);
      Value aD = nullptr, accD = nullptr;
      // 1. Broadcast the element a[d] into vector aD.
      aD = rewriter.create<LLVM::ShuffleVectorOp>(loc, a, a, bcastArrayAttr);
      // 2. If acc is present, extract 1-d vector acc[d] into accD.
      if (acc)
        accD = rewriter.create<LLVM::ExtractValueOp>(
            loc, vRHS, acc, rewriter.getI64ArrayAttr(d));
      // 3. Compute aD outer b (plus accD, if relevant).
      Value aOuterbD =
          accD ? rewriter.create<LLVM::FMulAddOp>(loc, vRHS, aD, b, accD)
                     .getResult()
               : rewriter.create<LLVM::FMulOp>(loc, aD, b).getResult();
      // 4. Insert as value `d` in the descriptor.
      desc = rewriter.create<LLVM::InsertValueOp>(loc, llvmArrayOfVectType,
                                                  desc, aOuterbD,
                                                  rewriter.getI64ArrayAttr(d));
    }
    rewriter.replaceOp(op, desc);
    return matchSuccess();
  }
};

class VectorTypeCastOpConversion : public LLVMOpLowering {
public:
  explicit VectorTypeCastOpConversion(MLIRContext *context,
                                      LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::TypeCastOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();

    // Only static shape casts supported atm.
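    // Only contiguous source layouts are supported as well; both conditions
    // are checked below before the target descriptor is rebuilt with bitcast
    // pointers, a zero offset, and constant sizes/strides.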
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return matchFailure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMType>();
    if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
      return matchFailure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = lowering.convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMType>();
    if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
      return matchFailure();

    int64_t offset;
    SmallVector<int64_t, 4> strides;
    auto successStrides =
        getStridesAndOffset(sourceMemRefType, strides, offset);
    bool isContiguous = (strides.back() == 1);
    if (isContiguous) {
      auto sizes = sourceMemRefType.getShape();
      for (int index = 0, e = strides.size() - 2; index < e; ++index) {
        if (strides[index] != strides[index + 1] * sizes[index + 1]) {
          isContiguous = false;
          break;
        }
      }
    }
    // Only contiguous source tensors supported atm.
    if (failed(successStrides) || !isContiguous)
      return matchFailure();

    auto int64Ty = LLVM::LLVMType::getInt64Ty(lowering.getDialect());

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(op, {desc});
    return matchSuccess();
  }
};

class VectorPrintOpConversion : public LLVMOpLowering {
public:
  explicit VectorPrintOpConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::PrintOp::getOperationName(), context,
                       typeConverter) {}

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO(ajcbik): rely solely on libc in future? something else?
  //
  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto printOp = cast<vector::PrintOp>(op);
    auto adaptor = vector::PrintOpOperandAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (lowering.convertType(printType) == nullptr)
      return matchFailure();

    // Make sure element type has runtime support (currently just
    // Float/Double).
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    Operation *printer;
    if (eltType.isF32())
      printer = getPrintFloat(op);
    else if (eltType.isF64())
      printer = getPrintDouble(op);
    else
      return matchFailure();

    // Unroll vector into elementary print calls.
    emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
    emitCall(rewriter, op->getLoc(), getPrintNewline(op));
    rewriter.eraseOp(op);
    return matchSuccess();
  }

private:
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc, getPrintOpen(op));
    Operation *printComma = getPrintComma(op);
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = lowering.convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal =
          extractOne(rewriter, lowering, loc, value, llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc, getPrintClose(op));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
                                  rewriter.getSymbolRefAttr(ref), params);
  }

  // Helper for printer method declaration (first hit) and lookup.
  static Operation *getPrint(Operation *op, LLVM::LLVMDialect *dialect,
                             StringRef name, ArrayRef<LLVM::LLVMType> params) {
    auto module = op->getParentOfType<ModuleOp>();
    auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
    if (func)
      return func;
    OpBuilder moduleBuilder(module.getBodyRegion());
    return moduleBuilder.create<LLVM::LLVMFuncOp>(
        op->getLoc(), name,
        LLVM::LLVMType::getFunctionTy(LLVM::LLVMType::getVoidTy(dialect),
                                      params, /*isVarArg=*/false));
  }

  // Helpers for method names.
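  // Each helper looks up (or, on first use, declares) the corresponding
  // runtime function in the enclosing module: print_f32 and print_f64 take a
  // single scalar argument, the remaining ones take no arguments.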
  Operation *getPrintFloat(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_f32",
                    LLVM::LLVMType::getFloatTy(dialect));
  }
  Operation *getPrintDouble(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_f64",
                    LLVM::LLVMType::getDoubleTy(dialect));
  }
  Operation *getPrintOpen(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_open", {});
  }
  Operation *getPrintClose(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_close", {});
  }
  Operation *getPrintComma(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_comma", {});
  }
  Operation *getPrintNewline(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_newline", {});
  }
};

/// Progressive lowering of StridedSliceOp to either:
///   1. extractelement + insertelement for the 1-D case
///   2. extract + optional strided_slice + insert for the n-D case.
class VectorStridedSliceOpConversion : public OpRewritePattern<StridedSliceOp> {
public:
  using OpRewritePattern<StridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(StridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto dstType = op.getResult().getType().cast<VectorType>();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isIntOrIndexOrFloat());
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value extracted = extractOne(rewriter, loc, op.vector(), off);
      if (op.offsets().getValue().size() > 1) {
        StridedSliceOp stridedSliceOp = rewriter.create<StridedSliceOp>(
            loc, extracted, getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.sizes(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
        // Call matchAndRewrite recursively from within the pattern. This
        // circumvents the current limitation that a given pattern cannot
        // be called multiple times by the PatternRewrite infrastructure (to
        // avoid infinite recursion, but in this case, infinite recursion
        // cannot happen because the rank is strictly decreasing).
        // TODO(rriddle, nicolasvasilache) Implement something like a hook for
        // a potential function that must decrease and allow the same pattern
        // multiple times.
        auto success = matchAndRewrite(stridedSliceOp, rewriter);
        (void)success;
        assert(success && "Unexpected failure");
        extracted = stridedSliceOp;
      }
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, {res});
    return matchSuccess();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
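/// The strided-slice patterns are plain rewrite patterns that only need the
/// context; the remaining conversions also capture the LLVM type converter.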
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.insert<VectorInsertStridedSliceOpDifferentRankRewritePattern,
                  VectorInsertStridedSliceOpSameRankRewritePattern,
                  VectorStridedSliceOpConversion>(ctx);
  patterns.insert<VectorBroadcastOpConversion, VectorShuffleOpConversion,
                  VectorExtractElementOpConversion, VectorExtractOpConversion,
                  VectorInsertElementOpConversion, VectorInsertOpConversion,
                  VectorOuterProductOpConversion, VectorTypeCastOpConversion,
                  VectorPrintOpConversion>(ctx, converter);
}

namespace {
struct LowerVectorToLLVMPass : public ModulePass<LowerVectorToLLVMPass> {
  void runOnModule() override;
};
} // namespace

void LowerVectorToLLVMPass::runOnModule() {
  // Perform progressive lowering of operations on "slices".
  // Folding and DCE get rid of all non-leaking tuple ops.
  {
    OwningRewritePatternList patterns;
    populateVectorSlicesLoweringPatterns(patterns, &getContext());
    applyPatternsGreedily(getModule(), patterns);
  }

  // Convert to the LLVM IR dialect.
  LLVMTypeConverter converter(&getContext());
  OwningRewritePatternList patterns;
  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);

  ConversionTarget target(getContext());
  target.addLegalDialect<LLVM::LLVMDialect>();
  target.addDynamicallyLegalOp<FuncOp>(
      [&](FuncOp op) { return converter.isSignatureLegal(op.getType()); });
  if (failed(
          applyPartialConversion(getModule(), target, patterns, &converter))) {
    signalPassFailure();
  }
}

OpPassBase<ModuleOp> *mlir::createLowerVectorToLLVMPass() {
  return new LowerVectorToLLVMPass();
}

static PassRegistration<LowerVectorToLLVMPass>
    pass("convert-vector-to-llvm",
         "Lower the operations from the vector dialect into the LLVM dialect");