//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front, e.g. vector<2x4xf32>
// becomes vector<4xf32>. Only valid for vectors of rank > 1 (asserted).
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
45 static VectorType reducedVectorTypeBack(VectorType tp) { 46 assert((tp.getRank() > 1) && "unlowerable vector type"); 47 return VectorType::get(tp.getShape().take_back(), tp.getElementType()); 48 } 49 50 // Helper that picks the proper sequence for inserting. 51 static Value insertOne(ConversionPatternRewriter &rewriter, 52 LLVMTypeConverter &typeConverter, Location loc, 53 Value val1, Value val2, Type llvmType, int64_t rank, 54 int64_t pos) { 55 if (rank == 1) { 56 auto idxType = rewriter.getIndexType(); 57 auto constant = rewriter.create<LLVM::ConstantOp>( 58 loc, typeConverter.convertType(idxType), 59 rewriter.getIntegerAttr(idxType, pos)); 60 return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2, 61 constant); 62 } 63 return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2, 64 rewriter.getI64ArrayAttr(pos)); 65 } 66 67 // Helper that picks the proper sequence for inserting. 68 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from, 69 Value into, int64_t offset) { 70 auto vectorType = into.getType().cast<VectorType>(); 71 if (vectorType.getRank() > 1) 72 return rewriter.create<InsertOp>(loc, from, into, offset); 73 return rewriter.create<vector::InsertElementOp>( 74 loc, vectorType, from, into, 75 rewriter.create<ConstantIndexOp>(loc, offset)); 76 } 77 78 // Helper that picks the proper sequence for extracting. 
79 static Value extractOne(ConversionPatternRewriter &rewriter, 80 LLVMTypeConverter &typeConverter, Location loc, 81 Value val, Type llvmType, int64_t rank, int64_t pos) { 82 if (rank == 1) { 83 auto idxType = rewriter.getIndexType(); 84 auto constant = rewriter.create<LLVM::ConstantOp>( 85 loc, typeConverter.convertType(idxType), 86 rewriter.getIntegerAttr(idxType, pos)); 87 return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val, 88 constant); 89 } 90 return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val, 91 rewriter.getI64ArrayAttr(pos)); 92 } 93 94 // Helper that picks the proper sequence for extracting. 95 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector, 96 int64_t offset) { 97 auto vectorType = vector.getType().cast<VectorType>(); 98 if (vectorType.getRank() > 1) 99 return rewriter.create<ExtractOp>(loc, vector, offset); 100 return rewriter.create<vector::ExtractElementOp>( 101 loc, vectorType.getElementType(), vector, 102 rewriter.create<ConstantIndexOp>(loc, offset)); 103 } 104 105 // Helper that returns a subset of `arrayAttr` as a vector of int64_t. 106 // TODO: Better support for attribute subtype forwarding + slicing. 107 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr, 108 unsigned dropFront = 0, 109 unsigned dropBack = 0) { 110 assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds"); 111 auto range = arrayAttr.getAsRange<IntegerAttr>(); 112 SmallVector<int64_t, 4> res; 113 res.reserve(arrayAttr.size() - dropFront - dropBack); 114 for (auto it = range.begin() + dropFront, eit = range.end() - dropBack; 115 it != eit; ++it) 116 res.push_back((*it).getValue().getSExtValue()); 117 return res; 118 } 119 120 // Helper that returns data layout alignment of an operation with memref. 
121 template <typename T> 122 LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, T op, 123 unsigned &align) { 124 Type elementTy = 125 typeConverter.convertType(op.getMemRefType().getElementType()); 126 if (!elementTy) 127 return failure(); 128 129 auto dataLayout = typeConverter.getDialect()->getLLVMModule().getDataLayout(); 130 // TODO: this should be abstracted away to avoid depending on translation. 131 align = dataLayout.getPrefTypeAlignment(LLVM::translateTypeToLLVMIR( 132 elementTy.cast<LLVM::LLVMType>(), 133 typeConverter.getDialect()->getLLVMContext())); 134 return success(); 135 } 136 137 // Helper that returns the base address of a memref. 138 LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc, 139 Value memref, MemRefType memRefType, Value &base) { 140 // Inspect stride and offset structure. 141 // 142 // TODO: flat memory only for now, generalize 143 // 144 int64_t offset; 145 SmallVector<int64_t, 4> strides; 146 auto successStrides = getStridesAndOffset(memRefType, strides, offset); 147 if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 || 148 offset != 0 || memRefType.getMemorySpace() != 0) 149 return failure(); 150 base = MemRefDescriptor(memref).alignedPtr(rewriter, loc); 151 return success(); 152 } 153 154 // Helper that returns a pointer given a memref base. 155 LogicalResult getBasePtr(ConversionPatternRewriter &rewriter, Location loc, 156 Value memref, MemRefType memRefType, Value &ptr) { 157 Value base; 158 if (failed(getBase(rewriter, loc, memref, memRefType, base))) 159 return failure(); 160 auto pType = MemRefDescriptor(memref).getElementType(); 161 ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base); 162 return success(); 163 } 164 165 // Helper that returns vector of pointers given a memref base and an index 166 // vector. 
// Helper that computes a vector of pointers (one per lane) from a memref base
// and a vector of indices, for use by gather/scatter lowerings.
// NOTE(review): `iType` (the index element type) is currently unused in the
// body — confirm with callers whether it can be dropped.
LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, Value indices, MemRefType memRefType,
                             VectorType vType, Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementType();
  // One pointer per result lane: <N x elt*> where N is the leading dim size.
  auto ptrsType = LLVM::LLVMType::getVectorTy(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}

// Lowers an unmasked vector.transfer_read to a plain llvm.load of the whole
// vector at `dataPtr`. `operands` is unused for the read case but kept so the
// read/write overloads share a signature.
static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

// Lowers a masked vector.transfer_read to llvm.intr.masked.load, splatting the
// op's padding value as the pass-through for masked-off lanes.
static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  VectorType fillType = xferOp.getVectorType();
  // Masked-off lanes read the padding value: splat it to a full vector and
  // cast it into the LLVM dialect.
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

// Lowers an unmasked vector.transfer_write to a plain llvm.store of the
// vector operand at `dataPtr`.
static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

// Lowers a masked vector.transfer_write to llvm.intr.masked.store.
static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

// Overload-selection helpers so templated code can build the right adaptor for
// either transfer op kind; the op argument only drives overload resolution.
static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
259 class VectorMatmulOpConversion : public ConvertToLLVMPattern { 260 public: 261 explicit VectorMatmulOpConversion(MLIRContext *context, 262 LLVMTypeConverter &typeConverter) 263 : ConvertToLLVMPattern(vector::MatmulOp::getOperationName(), context, 264 typeConverter) {} 265 266 LogicalResult 267 matchAndRewrite(Operation *op, ArrayRef<Value> operands, 268 ConversionPatternRewriter &rewriter) const override { 269 auto matmulOp = cast<vector::MatmulOp>(op); 270 auto adaptor = vector::MatmulOpAdaptor(operands); 271 rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>( 272 op, typeConverter.convertType(matmulOp.res().getType()), adaptor.lhs(), 273 adaptor.rhs(), matmulOp.lhs_rows(), matmulOp.lhs_columns(), 274 matmulOp.rhs_columns()); 275 return success(); 276 } 277 }; 278 279 /// Conversion pattern for a vector.flat_transpose. 280 /// This is lowered directly to the proper llvm.intr.matrix.transpose. 281 class VectorFlatTransposeOpConversion : public ConvertToLLVMPattern { 282 public: 283 explicit VectorFlatTransposeOpConversion(MLIRContext *context, 284 LLVMTypeConverter &typeConverter) 285 : ConvertToLLVMPattern(vector::FlatTransposeOp::getOperationName(), 286 context, typeConverter) {} 287 288 LogicalResult 289 matchAndRewrite(Operation *op, ArrayRef<Value> operands, 290 ConversionPatternRewriter &rewriter) const override { 291 auto transOp = cast<vector::FlatTransposeOp>(op); 292 auto adaptor = vector::FlatTransposeOpAdaptor(operands); 293 rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>( 294 transOp, typeConverter.convertType(transOp.res().getType()), 295 adaptor.matrix(), transOp.rows(), transOp.columns()); 296 return success(); 297 } 298 }; 299 300 /// Conversion pattern for a vector.gather. 
class VectorGatherOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorGatherOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::GatherOp::getOperationName(), context,
                             typeConverter) {}

  // Lowers vector.gather to llvm.intr.masked.gather: build one pointer per
  // lane from the base + index vector, then gather through those pointers.
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto gather = cast<vector::GatherOp>(op);
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, gather, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic. The pass-through operand is
    // optional on the vector op; forward it only when present.
    ValueRange v = (llvm::size(adaptor.pass_thru()) == 0) ? ValueRange({})
                                                          : adaptor.pass_thru();
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter.convertType(vType), ptrs, adaptor.mask(), v,
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorScatterOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ScatterOp::getOperationName(), context,
                             typeConverter) {}

  // Lowers vector.scatter to llvm.intr.masked.scatter, mirroring the gather
  // lowering above (per-lane pointers computed by getIndexedPtrs).
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto scatter = cast<vector::ScatterOp>(op);
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, scatter, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExpandLoadOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExpandLoadOp::getOperationName(), context,
                             typeConverter) {}

  // Lowers vector.expandload to llvm.intr.masked.expandload through a scalar
  // base pointer (no per-lane pointers; the intrinsic consumes consecutive
  // elements for enabled lanes).
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto expand = cast<vector::ExpandLoadOp>(op);
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), expand.getMemRefType(),
                          ptr)))
      return failure();

    auto vType = expand.getResultVectorType();
    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        op, typeConverter.convertType(vType), ptr, adaptor.mask(),
        adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorCompressStoreOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::CompressStoreOp::getOperationName(),
                             context, typeConverter) {}

  // Lowers vector.compressstore to llvm.intr.masked.compressstore through a
  // scalar base pointer (enabled lanes are stored consecutively).
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto compress = cast<vector::CompressStoreOp>(op);
    auto adaptor = vector::CompressStoreOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(),
                          compress.getMemRefType(), ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        op, adaptor.value(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
class VectorReductionOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorReductionOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter,
                                       bool reassociateFP)
      : ConvertToLLVMPattern(vector::ReductionOp::getOperationName(), context,
                             typeConverter),
        reassociateFPReductions(reassociateFP) {}

  // Dispatches on the reduction `kind` string and the element type to the
  // matching llvm.intr.experimental.vector.reduce.* intrinsic. Only i32/i64
  // and f32/f64 element types are supported; anything else fails the match.
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto reductionOp = cast<vector::ReductionOp>(op);
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter.convertType(eltType);
    if (eltType.isSignlessInteger(32) || eltType.isSignlessInteger(64)) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      // min/max use the signed variants (smin/smax) since the element types
      // accepted here are signless integers.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_add>(
            op, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_mul>(
            op, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smax>(
            op, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_and>(
            op, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_or>(
            op, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_xor>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();

    } else if (eltType.isF32() || eltType.isF64()) {
      // Floating-point reductions: add/mul/min/max
      if (kind == "add") {
        // Optional accumulator (or zero).
        Value acc = operands.size() > 1 ? operands[1]
                                        : rewriter.create<LLVM::ConstantOp>(
                                              op->getLoc(), llvmType,
                                              rewriter.getZeroAttr(eltType));
        // The `reassociateFPReductions` flag is forwarded so LLVM may (or may
        // not) reorder the FP reduction tree.
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fadd>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "mul") {
        // Optional accumulator (or one).
        Value acc = operands.size() > 1
                        ? operands[1]
                        : rewriter.create<LLVM::ConstantOp>(
                              op->getLoc(), llvmType,
                              rewriter.getFloatAttr(eltType, 1.0));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fmul>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmax>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();
    }
    return failure();
  }

private:
  // When true, FP add/mul reductions may be reassociated by LLVM.
  const bool reassociateFPReductions;
};

// Lowers vector.shuffle either to a single llvm.shufflevector (rank-1, equal
// operand types) or to an elementwise extract/insert expansion.
class VectorShuffleOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ShuffleOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return success();
    }

    // For all other cases, insert the individual values individually.
    // Mask entries < v1Dim select from v1; entries >= v1Dim select position
    // (entry - v1Dim) from v2.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, typeConverter, loc, value, llvmType,
                                 rank, extPos);
      insert = insertOne(rewriter, typeConverter, loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return success();
  }
};

// Lowers vector.extractelement 1-1 to llvm.extractelement.
class VectorExtractElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractElementOpConversion(MLIRContext *context,
                                            LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto extractEltOp = cast<vector::ExtractElementOp>(op);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter.convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        op, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

// Lowers vector.extract. A vector result needs only llvm.extractvalue on the
// aggregate; a scalar result drills down to the innermost 1-D vector with
// llvm.extractvalue, then extracts the element with llvm.extractelement.
class VectorExtractOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array: use all but the last
    // position index to reach the innermost 1-D vector.
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
///    -> !llvm<"<8 x float>">
/// ```
class VectorFMAOp1DConversion : public ConvertToLLVMPattern {
public:
  explicit VectorFMAOp1DConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::FMAOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
    VectorType vType = fmaOp.getVectorType();
    // Only the 1-D case lowers directly; n-D is first rank-reduced by
    // VectorFMAOpNDRewritePattern below.
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(op, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

// Lowers vector.insertelement 1-1 to llvm.insertelement.
class VectorInsertElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertElementOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto insertEltOp = cast<vector::InsertElementOp>(op);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter.convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
    return success();
  }
};

// Lowers vector.insert. A vector source needs only llvm.insertvalue into the
// aggregate; a scalar source drills down to the innermost 1-D vector, inserts
// the element with llvm.insertelement, and re-inserts that 1-D vector into
// the aggregate when needed.
class VectorInsertOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array: use all but the last
    // position index to reach the innermost 1-D vector.
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter.convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array: put the updated
    // innermost vector back into the original aggregate.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0: vector<2x4xf32>
///  %va = vector.extractvalue %a[0] : vector<2x4xf32>
///  %vb = vector.extractvalue %b[0] : vector<2x4xf32>
///  %vc = vector.extractvalue %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
///  %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
///  %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    // Rank-1 FMAs are handled directly by VectorFMAOp1DConversion.
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    // Peel one rank: emit an (n-1)-D FMA per slice along the leading dim and
    // reassemble the result. The emitted FMAs recursively re-match until 1-D.
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Same-rank case is handled by the pattern below.
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it. Only the leading `rankDiff` offsets address the subvector, so
    // the trailing `rankRest` offsets are dropped here.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. The op is reduced one rank at a time: for each slice of
// the source along its leading dimension,
//   1. the slice is extracted from the source
//   2. if the slice is itself a vector, the matching slice is extracted from
//      the destination and a new, lower-rank InsertStridedSlice is created on
//      the pair
//   3. the (possibly updated) slice is inserted at its strided offset in the
//      running result
//   4. the op is replaced by the accumulated result.
// The lower-rank InsertStridedSlice ops from step 2. are picked up again by
// this same pattern.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Different-rank case is handled by the pattern above.
    if (rankDiff != 0)
      return failure();

    // Full overwrite of the destination: the source *is* the result.
    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
  /// This pattern creates recursive InsertStridedSliceOp, but the recursion is
  /// bounded as the rank is strictly decreasing.
943 bool hasBoundedRewriteRecursion() const final { return true; } 944 }; 945 946 class VectorTypeCastOpConversion : public ConvertToLLVMPattern { 947 public: 948 explicit VectorTypeCastOpConversion(MLIRContext *context, 949 LLVMTypeConverter &typeConverter) 950 : ConvertToLLVMPattern(vector::TypeCastOp::getOperationName(), context, 951 typeConverter) {} 952 953 LogicalResult 954 matchAndRewrite(Operation *op, ArrayRef<Value> operands, 955 ConversionPatternRewriter &rewriter) const override { 956 auto loc = op->getLoc(); 957 vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op); 958 MemRefType sourceMemRefType = 959 castOp.getOperand().getType().cast<MemRefType>(); 960 MemRefType targetMemRefType = 961 castOp.getResult().getType().cast<MemRefType>(); 962 963 // Only static shape casts supported atm. 964 if (!sourceMemRefType.hasStaticShape() || 965 !targetMemRefType.hasStaticShape()) 966 return failure(); 967 968 auto llvmSourceDescriptorTy = 969 operands[0].getType().dyn_cast<LLVM::LLVMType>(); 970 if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy()) 971 return failure(); 972 MemRefDescriptor sourceMemRef(operands[0]); 973 974 auto llvmTargetDescriptorTy = typeConverter.convertType(targetMemRefType) 975 .dyn_cast_or_null<LLVM::LLVMType>(); 976 if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy()) 977 return failure(); 978 979 int64_t offset; 980 SmallVector<int64_t, 4> strides; 981 auto successStrides = 982 getStridesAndOffset(sourceMemRefType, strides, offset); 983 bool isContiguous = (strides.back() == 1); 984 if (isContiguous) { 985 auto sizes = sourceMemRefType.getShape(); 986 for (int index = 0, e = strides.size() - 2; index < e; ++index) { 987 if (strides[index] != strides[index + 1] * sizes[index + 1]) { 988 isContiguous = false; 989 break; 990 } 991 } 992 } 993 // Only contiguous source tensors supported atm. 
994 if (failed(successStrides) || !isContiguous) 995 return failure(); 996 997 auto int64Ty = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect()); 998 999 // Create descriptor. 1000 auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy); 1001 Type llvmTargetElementTy = desc.getElementType(); 1002 // Set allocated ptr. 1003 Value allocated = sourceMemRef.allocatedPtr(rewriter, loc); 1004 allocated = 1005 rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated); 1006 desc.setAllocatedPtr(rewriter, loc, allocated); 1007 // Set aligned ptr. 1008 Value ptr = sourceMemRef.alignedPtr(rewriter, loc); 1009 ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr); 1010 desc.setAlignedPtr(rewriter, loc, ptr); 1011 // Fill offset 0. 1012 auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0); 1013 auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr); 1014 desc.setOffset(rewriter, loc, zero); 1015 1016 // Fill size and stride descriptors in memref. 1017 for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) { 1018 int64_t index = indexedSize.index(); 1019 auto sizeAttr = 1020 rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value()); 1021 auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr); 1022 desc.setSize(rewriter, loc, index, size); 1023 auto strideAttr = 1024 rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]); 1025 auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr); 1026 desc.setStride(rewriter, loc, index, stride); 1027 } 1028 1029 rewriter.replaceOp(op, {desc}); 1030 return success(); 1031 } 1032 }; 1033 1034 /// Conversion pattern that converts a 1-D vector transfer read/write op in a 1035 /// sequence of: 1036 /// 1. Bitcast or addrspacecast to vector form. 1037 /// 2. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ]. 1038 /// 3. 
/// Create a mask where offsetVector is compared against memref upper bound.
/// 4. Rewrite op as a masked read or write.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTransferConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConv)
      : ConvertToLLVMPattern(ConcreteOp::getOperationName(), context,
                             typeConv) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto xferOp = cast<ConcreteOp>(op);
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    // Only 1-D transfers with at least one index are handled here; n-D
    // transfers are expected to be progressively lowered beforehand.
    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    // Only the minor-identity permutation (a plain contiguous read/write of
    // the most minor dimension) is supported.
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       op->getContext()))
      return failure();

    auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };

    Location loc = op->getLoc();
    Type i64Type = rewriter.getIntegerType(64);
    MemRefType memRefType = xferOp.getMemRefType();

    // 1. Get the source/dst address as an LLVM vector pointer.
    //    The vector pointer would always be on address space 0, therefore
    //    addrspacecast shall be used when source/dst memrefs are not on
    //    address space 0.
    // TODO: support alignment when possible.
    Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
                               adaptor.indices(), rewriter, getModule());
    auto vecTy =
        toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
    Value vectorDataPtr;
    if (memRefType.getMemorySpace() == 0)
      vectorDataPtr =
          rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
    else
      vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
          loc, vecTy.getPointerTo(), dataPtr);

    // An unmasked dimension needs no bounds mask: lower directly to a plain
    // vector load/store.
    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter, typeConverter, loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    unsigned vecWidth = vecTy.getVectorNumElements();
    VectorType vectorCmpType = VectorType::get(vecWidth, i64Type);
    SmallVector<int64_t, 8> indices;
    indices.reserve(vecWidth);
    for (unsigned i = 0; i < vecWidth; ++i)
      indices.push_back(i);
    Value linearIndices = rewriter.create<ConstantOp>(
        loc, vectorCmpType,
        DenseElementsAttr::get(vectorCmpType, ArrayRef<int64_t>(indices)));
    // Cast the standard-dialect constant into the LLVM dialect for use below.
    linearIndices = rewriter.create<LLVM::DialectCastOp>(
        loc, toLLVMTy(vectorCmpType), linearIndices);

    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // TODO: when the leaf transfer rank is k > 1 we need the last
    // `k` dimensions here.
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value offsetIndex = *(xferOp.indices().begin() + lastIndex);
    offsetIndex = rewriter.create<IndexCastOp>(loc, i64Type, offsetIndex);
    Value base = rewriter.create<SplatOp>(loc, vectorCmpType, offsetIndex);
    Value offsetVector = rewriter.create<AddIOp>(loc, base, linearIndices);

    // 4. Let `dim` be the memref's most minor dimension size; compute the
    //    vector comparison mask (signed compare, elementwise):
    //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
    dim = rewriter.create<IndexCastOp>(loc, i64Type, dim);
    dim = rewriter.create<SplatOp>(loc, vectorCmpType, dim);
    Value mask =
        rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, offsetVector, dim);
    mask = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(mask.getType()),
                                                mask);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, typeConverter, loc, xferOp,
                                       operands, vectorDataPtr, mask);
  }
};

/// Lowers vector.print to calls into a small runtime support library.
class VectorPrintOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorPrintOpConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::PrintOp::getOperationName(), context,
                             typeConverter) {}

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto printOp = cast<vector::PrintOp>(op);
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter.convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support: currently i1/i32 (both
    // printed via print_i32), i64, f32 and f64.
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    Operation *printer;
    if (eltType.isSignlessInteger(1) || eltType.isSignlessInteger(32))
      printer = getPrintI32(op);
    else if (eltType.isSignlessInteger(64))
      printer = getPrintI64(op);
    else if (eltType.isF32())
      printer = getPrintFloat(op);
    else if (eltType.isF64())
      printer = getPrintDouble(op);
    else
      return failure();

    // Unroll vector into elementary print calls.
    emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
    emitCall(rewriter, op->getLoc(), getPrintNewline(op));
    rewriter.eraseOp(op);
    return success();
  }

private:
  // Recursively print `value`: scalars go straight to `printer`; vectors are
  // printed as bracketed, comma-separated lists of their rank-reduced parts.
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      if (value.getType() ==
          LLVM::LLVMType::getInt1Ty(typeConverter.getDialect())) {
        // Convert i1 (bool) to i32 so we can use the print_i32 method.
        // This avoids the need for a print_i1 method with an unclear ABI.
        auto i32Type = LLVM::LLVMType::getInt32Ty(typeConverter.getDialect());
        auto trueVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(1));
        auto falseVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(0));
        value = rewriter.create<SelectOp>(loc, value, trueVal, falseVal);
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc, getPrintOpen(op));
    Operation *printComma = getPrintComma(op);
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      // At rank 1 the extracted element is a scalar; above that it is a
      // vector of rank - 1, printed by recursing.
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter.convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal =
          extractOne(rewriter, typeConverter, loc, value, llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc, getPrintClose(op));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
                                  rewriter.getSymbolRefAttr(ref), params);
  }

  // Helper for printer method declaration (first hit) and lookup.
  static Operation *getPrint(Operation *op, LLVM::LLVMDialect *dialect,
                             StringRef name, ArrayRef<LLVM::LLVMType> params) {
    auto module = op->getParentOfType<ModuleOp>();
    auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
    if (func)
      return func;
    // Not declared yet: insert the declaration at module scope.
    OpBuilder moduleBuilder(module.getBodyRegion());
    return moduleBuilder.create<LLVM::LLVMFuncOp>(
        op->getLoc(), name,
        LLVM::LLVMType::getFunctionTy(LLVM::LLVMType::getVoidTy(dialect),
                                      params, /*isVarArg=*/false));
  }

  // Helpers for method names.
  Operation *getPrintI32(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i32",
                    LLVM::LLVMType::getInt32Ty(dialect));
  }
  Operation *getPrintI64(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i64",
                    LLVM::LLVMType::getInt64Ty(dialect));
  }
  Operation *getPrintFloat(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f32",
                    LLVM::LLVMType::getFloatTy(dialect));
  }
  Operation *getPrintDouble(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f64",
                    LLVM::LLVMType::getDoubleTy(dialect));
  }
  Operation *getPrintOpen(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_open", {});
  }
  Operation *getPrintClose(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_close", {});
  }
  Operation *getPrintComma(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_comma", {});
  }
  Operation *getPrintNewline(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_newline", {});
  }
};

/// Progressive lowering of
ExtractStridedSliceOp to either: 1275 /// 1. extractelement + insertelement for the 1-D case 1276 /// 2. extract + optional strided_slice + insert for the n-D case. 1277 class VectorStridedSliceOpConversion 1278 : public OpRewritePattern<ExtractStridedSliceOp> { 1279 public: 1280 using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern; 1281 1282 LogicalResult matchAndRewrite(ExtractStridedSliceOp op, 1283 PatternRewriter &rewriter) const override { 1284 auto dstType = op.getResult().getType().cast<VectorType>(); 1285 1286 assert(!op.offsets().getValue().empty() && "Unexpected empty offsets"); 1287 1288 int64_t offset = 1289 op.offsets().getValue().front().cast<IntegerAttr>().getInt(); 1290 int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt(); 1291 int64_t stride = 1292 op.strides().getValue().front().cast<IntegerAttr>().getInt(); 1293 1294 auto loc = op.getLoc(); 1295 auto elemType = dstType.getElementType(); 1296 assert(elemType.isSignlessIntOrIndexOrFloat()); 1297 Value zero = rewriter.create<ConstantOp>(loc, elemType, 1298 rewriter.getZeroAttr(elemType)); 1299 Value res = rewriter.create<SplatOp>(loc, dstType, zero); 1300 for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e; 1301 off += stride, ++idx) { 1302 Value extracted = extractOne(rewriter, loc, op.vector(), off); 1303 if (op.offsets().getValue().size() > 1) { 1304 extracted = rewriter.create<ExtractStridedSliceOp>( 1305 loc, extracted, getI64SubArray(op.offsets(), /* dropFront=*/1), 1306 getI64SubArray(op.sizes(), /* dropFront=*/1), 1307 getI64SubArray(op.strides(), /* dropFront=*/1)); 1308 } 1309 res = insertOne(rewriter, loc, extracted, res, idx); 1310 } 1311 rewriter.replaceOp(op, {res}); 1312 return success(); 1313 } 1314 /// This pattern creates recursive ExtractStridedSliceOp, but the recursion is 1315 /// bounded as the rank is strictly decreasing. 
1316 bool hasBoundedRewriteRecursion() const final { return true; } 1317 }; 1318 1319 } // namespace 1320 1321 /// Populate the given list with patterns that convert from Vector to LLVM. 1322 void mlir::populateVectorToLLVMConversionPatterns( 1323 LLVMTypeConverter &converter, OwningRewritePatternList &patterns, 1324 bool reassociateFPReductions) { 1325 MLIRContext *ctx = converter.getDialect()->getContext(); 1326 // clang-format off 1327 patterns.insert<VectorFMAOpNDRewritePattern, 1328 VectorInsertStridedSliceOpDifferentRankRewritePattern, 1329 VectorInsertStridedSliceOpSameRankRewritePattern, 1330 VectorStridedSliceOpConversion>(ctx); 1331 patterns.insert<VectorReductionOpConversion>( 1332 ctx, converter, reassociateFPReductions); 1333 patterns 1334 .insert<VectorShuffleOpConversion, 1335 VectorExtractElementOpConversion, 1336 VectorExtractOpConversion, 1337 VectorFMAOp1DConversion, 1338 VectorInsertElementOpConversion, 1339 VectorInsertOpConversion, 1340 VectorPrintOpConversion, 1341 VectorTransferConversion<TransferReadOp>, 1342 VectorTransferConversion<TransferWriteOp>, 1343 VectorTypeCastOpConversion, 1344 VectorGatherOpConversion, 1345 VectorScatterOpConversion, 1346 VectorExpandLoadOpConversion, 1347 VectorCompressStoreOpConversion>(ctx, converter); 1348 // clang-format on 1349 } 1350 1351 void mlir::populateVectorToLLVMMatrixConversionPatterns( 1352 LLVMTypeConverter &converter, OwningRewritePatternList &patterns) { 1353 MLIRContext *ctx = converter.getDialect()->getContext(); 1354 patterns.insert<VectorMatmulOpConversion>(ctx, converter); 1355 patterns.insert<VectorFlatTransposeOpConversion>(ctx, converter); 1356 } 1357 1358 namespace { 1359 struct LowerVectorToLLVMPass 1360 : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> { 1361 LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) { 1362 this->reassociateFPReductions = options.reassociateFPReductions; 1363 } 1364 void runOnOperation() override; 1365 }; 1366 } // namespace 1367 1368 void 
LowerVectorToLLVMPass::runOnOperation() { 1369 // Perform progressive lowering of operations on slices and 1370 // all contraction operations. Also applies folding and DCE. 1371 { 1372 OwningRewritePatternList patterns; 1373 populateVectorToVectorCanonicalizationPatterns(patterns, &getContext()); 1374 populateVectorSlicesLoweringPatterns(patterns, &getContext()); 1375 populateVectorContractLoweringPatterns(patterns, &getContext()); 1376 applyPatternsAndFoldGreedily(getOperation(), patterns); 1377 } 1378 1379 // Convert to the LLVM IR dialect. 1380 LLVMTypeConverter converter(&getContext()); 1381 OwningRewritePatternList patterns; 1382 populateVectorToLLVMMatrixConversionPatterns(converter, patterns); 1383 populateVectorToLLVMConversionPatterns(converter, patterns, 1384 reassociateFPReductions); 1385 populateVectorToLLVMMatrixConversionPatterns(converter, patterns); 1386 populateStdToLLVMConversionPatterns(converter, patterns); 1387 1388 LLVMConversionTarget target(getContext()); 1389 if (failed(applyPartialConversion(getOperation(), target, patterns))) { 1390 signalPassFailure(); 1391 } 1392 } 1393 1394 std::unique_ptr<OperationPass<ModuleOp>> 1395 mlir::createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions &options) { 1396 return std::make_unique<LowerVectorToLLVMPass>(options); 1397 } 1398