//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/LLVMCommon/VectorPattern.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorTransforms.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}
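
// For example (illustrative IR; SSA names are hypothetical), extracting
// position 3 from a rank-1 vector lowers to:
//   %c3 = llvm.mlir.constant(3 : i64) : i64
//   %e = llvm.extractelement %v[%c3 : i64] : vector<8xf32>
// whereas higher ranks address the wrapping LLVM array instead:
//   %e = llvm.extractvalue %v[3] : !llvm.array<4 x vector<8xf32>>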

// Helper that returns data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Return the minimal alignment value that satisfies all the AssumeAlignment
// uses of `value`. If no such uses exist, return 1.
static unsigned getAssumedAlignment(Value value) {
  unsigned align = 1;
  for (auto &u : value.getUses()) {
    Operation *owner = u.getOwner();
    if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
      align = mlir::lcm(align, op.alignment());
  }
  return align;
}

// Helper that returns data layout alignment of a memref associated with a
// load, store, scatter, or gather op, including additional information from
// assume_alignment calls on the source of the transfer.
template <class OpAdaptor>
LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
                                   OpAdaptor op, unsigned &align) {
  if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
    return failure();
  align = std::max(align, getAssumedAlignment(op.base()));
  return success();
}

// Add an index vector component to a base pointer. This fails if the last
// stride is non-unit or the memory space is not zero.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

namespace {

/// Conversion pattern for a vector.bitcast.
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 adaptor.getOperands()[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
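/// For example (illustrative; SSA names hypothetical, a flattened 2x3 by 3x4
/// multiply):
/// ```
///   %c = vector.matrix_multiply %a, %b
///          { lhs_rows = 2: i32, lhs_columns = 3: i32, rhs_columns = 4: i32 }
///        : (vector<6xf32>, vector<12xf32>) -> vector<8xf32>
/// ```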
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
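/// For example (illustrative; SSA names are hypothetical), a 1-D masked load
/// such as:
/// ```
///   %0 = vector.maskedload %base[%i], %mask, %pass_thru
///      : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// becomes an llvm.intr.masked.load on a pointer bitcast to the vector type.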
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp,
                  typename LoadOrStoreOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
                                    align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
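/// For example (illustrative; SSA names are hypothetical):
/// ```
///   %0 = vector.expandload %base[%i], %mask, %pass_thru
///      : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
/// ```
/// maps directly onto llvm.intr.masked.expandload.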
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
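/// For example (illustrative; this uses the op's string kind attribute):
/// ```
///   %0 = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// lowers to llvm.intr.vector.reduce.fadd seeded with a zero accumulator.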
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    Value operand = adaptor.getOperands()[0];
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "minui")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operand);
      else if (kind == "minsi")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operand);
      else if (kind == "maxui")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operand);
      else if (kind == "maxsi")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operand);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(reductionOp,
                                                            llvmType, operand);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(reductionOp,
                                                             llvmType, operand);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = adaptor.getOperands().size() > 1
                      ? adaptor.getOperands()[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operand,
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = adaptor.getOperands().size() > 1
                      ? adaptor.getOperands()[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operand,
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "minf")
      // FIXME: MLIR's 'minf' and LLVM's 'vector_reduce_fmin' do not handle
      // NaNs/-0.0/+0.0 in the same way.
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(reductionOp,
                                                            llvmType, operand);
    else if (kind == "maxf")
      // FIXME: MLIR's 'maxf' and LLVM's 'vector_reduce_fmax' do not handle
      // NaNs/-0.0/+0.0 in the same way.
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(reductionOp,
                                                            llvmType, operand);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};

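/// Conversion pattern for a vector.shuffle. For example (illustrative), a
/// rank-1 shuffle of identically typed operands such as:
/// ```
///   %0 = vector.shuffle %a, %b [0, 2, 1, 3] : vector<2xf32>, vector<2xf32>
/// ```
/// maps to a single llvm.shufflevector; all other cases are scalarized.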
class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the individual values one at a
    // time.
    Type eltType;
    if (auto arrayType = llvmType.dyn_cast<LLVM::LLVMArrayType>())
      eltType = arrayType.getElementType();
    else
      eltType = llvmType.cast<VectorType>().getElementType();
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 eltType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

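/// Conversion pattern for a vector.extract. For example (illustrative),
/// extracting a scalar from a rank-2 vector:
/// ```
///   %0 = vector.extract %v[3, 7] : vector<4x16xf32>
/// ```
/// first peels the enclosing array with llvm.extractvalue at [3], then reads
/// element 7 of the remaining 1-D vector with llvm.extractelement.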
class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///   vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///   llvm.intr.fmuladd %va, %va, %va:
///     (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///     -> !llvm<"<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

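/// Conversion pattern for a vector.insert. For example (illustrative),
/// inserting a scalar into a rank-2 vector:
/// ```
///   %0 = vector.insert %s, %v[3, 7] : f32 into vector<4x16xf32>
/// ```
/// extracts the 1-D subvector at [3], rewrites element 7 with
/// llvm.insertelement, and writes the result back with llvm.insertvalue.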
class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires
    // insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %r = splat %f0 : vector<2x4xf32>
///   %va = vector.extract %a[0] : vector<2x4xf32>
///   %vb = vector.extract %b[0] : vector<2x4xf32>
///   %vc = vector.extract %c[0] : vector<2x4xf32>
///   %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///   %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///   %va2 = vector.extract %a[1] : vector<2x4xf32>
///   %vb2 = vector.extract %b[1] : vector<2x4xf32>
///   %vc2 = vector.extract %c[1] : vector<2x4xf32>
///   %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///   %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///   // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  void initialize() {
    // This pattern recursively unpacks one dimension at a time. The recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
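/// For example (illustrative): memref<4x8xf32> has strides [8, 1] and is
/// contiguous, while a memref of the same shape with strides [16, 1] is not,
/// since 16 != 8 * 1.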
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getLayout().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from the shapes. This can only
  // ever work in static cases because MemRefType cannot represent contiguous
  // dynamic shapes other than through an empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

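/// Conversion pattern for a vector.type_cast. For example (illustrative):
/// ```
///   %0 = vector.type_cast %a : memref<4x8xf32> to memref<vector<4x8xf32>>
/// ```
/// Only the memref descriptor is rewritten; no data is copied.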
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        adaptor.getOperands()[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(adaptor.getOperands()[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Set offset to 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

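/// Conversion pattern for a vector.print. For example (illustrative; the
/// runtime entry points are those provided by FunctionCallUtils):
/// ```
///   vector.print %v : vector<2xf32>
/// ```
/// unrolls into calls to printOpen, printF32 (per element), printComma,
/// printClose, and printNewline from the runtime support library.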
class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to the full
  // unrolling, this approach is less suited for very large vectors, though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<arith::ExtUIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<arith::ExtSIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(), SymbolRefAttr::get(ref),
                                  params);
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
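/// A minimal driver sketch (illustrative; `module`, `target`, and the pass
/// boilerplate are assumed to exist):
/// ```
///   LLVMTypeConverter converter(module.getContext());
///   RewritePatternSet patterns(module.getContext());
///   populateVectorToLLVMConversionPatterns(converter, patterns);
///   (void)applyPartialConversion(module, target, std::move(patterns));
/// ```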
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.add<VectorFMAOpNDRewritePattern>(ctx);
  populateVectorInsertExtractStridedSliceTransforms(patterns);
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>(
          converter);
  // Transfer ops with rank > 1 are handled by VectorToSCF.
  populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<VectorMatmulOpConversion>(converter);
  patterns.add<VectorFlatTransposeOpConversion>(converter);
}