//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
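// For a rank-1 vector this emits a `vector.extractelement` (via an index
// constant); for higher ranks it emits a `vector.extract` whose result is a
// lower-rank vector. Illustrative example: extracting offset 1 from a
// vector<2x4xf32> yields a vector<4xf32>.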
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns the data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Add an index vector component to a base pointer. This succeeds unless the
// last stride is non-unit or the memory space is not zero.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
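// For example (illustrative): a `!llvm.ptr<f32>` element pointer into a
// memref<?xf32> becomes a `!llvm.ptr<vector<8xf32>>` when the access reads
// or writes a vector<8xf32>.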
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType =
      LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  auto adaptor = TransferReadOpAdaptor(operands, xferOp->getAttrDictionary());
  Value fill = rewriter.create<SplatOp>(loc, vecTy, adaptor.padding());

  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands, xferOp->getAttrDictionary());
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
}

namespace {

/// Conversion pattern for a vector.bitcast.
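/// Only 1-D vectors are handled here. For example (illustrative):
/// ```
///   %1 = vector.bitcast %0 : vector<4xf32> to vector<2xf64>
/// ```
/// lowers to a single `llvm.bitcast` on the converted vector type.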
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
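/// For example (illustrative), a masked load
/// ```
///   %0 = vector.maskedload %base[%i], %mask, %pass : memref<?xf32>,
///        vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// becomes an `llvm.intr.masked.load` on the bitcast vector pointer.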
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    auto adaptor = LoadOrStoreOpAdaptor(operands);
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*this->getTypeConverter(), memRefTy, align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
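/// For example (illustrative), gathering 16 f32 values via an i32 index
/// vector:
/// ```
///   %0 = vector.gather %base[...], %index_vec, %mask, %pass_thru
///          : ... into vector<16xf32>
/// ```
/// lowers to an `llvm.intr.masked.gather` on a vector of per-lane pointers
/// produced by a single GEP.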
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
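/// For example (illustrative):
/// ```
///   vector.compressstore %base[%i], %mask, %value : memref<?xf32>,
///        vector<8xi1>, vector<8xf32>
/// ```
/// lowers to an `llvm.intr.masked.compressstore` that stores only the
/// enabled lanes, contiguously.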
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
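/// The reduction kind ("add", "mul", "min", "max", "and", "or", "xor") is
/// mapped to the matching `llvm.intr.vector.reduce.*` intrinsic. For example
/// (illustrative):
/// ```
///   %0 = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// lowers to `llvm.intr.vector.reduce.fadd` with a zero (or user-supplied)
/// accumulator.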
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min") {
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    } else if (kind == "max") {
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    } else {
      return failure();
    }
    return success();
  }

private:
  const bool reassociateFPReductions;
};
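/// Conversion pattern for a vector.shuffle. For example (illustrative):
/// ```
///   %2 = vector.shuffle %a, %b [0, 2] : vector<2xf32>, vector<2xf32>
/// ```
/// maps to a single `llvm.shufflevector` when both operands are 1-D of the
/// same type, and to an element-wise extract/insert sequence otherwise.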
class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};
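/// Conversion pattern for a vector.extract. For example (illustrative):
/// ```
///   %1 = vector.extract %0[0, 1] : vector<2x4xf32>
/// ```
/// becomes an `llvm.extractvalue` (to reach the innermost 1-D vector)
/// followed by an `llvm.extractelement`.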
class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of rank n >= 2.
///
/// Example:
/// ```
///   vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///   llvm.intr.fmuladd %va, %va, %va :
///     (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};
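/// Conversion pattern for a vector.insert. For example (illustrative):
/// ```
///   %1 = vector.insert %f, %0[0, 1] : f32 into vector<2x4xf32>
/// ```
/// becomes an `llvm.extractvalue` to obtain the 1-D destination vector, an
/// `llvm.insertelement`, and an `llvm.insertvalue` to put the updated vector
/// back in place.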
class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires
    // insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %r = splat %f0 : vector<2x4xf32>
///   %va = vector.extract %a[0] : vector<2x4xf32>
///   %vb = vector.extract %b[0] : vector<2x4xf32>
///   %vc = vector.extract %c[0] : vector<2x4xf32>
///   %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///   %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///   %va2 = vector.extract %a[1] : vector<2x4xf32>
///   %vb2 = vector.extract %b[1] : vector<2x4xf32>
///   %vc2 = vector.extract %c[1] : vector<2x4xf32>
///   %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///   %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///   // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. When ranks differ, InsertStridedSlice needs to extract
// a properly ranked vector from the destination vector into which to insert.
// This pattern only takes care of this part and forwards the rest of the
// conversion to another pattern that converts InsertStridedSlice for operands
// of the same rank. Concretely:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each slice of the source along the leading
// dimension:
//   1. the corresponding subvector (or element) is extracted from the source
//   2. in the vector case, the matching subvector is extracted from the
//      destination and a new, rank-reduced InsertStridedSlice op combines
//      the two
//   3. the result is inserted back into the proper position of the
//      accumulated destination vector.
// The rank-reduced InsertStridedSlice ops from step 2. are picked up again by
// this same pattern; the recursion terminates because the rank strictly
// decreases.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from the
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
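/// For example (illustrative): a default-layout memref<2x3x4xf32> yields
/// strides [12, 4, 1] and is contiguous, whereas an explicit layout with
/// strides [24, 4, 1] over the same shape leaves padding and is rejected.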
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from the shapes. This can only
  // ever work in static cases because MemRefType is underspecified to
  // represent contiguous dynamic shapes in other ways than with just an
  // empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 2; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}
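/// Conversion pattern for a vector.type_cast. For example (illustrative):
/// ```
///   %1 = vector.type_cast %0 : memref<2x4xf32> to memref<vector<2x4xf32>>
/// ```
/// is lowered by building a new memref descriptor with bitcast allocated and
/// aligned pointers, zero offset, and constant sizes/strides.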
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into
/// a masked or unmasked read/write.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  using ConvertOpToLLVMPattern<ConcreteOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();
    // Only contiguous source tensors supported atm.
    auto strides = computeContiguousStrides(memRefType);
    if (!strides)
      return failure();
    // Out-of-bounds dims are handled by MaterializeTransferMask.
    if (xferOp.hasOutOfBoundsDim())
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();

    if (auto memrefVectorElementType =
            memRefType.getElementType().template dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that the memref vector element type is a suffix of the result
      // vector type.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
    }

    // Get the source/dst address as an LLVM vector pointer.
    VectorType vtp = xferOp.getVectorType();
    Value dataPtr = this->getStridedElementPtr(
        loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
    Value vectorDataPtr =
        castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));

    // Rewrite as an unmasked read / write.
    if (!xferOp.mask())
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(),
                                       loc, xferOp, operands, vectorDataPtr,
                                       xferOp.mask());
  }
};
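/// Conversion pattern for a vector.print. For example (illustrative),
/// printing a vector<2xi64> expands to runtime calls roughly equivalent to:
/// ```
///   printOpen(); printI64(..); printComma(); printI64(..); printClose();
///   printNewline();
/// ```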
class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
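/// For example (illustrative), extracting a size-4, stride-1 slice at offset
/// 2 from a vector<8xf32> becomes a single vector.shuffle with mask
/// [2, 3, 4, 5], which in turn lowers to an llvm.shufflevector.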
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
      : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
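/// A typical use (illustrative) from within a conversion pass:
/// ```
///   LLVMTypeConverter converter(&getContext());
///   RewritePatternSet patterns(&getContext());
///   populateVectorToLLVMConversionPatterns(converter, patterns,
///                                          /*reassociateFPReductions=*/false);
/// ```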
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.add<VectorFMAOpNDRewritePattern,
               VectorInsertStridedSliceOpDifferentRankRewritePattern,
               VectorInsertStridedSliceOpSameRankRewritePattern,
               VectorExtractStridedSliceOpConversion>(ctx);
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion,
           VectorTransferConversion<TransferReadOp>,
           VectorTransferConversion<TransferWriteOp>>(converter);
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<VectorMatmulOpConversion>(converter);
  patterns.add<VectorFlatTransposeOpConversion>(converter);
}