//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/LLVMCommon/VectorPattern.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
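// Unlike the conversion-time overload above, which emits LLVM dialect ops,
// this overload emits vector dialect ops and is used by the rewrite patterns
// that run before conversion to LLVM.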
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns the data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Return the minimal alignment value that satisfies all the AssumeAlignment
// uses of `value`. If no such uses exist, return 1.
static unsigned getAssumedAlignment(Value value) {
  unsigned align = 1;
  for (auto &u : value.getUses()) {
    Operation *owner = u.getOwner();
    if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
      align = mlir::lcm(align, op.alignment());
  }
  return align;
}

// Helper that returns the data layout alignment of a memref associated with a
// load, store, scatter, or gather op, including any additional information
// from assume_alignment calls on the source of the transfer.
template <class OpAdaptor>
static LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
                                          OpAdaptor op, unsigned &align) {
  if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
    return failure();
  align = std::max(align, getAssumedAlignment(op.base()));
  return success();
}

// Add an index vector component to a base pointer. This succeeds unless the
// last stride is non-unit or the memory space is not zero.
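// For example (illustrative): a gather from a memref<?xf32> with an index
// vector of type vector<4xi32> produces a single llvm.getelementptr whose
// result is a vector of four f32 pointers, one address per lane.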
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType =
      LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

namespace {

/// Conversion pattern for a vector.bitcast.
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
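///
/// Example (illustrative; exact printed forms may differ):
/// ```
///   %t = vector.flat_transpose %m {rows = 4 : i32, columns = 4 : i32}
///     : vector<16xf32> -> vector<16xf32>
/// ```
/// becomes:
/// ```
///   %t = llvm.intr.matrix.transpose %m {rows = 4 : i32, columns = 4 : i32}
///     : vector<16xf32> into vector<16xf32>
/// ```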
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    auto adaptor = LoadOrStoreOpAdaptor(operands);
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
                                    align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
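///
/// A sketch of the expected lowering (illustrative; exact printed forms may
/// differ):
/// ```
///   %g = vector.gather %base[%i][%idxs], %mask, %pass
///     : memref<?xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32>
///       into vector<4xf32>
/// ```
/// becomes an llvm.intr.masked.gather over the vector of per-lane pointers
/// computed by a single llvm.getelementptr from %base and %idxs.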
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
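///
/// A sketch of the expected lowering (illustrative; exact printed forms may
/// differ):
/// ```
///   vector.compressstore %base[%i], %mask, %value
///     : memref<?xf32>, vector<8xi1>, vector<8xf32>
/// ```
/// becomes an llvm.intr.masked.compressstore on the strided element pointer
/// computed from %base and %i.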
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};

class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the individual values.
    Type eltType;
    if (auto arrayType = llvmType.dyn_cast<LLVM::LLVMArrayType>())
      eltType = arrayType.getElementType();
    else
      eltType = llvmType.cast<VectorType>().getElementType();
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 eltType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///   vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///   llvm.intr.fmuladd %va, %va, %va:
///     (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///     -> !llvm<"<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
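    // If the position is more than one level deep, the containing 1-D vector
    // must first be extracted so that the scalar can be inserted into it; the
    // updated 1-D vector is then written back into the array below.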
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %r = splat %f0 : vector<2x4xf32>
///   %va = vector.extract %a[0] : vector<2x4xf32>
///   %vb = vector.extract %b[0] : vector<2x4xf32>
///   %vc = vector.extract %c[0] : vector<2x4xf32>
///   %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///   %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///   %va2 = vector.extract %a[1] : vector<2x4xf32>
///   %vb2 = vector.extract %b[1] : vector<2x4xf32>
///   %vc2 = vector.extract %c[1] : vector<2x4xf32>
///   %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///   %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///   // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. When the ranks differ, a properly ranked subvector
// must first be extracted from the destination to insert into, so:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source into the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each position along the outermost dimension of the
// slice:
//   1. the proper subvector (or element) is extracted from the source
//   2. for a subvector, the matching subvector is extracted from the
//      destination and a new, lower-rank InsertStridedSlice op is created to
//      insert the source subvector into it
//   3. the result is inserted into the proper place in the destination
//   4. the op is replaced by the accumulated result.
// The new, lower-rank InsertStridedSlice from step 2. will be picked up again
// by this same pattern, recursing until rank 1.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from the
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If there is no layout or the layout is the identity, this is contiguous
  // by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from the shape. This can only
  // ever work in static cases because MemRefType is underspecified to
  // represent contiguous dynamic shapes in other ways than with just
  // empty/identity layout.
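  // For example (illustrative): a memref<4x8xf32> with strides [8, 1] passes
  // the check below because strides[0] == strides[1] * sizes[1] (8 == 1 * 8),
  // whereas strides [16, 1] would leave a gap between rows and fail it.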
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts are supported at the moment.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers are supported at the moment.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only static strides are supported for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create the descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set the allocated pointer.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set the aligned pointer.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill in offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill in the size and stride descriptors of the memref.
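    // Each static size and stride is materialized as an LLVM constant and
    // written into the corresponding field of the target descriptor.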
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure the element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll the vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // A single offset can be shuffled more efficiently.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.add<VectorFMAOpNDRewritePattern,
               VectorInsertStridedSliceOpDifferentRankRewritePattern,
               VectorInsertStridedSliceOpSameRankRewritePattern,
               VectorExtractStridedSliceOpConversion>(ctx);
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>(
          converter);
  // Transfer ops with rank > 1 are handled by VectorToSCF.
  populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<VectorMatmulOpConversion>(converter);
  patterns.add<VectorFlatTransposeOpConversion>(converter);
}
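
// Usage sketch (illustrative, not part of this file): a conversion pass would
// typically wire these patterns into a partial conversion roughly as follows;
// the pass scaffolding (`getContext`, `getOperation`, `signalPassFailure`) is
// assumed standard pass boilerplate.
//
//   LLVMTypeConverter converter(&getContext());
//   RewritePatternSet patterns(&getContext());
//   populateVectorToLLVMConversionPatterns(converter, patterns);
//   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
//   LLVMConversionTarget target(getContext());
//   if (failed(applyPartialConversion(getOperation(), target,
//                                     std::move(patterns))))
//     signalPassFailure();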