//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
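// For example (illustrative): calling this on [0, 1, 2, 3] with dropFront = 1
// and dropBack = 1 yields {1, 2}.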
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
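// For example, with dim = 4, an offset %off, and enableIndexOptimizations =
// true, this emits IR along these lines (illustrative only, not verbatim
// output):
//   %0 = constant dense<[0, 1, 2, 3]> : vector<4xi32>
//   %1 = index_cast %off : index to i32
//   %2 = splat %1 : vector<4xi32>
//   %3 = addi %2, %0 : vector<4xi32>
//   %4 = index_cast %b : index to i32
//   %5 = splat %4 : vector<4xi32>
//   %mask = cmpi "slt", %3, %5 : vector<4xi32>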
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI32VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
    idxType = rewriter.getI32Type();
  } else {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI64VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = rewriter.create<IndexCastOp>(loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = rewriter.create<IndexCastOp>(loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}

// Helper that returns data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Helper that returns the base address of a memref.
static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, MemRefType memRefType, Value &base) {
  // Inspect stride and offset structure.
  //
  // TODO: flat memory only for now, generalize
  //
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
      offset != 0 || memRefType.getMemorySpace() != 0)
    return failure();
  base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
  return success();
}

// Helper that returns a vector of pointers, given a memref base and a vector
// of indices.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value indices,
                                    MemRefType memRefType, VectorType vType,
                                    Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer is
// always in address space 0, so an addrspacecast must be used when the
// source/dst memrefs are not in address space 0.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt);
  if (memRefType.getMemorySpace() == 0)
    return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
  return rewriter.create<LLVM::AddrSpaceCastOp>(loc, pType, ptr);
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.bitcast.
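/// For example (illustrative):
/// ```
///   %1 = vector.bitcast %0 : vector<2xf32> to vector<1xf64>
/// ```
/// becomes a single llvm.bitcast on the converted 1-D operand.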
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
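/// For example (illustrative, following the op documentation):
/// ```
///   %c = vector.matrix_multiply %a, %b
///     { lhs_rows = 4: i32, lhs_columns = 16: i32, rhs_columns = 3: i32 }
///     : (vector<64xf64>, vector<48xf64>) -> vector<12xf64>
/// ```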
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
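/// For example (illustrative, following the op documentation):
/// ```
///   %1 = vector.flat_transpose %0 { rows = 4: i32, columns = 4: i32 }
///     : vector<16xf32> -> vector<16xf32>
/// ```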
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Conversion pattern for a vector.maskedload.
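/// The op reads from a base at given indices, under a mask, filling the
/// masked-off lanes from a pass-thru vector, e.g. (illustrative syntax):
/// ```
///   %0 = vector.maskedload %base[%i], %mask, %pass_thru
///     : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// and maps 1-1 to llvm.intr.masked.load on a bitcast pointer.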
class VectorMaskedLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::MaskedLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::MaskedLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MaskedLoadOp load, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = load->getLoc();
    auto adaptor = vector::MaskedLoadOpAdaptor(operands);
    MemRefType memRefType = load.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    auto vtype = typeConverter->convertType(load.getResultVectorType());
    Value dataPtr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefType, vtype);

    rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
        load, vtype, ptr, adaptor.mask(), adaptor.pass_thru(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.maskedstore.
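/// The dual of vector.maskedload, e.g. (illustrative syntax):
/// ```
///   vector.maskedstore %base[%i], %mask, %value
///     : memref<?xf32>, vector<16xi1>, vector<16xf32>
/// ```
/// maps 1-1 to llvm.intr.masked.store.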
class VectorMaskedStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::MaskedStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::MaskedStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MaskedStoreOp store, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = store->getLoc();
    auto adaptor = vector::MaskedStoreOpAdaptor(operands);
    MemRefType memRefType = store.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    auto vtype = typeConverter->convertType(store.getValueVectorType());
    Value dataPtr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefType, vtype);

    rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
        store, adaptor.value(), ptr, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.gather.
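/// E.g. (illustrative syntax):
/// ```
///   %0 = vector.gather %base[%v], %mask, %pass_thru
///     : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
///       into vector<16xf32>
/// ```
/// The index vector %v is turned into a vector of pointers with a GEP on the
/// base, which then feeds llvm.intr.masked.gather.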
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), gather.getMemRefType(),
                                  align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
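/// E.g. (illustrative syntax):
/// ```
///   vector.scatter %base[%v], %mask, %value
///     : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
/// ```
/// As for vector.gather, the index vector becomes a vector of pointers that
/// feeds llvm.intr.masked.scatter.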
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), scatter.getMemRefType(),
                                  align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
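/// E.g. (illustrative syntax):
/// ```
///   %0 = vector.expandload %base[%i], %mask, %pass_thru
///     : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// maps 1-1 to llvm.intr.masked.expandload.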
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getResultVectorType());
    Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                           adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
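/// E.g. (illustrative syntax):
/// ```
///   vector.compressstore %base[%i], %mask, %value
///     : memref<?xf32>, vector<16xi1>, vector<16xf32>
/// ```
/// maps 1-1 to llvm.intr.masked.compressstore.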
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                           adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.value(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
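/// E.g. (illustrative syntax):
/// ```
///   %0 = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// maps to llvm.intr.vector.reduce.fadd, with the reassociation behavior
/// controlled by the flag below.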
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};

/// Conversion pattern for a vector.create_mask (1-D only).
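/// E.g. (illustrative syntax):
/// ```
///   %0 = vector.create_mask %size : vector<16xi1>
/// ```
/// sets exactly the lanes with index < %size, via the vector comparison
/// helper above.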
class VectorCreateMaskOpConversion
    : public ConvertOpToLLVMPattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(LLVMTypeConverter &typeConv,
                                        bool enableIndexOpt)
      : ConvertOpToLLVMPattern<vector::CreateMaskOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(vector::CreateMaskOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};

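/// Conversion pattern for a vector.shuffle, e.g. (illustrative syntax):
/// ```
///   %0 = vector.shuffle %a, %b [0, 4, 1, 5] : vector<4xf32>, vector<4xf32>
/// ```
/// Rank-1 shuffles of identically typed operands map directly to
/// llvm.shufflevector; all other cases are unrolled into extract/insert pairs.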
class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///    -> !llvm<"<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, for each slice of the source along the
// leading dimension:
//   1. the corresponding subvector (or element) is extracted from the source
//   2. at vector granularity, the matching subvector is extracted from the
//   destination and a rank-reduced InsertStridedSliceOp inserts the source
//   slice into it
//   3. the result is inserted back into the destination at the strided offset
// The rank-reduced InsertStridedSliceOp from step 2. is picked up again by
// this same pattern until the recursion bottoms out at the element level.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
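/// For example (illustrative): a memref<4x8xf32> with identity layout yields
/// strides [8, 1], while a rank-2 memref with static strides [16, 1] is not
/// contiguous (16 != 1 * 8) and yields None.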
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from the shapes. This can only
  // work for static shapes, since MemRefType has no way to represent
  // contiguous dynamic shapes other than the empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

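/// Conversion pattern for a vector.type_cast, e.g. (illustrative syntax):
/// ```
///   %1 = vector.type_cast %0 : memref<8x8xf32> to memref<vector<8x8xf32>>
/// ```
/// Only memrefs with static shape and contiguous strides are handled; the
/// pattern rebuilds the target memref descriptor around bitcast pointers.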
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
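///
/// E.g. a 1-D masked read (illustrative syntax):
/// ```
///   %f = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<16xf32>
/// ```
/// becomes an llvm.intr.masked.load of the bitcast pointer, with a comparison
/// mask computed from the memref bound and a splat of %pad as pass-thru.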
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  explicit VectorTransferConversion(LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertOpToLLVMPattern<ConcreteOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();
    // Only contiguous source buffers supported atm.
    auto strides = computeContiguousStrides(memRefType);
    if (!strides)
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();

    if (auto memrefVectorElementType =
            memRefType.getElementType().template dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that the memref vector type is a suffix of 'vectorType'.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
    }

    // 1. Get the source/dst address as an LLVM vector pointer.
    VectorType vtp = xferOp.getVectorType();
    Value dataPtr = this->getStridedElementPtr(
        loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
    Value vectorDataPtr =
        castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));

    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // 4. Let dim be the memref dimension; compute the vector comparison mask:
1244     //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
1245     //
1246     // TODO: when the leaf transfer rank is k > 1, we need the last `k`
1247     //       dimensions here.
1248     unsigned vecWidth = LLVM::getVectorNumElements(vtp).getFixedValue();
1249     unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
1250     Value off = xferOp.indices()[lastIndex];
1251     Value dim = rewriter.create<DimOp>(loc, xferOp.source(), lastIndex);
1252     Value mask = buildVectorComparison(
1253         rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);
1254 
1255     // 5. Rewrite as a masked read / write.
1256     return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(), loc,
1257                                        xferOp, operands, vectorDataPtr, mask);
1258   }
1259 
1260 private:
1261   const bool enableIndexOptimizations;
1262 };
1263 
1264 class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
1265 public:
1266   using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;
1267 
  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of vectors of any shape and rank. Due to the full unrolling,
  // however, this approach is less suited for very large vectors.
1277   //
1278   // TODO: rely solely on libc in future? something else?
1279   //
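  // For example (output shown for illustration), printing a vector<2x2xf32>
  // holding the values 1 to 4 fully unrolls into open/value/comma/close/
  // newline calls that produce:
  //   ( ( 1, 2 ), ( 3, 4 ) )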
1280   LogicalResult
1281   matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
1282                   ConversionPatternRewriter &rewriter) const override {
1283     auto adaptor = vector::PrintOpAdaptor(operands);
1284     Type printType = printOp.getPrintType();
1285 
1286     if (typeConverter->convertType(printType) == nullptr)
1287       return failure();
1288 
    // Make sure the element type has runtime print support.
1290     PrintConversion conversion = PrintConversion::None;
1291     VectorType vectorType = printType.dyn_cast<VectorType>();
1292     Type eltType = vectorType ? vectorType.getElementType() : printType;
1293     Operation *printer;
1294     if (eltType.isF32()) {
1295       printer = getPrintFloat(printOp);
1296     } else if (eltType.isF64()) {
1297       printer = getPrintDouble(printOp);
1298     } else if (eltType.isIndex()) {
1299       printer = getPrintU64(printOp);
1300     } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
1301       // Integers need a zero or sign extension on the operand
1302       // (depending on the source type) as well as a signed or
1303       // unsigned print method. Up to 64-bit is supported.
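      // For example (illustrative), an i8 value -1 is sign-extended and
      // printed as -1 via printI64, whereas a ui8 value 255 is zero-extended
      // and printed as 255 via printU64.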
1304       unsigned width = intTy.getWidth();
1305       if (intTy.isUnsigned()) {
1306         if (width <= 64) {
1307           if (width < 64)
1308             conversion = PrintConversion::ZeroExt64;
1309           printer = getPrintU64(printOp);
1310         } else {
1311           return failure();
1312         }
1313       } else {
1314         assert(intTy.isSignless() || intTy.isSigned());
1315         if (width <= 64) {
1316           // Note that we *always* zero extend booleans (1-bit integers),
1317           // so that true/false is printed as 1/0 rather than -1/0.
1318           if (width == 1)
1319             conversion = PrintConversion::ZeroExt64;
1320           else if (width < 64)
1321             conversion = PrintConversion::SignExt64;
1322           printer = getPrintI64(printOp);
1323         } else {
1324           return failure();
1325         }
1326       }
1327     } else {
1328       return failure();
1329     }
1330 
1331     // Unroll vector into elementary print calls.
1332     int64_t rank = vectorType ? vectorType.getRank() : 0;
1333     emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
1334               conversion);
1335     emitCall(rewriter, printOp->getLoc(), getPrintNewline(printOp));
1336     rewriter.eraseOp(printOp);
1337     return success();
1338   }
1339 
1340 private:
1341   enum class PrintConversion {
1342     // clang-format off
1343     None,
1344     ZeroExt64,
1345     SignExt64
1346     // clang-format on
1347   };
1348 
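  // Recursively unrolls `value` into elementary print calls: a rank-0 value
  // is printed directly (after the optional zero/sign extension), while a
  // rank-n value prints an opening bracket, its rank n-1 elements separated
  // by commas, and a closing bracket.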
1349   void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
1350                  Value value, VectorType vectorType, Operation *printer,
1351                  int64_t rank, PrintConversion conversion) const {
1352     Location loc = op->getLoc();
1353     if (rank == 0) {
1354       switch (conversion) {
1355       case PrintConversion::ZeroExt64:
1356         value = rewriter.create<ZeroExtendIOp>(
1357             loc, value, IntegerType::get(rewriter.getContext(), 64));
1358         break;
1359       case PrintConversion::SignExt64:
1360         value = rewriter.create<SignExtendIOp>(
1361             loc, value, IntegerType::get(rewriter.getContext(), 64));
1362         break;
1363       case PrintConversion::None:
1364         break;
1365       }
1366       emitCall(rewriter, loc, printer, value);
1367       return;
1368     }
1369 
1370     emitCall(rewriter, loc, getPrintOpen(op));
1371     Operation *printComma = getPrintComma(op);
1372     int64_t dim = vectorType.getDimSize(0);
1373     for (int64_t d = 0; d < dim; ++d) {
1374       auto reducedType =
1375           rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
1376       auto llvmType = typeConverter->convertType(
1377           rank > 1 ? reducedType : vectorType.getElementType());
1378       Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
1379                                    llvmType, rank, d);
1380       emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
1381                 conversion);
1382       if (d != dim - 1)
1383         emitCall(rewriter, loc, printComma);
1384     }
1385     emitCall(rewriter, loc, getPrintClose(op));
1386   }
1387 
1388   // Helper to emit a call.
1389   static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
1390                        Operation *ref, ValueRange params = ValueRange()) {
1391     rewriter.create<LLVM::CallOp>(loc, TypeRange(),
1392                                   rewriter.getSymbolRefAttr(ref), params);
1393   }
1394 
  // Helper that looks up the printer function, declaring it on first use.
1396   static Operation *getPrint(Operation *op, StringRef name,
1397                              ArrayRef<Type> params) {
1398     auto module = op->getParentOfType<ModuleOp>();
1399     auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
1400     if (func)
1401       return func;
1402     OpBuilder moduleBuilder(module.getBodyRegion());
1403     return moduleBuilder.create<LLVM::LLVMFuncOp>(
1404         op->getLoc(), name,
1405         LLVM::LLVMFunctionType::get(LLVM::LLVMVoidType::get(op->getContext()),
1406                                     params));
1407   }
1408 
  // Helpers for the individual runtime printer functions.
1410   Operation *getPrintI64(Operation *op) const {
1411     return getPrint(op, "printI64", IntegerType::get(op->getContext(), 64));
1412   }
1413   Operation *getPrintU64(Operation *op) const {
1414     return getPrint(op, "printU64", IntegerType::get(op->getContext(), 64));
1415   }
1416   Operation *getPrintFloat(Operation *op) const {
1417     return getPrint(op, "printF32", Float32Type::get(op->getContext()));
1418   }
1419   Operation *getPrintDouble(Operation *op) const {
1420     return getPrint(op, "printF64", Float64Type::get(op->getContext()));
1421   }
1422   Operation *getPrintOpen(Operation *op) const {
1423     return getPrint(op, "printOpen", {});
1424   }
1425   Operation *getPrintClose(Operation *op) const {
1426     return getPrint(op, "printClose", {});
1427   }
1428   Operation *getPrintComma(Operation *op) const {
1429     return getPrint(op, "printComma", {});
1430   }
1431   Operation *getPrintNewline(Operation *op) const {
1432     return getPrint(op, "printNewline", {});
1433   }
1434 };
1435 
/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. a direct shuffle when there is a single offset, or
///   2. extract + lower-rank strided_slice + insert for the n-D case.
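///
/// For example (a sketch with illustrative values), the single-offset case
///   %1 = vector.extract_strided_slice %0
///        {offsets = [2], sizes = [2], strides = [1]}
///        : vector<4xf32> to vector<2xf32>
/// lowers to the direct shuffle
///   %1 = vector.shuffle %0, %0 [2, 3] : vector<4xf32>, vector<4xf32>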
1439 class VectorExtractStridedSliceOpConversion
1440     : public OpRewritePattern<ExtractStridedSliceOp> {
1441 public:
1442   VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
1443       : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
1444     // This pattern creates recursive ExtractStridedSliceOp, but the recursion
1445     // is bounded as the rank is strictly decreasing.
1446     setHasBoundedRewriteRecursion();
1447   }
1448 
1449   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1450                                 PatternRewriter &rewriter) const override {
1451     auto dstType = op.getType();
1452 
1453     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1454 
1455     int64_t offset =
1456         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1457     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1458     int64_t stride =
1459         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1460 
1461     auto loc = op.getLoc();
1462     auto elemType = dstType.getElementType();
1463     assert(elemType.isSignlessIntOrIndexOrFloat());
1464 
    // A single offset can be lowered more efficiently to a direct shuffle.
1466     if (op.offsets().getValue().size() == 1) {
1467       SmallVector<int64_t, 4> offsets;
1468       offsets.reserve(size);
1469       for (int64_t off = offset, e = offset + size * stride; off < e;
1470            off += stride)
1471         offsets.push_back(off);
1472       rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
1473                                              op.vector(),
1474                                              rewriter.getI64ArrayAttr(offsets));
1475       return success();
1476     }
1477 
    // Otherwise, extract/insert around a lower-rank ExtractStridedSliceOp.
1479     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1480                                              rewriter.getZeroAttr(elemType));
1481     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1482     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1483          off += stride, ++idx) {
1484       Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
1489       res = insertOne(rewriter, loc, extracted, res, idx);
1490     }
1491     rewriter.replaceOp(op, res);
1492     return success();
1493   }
1494 };
1495 
1496 } // namespace
1497 
1498 /// Populate the given list with patterns that convert from Vector to LLVM.
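/// A typical use from a lowering pass looks like the following (a sketch;
/// the flag values and pass boilerplate are illustrative):
///   LLVMTypeConverter converter(&getContext());
///   OwningRewritePatternList patterns;
///   populateVectorToLLVMConversionPatterns(
///       converter, patterns, /*reassociateFPReductions=*/false,
///       /*enableIndexOptimizations=*/true);
///   LLVMConversionTarget target(getContext());
///   if (failed(applyPartialConversion(getOperation(), target,
///                                     std::move(patterns))))
///     signalPassFailure();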
1499 void mlir::populateVectorToLLVMConversionPatterns(
1500     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1501     bool reassociateFPReductions, bool enableIndexOptimizations) {
1502   MLIRContext *ctx = converter.getDialect()->getContext();
1503   // clang-format off
1504   patterns.insert<VectorFMAOpNDRewritePattern,
1505                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1506                   VectorInsertStridedSliceOpSameRankRewritePattern,
1507                   VectorExtractStridedSliceOpConversion>(ctx);
1508   patterns.insert<VectorReductionOpConversion>(
1509       converter, reassociateFPReductions);
1510   patterns.insert<VectorCreateMaskOpConversion,
1511                   VectorTransferConversion<TransferReadOp>,
1512                   VectorTransferConversion<TransferWriteOp>>(
1513       converter, enableIndexOptimizations);
1514   patterns
1515       .insert<VectorBitCastOpConversion,
1516               VectorShuffleOpConversion,
1517               VectorExtractElementOpConversion,
1518               VectorExtractOpConversion,
1519               VectorFMAOp1DConversion,
1520               VectorInsertElementOpConversion,
1521               VectorInsertOpConversion,
1522               VectorPrintOpConversion,
1523               VectorTypeCastOpConversion,
1524               VectorMaskedLoadOpConversion,
1525               VectorMaskedStoreOpConversion,
1526               VectorGatherOpConversion,
1527               VectorScatterOpConversion,
1528               VectorExpandLoadOpConversion,
1529               VectorCompressStoreOpConversion>(converter);
1530   // clang-format on
1531 }
1532 
1533 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1534     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1535   patterns.insert<VectorMatmulOpConversion>(converter);
1536   patterns.insert<VectorFlatTransposeOpConversion>(converter);
1537 }
1538