xref: /llvm-project/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (revision 563879b6f9465982b422a69a901e3d84e7cb7764)
//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}
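
// For instance (illustrative): given tp = vector<2x3x4xf32>,
// reducedVectorTypeFront yields vector<3x4xf32> and reducedVectorTypeBack
// yields vector<4xf32>.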

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}
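
// For instance (illustrative): with arrayAttr = [0, 1, 2, 3], dropFront = 1
// and dropBack = 1, this returns {1, 2}.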

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI32VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
    idxType = rewriter.getI32Type();
  } else {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI64VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = rewriter.create<IndexCastOp>(loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = rewriter.create<IndexCastOp>(loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}
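
// Illustrative sketch of the IR built above for dim = 4 with index
// optimizations enabled and an offset `%off` (SSA names and exact printed
// syntax are illustrative and may vary across versions):
//   %cst = constant dense<[0, 1, 2, 3]> : vector<4xi32>
//   %o   = index_cast %off : index to i32
//   %ov  = splat %o : vector<4xi32>
//   %idx = addi %ov, %cst : vector<4xi32>
//   %b32 = index_cast %b : index to i32
//   %bv  = splat %b32 : vector<4xi32>
//   %m   = cmpi "slt", %idx, %bv : vector<4xi32>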

// Helper that returns the preferred data layout alignment for the element
// type of the memref accessed by an operation.
template <typename T>
LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, T op,
                                 unsigned &align) {
  Type elementTy =
      typeConverter.convertType(op.getMemRefType().getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy.cast<LLVM::LLVMType>(),
                                     typeConverter.getDataLayout());
  return success();
}

// Helper that returns the base address of a memref.
static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, MemRefType memRefType, Value &base) {
  // Inspect stride and offset structure.
  //
  // TODO: flat memory only for now, generalize
  //
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
      offset != 0 || memRefType.getMemorySpace() != 0)
    return failure();
  base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
  return success();
}

// Helper that returns a pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns a bit-casted pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Type type, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = type.template cast<LLVM::LLVMType>().getPointerTo();
  base = rewriter.create<LLVM::BitcastOp>(loc, pType, base);
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns vector of pointers given a memref base and an index
// vector.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value indices,
                                    MemRefType memRefType, VectorType vType,
                                    Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::LLVMType::getVectorTy(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};
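
// Illustrative example (attribute values and printed syntax are illustrative):
// a 2x3 times 3x4 product on flattened 1-D vectors such as
//   %c = vector.matrix_multiply %a, %b
//          {lhs_rows = 2 : i32, lhs_columns = 3 : i32, rhs_columns = 4 : i32}
//          : (vector<6xf32>, vector<12xf32>) -> vector<8xf32>
// maps 1-1 onto llvm.intr.matrix.multiply with the same shape attributes on
// the converted vector types.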

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};
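
// Illustrative example: a flat 4x4 transpose such as
//   %t = vector.flat_transpose %m {rows = 4 : i32, columns = 4 : i32}
//          : vector<16xf32> -> vector<16xf32>
// maps 1-1 onto llvm.intr.matrix.transpose with the same rows/columns
// attributes.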

/// Conversion pattern for a vector.maskedload.
class VectorMaskedLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::MaskedLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::MaskedLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MaskedLoadOp load, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = load->getLoc();
    auto adaptor = vector::MaskedLoadOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), load, align)))
      return failure();

    auto vtype = typeConverter->convertType(load.getResultVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), load.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
        load, vtype, ptr, adaptor.mask(), adaptor.pass_thru(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};
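
// Illustrative sketch (op syntax of this vintage, without indices): a load
// such as
//   %l = vector.maskedload %base, %mask, %pass_thru
//          : memref<16xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
// becomes an llvm.intr.masked.load through a pointer bit-cast to the vector
// type, with the alignment resolved from the data layout as above.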

/// Conversion pattern for a vector.maskedstore.
class VectorMaskedStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::MaskedStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::MaskedStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MaskedStoreOp store, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = store->getLoc();
    auto adaptor = vector::MaskedStoreOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), store, align)))
      return failure();

    auto vtype = typeConverter->convertType(store.getValueVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), store.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
        store, adaptor.value(), ptr, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.gather.
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), expand.getMemRefType(),
                          ptr)))
      return failure();

    auto vType = expand.getResultVectorType();
    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, typeConverter->convertType(vType), ptr, adaptor.mask(),
        adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(),
                          compress.getMemRefType(), ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.value(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};
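
// Illustrative example: a floating-point reduction such as
//   %0 = vector.reduction "add", %v : vector<16xf32> into f32
// becomes llvm.intr.vector.reduce.fadd seeded with a zero constant (or with
// the optional accumulator operand when present); the integer kinds map 1-1
// onto the corresponding llvm.intr.vector.reduce.* intrinsics.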

/// Conversion pattern for a vector.create_mask (1-D only).
class VectorCreateMaskOpConversion
    : public ConvertOpToLLVMPattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(LLVMTypeConverter &typeConv,
                                        bool enableIndexOpt)
      : ConvertOpToLLVMPattern<vector::CreateMaskOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(vector::CreateMaskOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op->getResult(0).getType().cast<VectorType>();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};
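
// Illustrative sketch: a rank-1 mask such as
//   %m = vector.create_mask %b : vector<4xi1>
// becomes the comparison built by buildVectorComparison above, i.e.
//   [0, 1, 2, 3] < [%b, %b, %b, %b]
// computed on i32 (with index optimizations) or i64 vectors.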

class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one at a time.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};
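
// Illustrative example: a rank-1 shuffle on identical operand types such as
//   %s = vector.shuffle %a, %b [0, 2] : vector<2xf32>, vector<2xf32>
// maps directly onto llvm.shufflevector, whereas higher-rank shuffles (or
// mismatched 1-D operand types) are scalarized into the extract/insert chain
// above.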

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of an element from a 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};
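
// Illustrative sketch: an extraction such as
//   %e = vector.extract %v[0, 1] : vector<4x8xf32>
// becomes an llvm.extractvalue of the 1-D subvector at position [0] followed
// by an llvm.extractelement at constant index 1.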

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
///    -> !llvm<"<8 x float>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};
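
// Illustrative sketch: an insertion such as
//   %r = vector.insert %e, %v[0, 1] : f32 into vector<4x8xf32>
// becomes an llvm.extractvalue of the 1-D subvector at [0], an
// llvm.insertelement of %e at constant index 1, and an llvm.insertvalue that
// puts the updated subvector back at [0].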

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, we reduce the problem one rank at a time;
// for each slice of the source along its most major dimension:
//   1. the proper subvector (or element) is extracted from the source
//   2. if the slice is itself a vector, the matching subvector is extracted
//   from the destination and a new, lower-rank InsertStridedSlice op is
//   created to insert the former into the latter
//   3. the result is inserted back into the destination vector at the proper
//   offset.
// The new InsertStridedSlice from step 2. will be picked up again by this
// same pattern, with the recursion bounded by the strictly decreasing rank.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified when it comes
  // to representing contiguous dynamic shapes in any way other than with an
  // empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}
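
// For instance (illustrative): an identity-layout memref<4x8xf32> yields
// strides [8, 1] and is trivially contiguous, whereas an explicit layout with
// strides [16, 1] on the same shape fails the check above because 16 != 1 * 8.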

class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMType>();
    if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMType>();
    if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = LLVM::LLVMType::getInt64Ty(rewriter.getContext());

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};
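
// Illustrative example: the pattern rewrites
//   %v = vector.type_cast %a : memref<8x8xf32> to memref<vector<8x8xf32>>
// by rebuilding the target memref descriptor around bit-cast allocated and
// aligned pointers, a zero offset, and constant sizes/strides.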

/// Conversion pattern that converts a 1-D vector transfer read/write op into
/// a sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  explicit VectorTransferConversion(LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertOpToLLVMPattern<ConcreteOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    // Only contiguous source memrefs supported atm.
    auto strides = computeContiguousStrides(xferOp.getMemRefType());
    if (!strides)
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();
    MemRefType memRefType = xferOp.getMemRefType();

    if (auto memrefVectorElementType =
            memRefType.getElementType().dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that the memref vector element type is a suffix of `vectorType`.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
    }

    // 1. Get the source/dst address as an LLVM vector pointer.
    //    The vector pointer would always be on address space 0, therefore
    //    addrspacecast shall be used when source/dst memrefs are not on
    //    address space 0.
    // TODO: support alignment when possible.
    Value dataPtr = this->getStridedElementPtr(
        loc, memRefType, adaptor.memref(), adaptor.indices(), rewriter);
    auto vecTy =
        toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
    Value vectorDataPtr;
    if (memRefType.getMemorySpace() == 0)
      vectorDataPtr =
          rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
    else
      vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
          loc, vecTy.getPointerTo(), dataPtr);

    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // 4. With dim the memref dimension, compute the vector comparison mask:
    //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
    unsigned vecWidth = vecTy.getVectorNumElements();
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value off = xferOp.indices()[lastIndex];
    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
    Value mask = buildVectorComparison(
        rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(), loc,
                                       xferOp, operands, vectorDataPtr, mask);
  }

private:
  const bool enableIndexOptimizations;
};
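
// Illustrative sketch of the steps above: a masked 1-D read such as
//   %f = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<8xf32>
// becomes, roughly, a bitcast of the strided element pointer to a vector
// pointer, a comparison mask
//   [ %i + 0 .. %i + 7 ] < [ dim(%A, 0) .. dim(%A, 0) ]
// built by buildVectorComparison, and an llvm.intr.masked.load with the
// splatted padding value as pass-through.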
1263 
1264 class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
1265 public:
1266   using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;
1267 
1268   // Proof-of-concept lowering implementation that relies on a small
1269   // runtime support library, which only needs to provide a few printing
1270   // methods (a single value per supported data type, opening/closing
1271   // bracket, comma, newline). The lowering fully unrolls a vector in
1272   // terms of these elementary printing operations. The advantage of this
1273   // approach is that the library can remain unaware of all low-level
1274   // implementation details of vectors while still supporting output of
1275   // vectors of any shape and rank. Due to the full unrolling, however,
1276   // this approach is less suited for very large vectors.
1277   //
1278   // TODO: rely solely on libc in future? something else?
1279   //
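  // For example, `vector.print %v : vector<2xf32>` is emitted, in terms of
  // the support library, roughly as:
  //   printOpen(); printF32(v[0]); printComma(); printF32(v[1]);
  //   printClose(); printNewline();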
1280   LogicalResult
1281   matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
1282                   ConversionPatternRewriter &rewriter) const override {
1283     auto adaptor = vector::PrintOpAdaptor(operands);
1284     Type printType = printOp.getPrintType();
1285 
1286     if (typeConverter->convertType(printType) == nullptr)
1287       return failure();
1288 
1289     // Make sure element type has runtime support.
1290     PrintConversion conversion = PrintConversion::None;
1291     VectorType vectorType = printType.dyn_cast<VectorType>();
1292     Type eltType = vectorType ? vectorType.getElementType() : printType;
1293     Operation *printer;
1294     if (eltType.isF32()) {
1295       printer = getPrintFloat(printOp);
1296     } else if (eltType.isF64()) {
1297       printer = getPrintDouble(printOp);
1298     } else if (eltType.isIndex()) {
1299       printer = getPrintU64(printOp);
1300     } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
1301       // Integers need a zero or sign extension on the operand
1302       // (depending on the source type) as well as a signed or
1303       // unsigned print method. Up to 64-bit is supported.
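      // E.g., a ui8 value 255 is zero-extended and printed via printU64 as
      // 255, while a signless i8 value -1 is sign-extended and printed via
      // printI64 as -1.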
1304       unsigned width = intTy.getWidth();
1305       if (intTy.isUnsigned()) {
1306         if (width <= 64) {
1307           if (width < 64)
1308             conversion = PrintConversion::ZeroExt64;
1309           printer = getPrintU64(printOp);
1310         } else {
1311           return failure();
1312         }
1313       } else {
1314         assert(intTy.isSignless() || intTy.isSigned());
1315         if (width <= 64) {
1316           // Note that we *always* zero extend booleans (1-bit integers),
1317           // so that true/false is printed as 1/0 rather than -1/0.
1318           if (width == 1)
1319             conversion = PrintConversion::ZeroExt64;
1320           else if (width < 64)
1321             conversion = PrintConversion::SignExt64;
1322           printer = getPrintI64(printOp);
1323         } else {
1324           return failure();
1325         }
1326       }
1327     } else {
1328       return failure();
1329     }
1330 
1331     // Unroll vector into elementary print calls.
1332     int64_t rank = vectorType ? vectorType.getRank() : 0;
1333     emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
1334               conversion);
1335     emitCall(rewriter, printOp->getLoc(), getPrintNewline(printOp));
1336     rewriter.eraseOp(printOp);
1337     return success();
1338   }
1339 
1340 private:
1341   enum class PrintConversion {
1342     // clang-format off
1343     None,
1344     ZeroExt64,
1345     SignExt64
1346     // clang-format on
1347   };
1348 
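  // Recursively unrolls `value` into elementary print calls, one scalar at a
  // time. E.g., a vector<2x2xi32> holding [[1, 2], [3, 4]] prints roughly as
  //   ( ( 1, 2 ), ( 3, 4 ) )
  // using printOpen/printComma/printClose around the scalar printer.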
1349   void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
1350                  Value value, VectorType vectorType, Operation *printer,
1351                  int64_t rank, PrintConversion conversion) const {
1352     Location loc = op->getLoc();
1353     if (rank == 0) {
1354       switch (conversion) {
1355       case PrintConversion::ZeroExt64:
1356         value = rewriter.create<ZeroExtendIOp>(
1357             loc, value, LLVM::LLVMType::getInt64Ty(rewriter.getContext()));
1358         break;
1359       case PrintConversion::SignExt64:
1360         value = rewriter.create<SignExtendIOp>(
1361             loc, value, LLVM::LLVMType::getInt64Ty(rewriter.getContext()));
1362         break;
1363       case PrintConversion::None:
1364         break;
1365       }
1366       emitCall(rewriter, loc, printer, value);
1367       return;
1368     }
1369 
1370     emitCall(rewriter, loc, getPrintOpen(op));
1371     Operation *printComma = getPrintComma(op);
1372     int64_t dim = vectorType.getDimSize(0);
1373     for (int64_t d = 0; d < dim; ++d) {
1374       auto reducedType =
1375           rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
1376       auto llvmType = typeConverter->convertType(
1377           rank > 1 ? reducedType : vectorType.getElementType());
1378       Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
1379                                    llvmType, rank, d);
1380       emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
1381                 conversion);
1382       if (d != dim - 1)
1383         emitCall(rewriter, loc, printComma);
1384     }
1385     emitCall(rewriter, loc, getPrintClose(op));
1386   }
1387 
1388   // Helper to emit a call.
1389   static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
1390                        Operation *ref, ValueRange params = ValueRange()) {
1391     rewriter.create<LLVM::CallOp>(loc, TypeRange(),
1392                                   rewriter.getSymbolRefAttr(ref), params);
1393   }
1394 
1395   // Helper that declares the printer method on first use, then looks it up.
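  // E.g., the first request for "printF32" inserts a declaration along the
  // lines of
  //   llvm.func @printF32(!llvm.float)
  // at module scope (a sketch; the exact printed form depends on the LLVM
  // dialect syntax); subsequent requests find it via symbol lookup.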
1396   static Operation *getPrint(Operation *op, StringRef name,
1397                              ArrayRef<LLVM::LLVMType> params) {
1398     auto module = op->getParentOfType<ModuleOp>();
1399     auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
1400     if (func)
1401       return func;
1402     OpBuilder moduleBuilder(module.getBodyRegion());
1403     return moduleBuilder.create<LLVM::LLVMFuncOp>(
1404         op->getLoc(), name,
1405         LLVM::LLVMType::getFunctionTy(
1406             LLVM::LLVMType::getVoidTy(op->getContext()), params,
1407             /*isVarArg=*/false));
1408   }
1409 
1410   // Helpers to declare/lookup the individual runtime printing methods.
1411   Operation *getPrintI64(Operation *op) const {
1412     return getPrint(op, "printI64",
1413                     LLVM::LLVMType::getInt64Ty(op->getContext()));
1414   }
1415   Operation *getPrintU64(Operation *op) const {
1416     return getPrint(op, "printU64",
1417                     LLVM::LLVMType::getInt64Ty(op->getContext()));
1418   }
1419   Operation *getPrintFloat(Operation *op) const {
1420     return getPrint(op, "printF32",
1421                     LLVM::LLVMType::getFloatTy(op->getContext()));
1422   }
1423   Operation *getPrintDouble(Operation *op) const {
1424     return getPrint(op, "printF64",
1425                     LLVM::LLVMType::getDoubleTy(op->getContext()));
1426   }
1427   Operation *getPrintOpen(Operation *op) const {
1428     return getPrint(op, "printOpen", {});
1429   }
1430   Operation *getPrintClose(Operation *op) const {
1431     return getPrint(op, "printClose", {});
1432   }
1433   Operation *getPrintComma(Operation *op) const {
1434     return getPrint(op, "printComma", {});
1435   }
1436   Operation *getPrintNewline(Operation *op) const {
1437     return getPrint(op, "printNewline", {});
1438   }
1439 };
1440 
1441 /// Progressive lowering of ExtractStridedSliceOp to either:
1442 ///   1. a direct shuffle when there is a single offset, or
1443 ///   2. extract + lower-rank strided_slice + insert for the n-D case.
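/// E.g., extracting offsets = [2], sizes = [2], strides = [1] from a
/// vector<4xf32> lowers to a single shuffle with mask [2, 3].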
1444 class VectorExtractStridedSliceOpConversion
1445     : public OpRewritePattern<ExtractStridedSliceOp> {
1446 public:
1447   VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
1448       : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
1449     // This pattern creates recursive ExtractStridedSliceOp, but the recursion
1450     // is bounded as the rank is strictly decreasing.
1451     setHasBoundedRewriteRecursion();
1452   }
1453 
1454   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1455                                 PatternRewriter &rewriter) const override {
1456     auto dstType = op.getResult().getType().cast<VectorType>();
1457 
1458     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1459 
1460     int64_t offset =
1461         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1462     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1463     int64_t stride =
1464         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1465 
1466     auto loc = op.getLoc();
1467     auto elemType = dstType.getElementType();
1468     assert(elemType.isSignlessIntOrIndexOrFloat());
1469 
1470     // A single offset can be lowered more efficiently to a direct shuffle.
1471     if (op.offsets().getValue().size() == 1) {
1472       SmallVector<int64_t, 4> offsets;
1473       offsets.reserve(size);
1474       for (int64_t off = offset, e = offset + size * stride; off < e;
1475            off += stride)
1476         offsets.push_back(off);
1477       rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
1478                                              op.vector(),
1479                                              rewriter.getI64ArrayAttr(offsets));
1480       return success();
1481     }
1482 
1483     // Otherwise, extract/insert around a lower-rank ExtractStridedSliceOp.
1484     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1485                                              rewriter.getZeroAttr(elemType));
1486     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1487     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1488          off += stride, ++idx) {
1489       Value one = extractOne(rewriter, loc, op.vector(), off);
1490       Value extracted = rewriter.create<ExtractStridedSliceOp>(
1491           loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
1492           getI64SubArray(op.sizes(), /*dropFront=*/1),
1493           getI64SubArray(op.strides(), /*dropFront=*/1));
1494       res = insertOne(rewriter, loc, extracted, res, idx);
1495     }
1496     rewriter.replaceOp(op, res);
1497     return success();
1498   }
1499 };
1500 
1501 } // namespace
1502 
1503 /// Populate the given list with patterns that convert from Vector to LLVM.
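/// A typical conversion driver uses this roughly as follows (a sketch; the
/// surrounding pass setup is illustrative):
///   LLVMTypeConverter converter(ctx);
///   OwningRewritePatternList patterns;
///   populateVectorToLLVMConversionPatterns(converter, patterns);
///   // ... then run applyPartialConversion with an LLVM conversion target.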
1504 void mlir::populateVectorToLLVMConversionPatterns(
1505     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1506     bool reassociateFPReductions, bool enableIndexOptimizations) {
1507   MLIRContext *ctx = converter.getDialect()->getContext();
1508   // clang-format off
1509   patterns.insert<VectorFMAOpNDRewritePattern,
1510                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1511                   VectorInsertStridedSliceOpSameRankRewritePattern,
1512                   VectorExtractStridedSliceOpConversion>(ctx);
1513   patterns.insert<VectorReductionOpConversion>(
1514       converter, reassociateFPReductions);
1515   patterns.insert<VectorCreateMaskOpConversion,
1516                   VectorTransferConversion<TransferReadOp>,
1517                   VectorTransferConversion<TransferWriteOp>>(
1518       converter, enableIndexOptimizations);
1519   patterns
1520       .insert<VectorShuffleOpConversion,
1521               VectorExtractElementOpConversion,
1522               VectorExtractOpConversion,
1523               VectorFMAOp1DConversion,
1524               VectorInsertElementOpConversion,
1525               VectorInsertOpConversion,
1526               VectorPrintOpConversion,
1527               VectorTypeCastOpConversion,
1528               VectorMaskedLoadOpConversion,
1529               VectorMaskedStoreOpConversion,
1530               VectorGatherOpConversion,
1531               VectorScatterOpConversion,
1532               VectorExpandLoadOpConversion,
1533               VectorCompressStoreOpConversion>(converter);
1534   // clang-format on
1535 }
1536 
1537 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1538     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1539   patterns.insert<VectorMatmulOpConversion>(converter);
1540   patterns.insert<VectorFlatTransposeOpConversion>(converter);
1541 }
1542