//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
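// For instance, for an `arrayAttr` holding [0, 1, 2, 3], dropFront = 1 and
// dropBack = 1 yield {1, 2} (values chosen for illustration only).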
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
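//
// For instance, with n = 4, an offset o and a bound b, the emitted IR is
// roughly the following (illustrative 64-bit index path; names are made up):
//
//   %cst = constant dense<[0, 1, 2, 3]> : vector<4xi64>
//   %o   = splat %offset : vector<4xi64>
//   %idx = addi %o, %cst : vector<4xi64>
//   %b   = splat %bound : vector<4xi64>
//   %m   = cmpi "slt", %idx, %b : vector<4xi64>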
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    SmallVector<int32_t, 4> values(dim);
    for (int64_t d = 0; d < dim; d++)
      values[d] = d;
    indices =
        rewriter.create<ConstantOp>(loc, rewriter.getI32VectorAttr(values));
    idxType = rewriter.getI32Type();
  } else {
    SmallVector<int64_t, 4> values(dim);
    for (int64_t d = 0; d < dim; d++)
      values[d] = d;
    indices =
        rewriter.create<ConstantOp>(loc, rewriter.getI64VectorAttr(values));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = rewriter.create<IndexCastOp>(loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = rewriter.create<IndexCastOp>(loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}

// Helper that returns data layout alignment of an operation with memref.
template <typename T>
LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, T op,
                                 unsigned &align) {
  Type elementTy =
      typeConverter.convertType(op.getMemRefType().getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy.cast<LLVM::LLVMType>(),
                                     typeConverter.getDataLayout());
  return success();
}

// Helper that returns the base address of a memref.
static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, MemRefType memRefType, Value &base) {
  // Inspect stride and offset structure.
  //
  // TODO: flat memory only for now, generalize
  //
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
      offset != 0 || memRefType.getMemorySpace() != 0)
    return failure();
  base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
  return success();
}

// Helper that returns a pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementType();
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns a bit-casted pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Type type, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = type.template cast<LLVM::LLVMType>().getPointerTo();
  base = rewriter.create<LLVM::BitcastOp>(loc, pType, base);
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns vector of pointers given a memref base and an index
// vector.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value indices,
                                    MemRefType memRefType, VectorType vType,
                                    Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementType();
  auto ptrsType = LLVM::LLVMType::getVectorTy(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
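///
/// Illustrative example (shapes and attribute values are made up):
/// ```
///   %c = vector.matrix_multiply %a, %b
///          { lhs_rows = 2: i32, lhs_columns = 3: i32, rhs_columns = 4: i32 }
///        : (vector<6xf32>, vector<12xf32>) -> vector<8xf32>
/// ```
/// becomes an llvm.intr.matrix.multiply carrying the same three shape
/// attributes over the converted 1-D LLVM vector types.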
class VectorMatmulOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMatmulOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MatmulOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto matmulOp = cast<vector::MatmulOp>(op);
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        op, typeConverter.convertType(matmulOp.res().getType()), adaptor.lhs(),
        adaptor.rhs(), matmulOp.lhs_rows(), matmulOp.lhs_columns(),
        matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
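///
/// Illustrative example (shapes are made up):
/// ```
///   %t = vector.flat_transpose %m { rows = 4: i32, columns = 4: i32 }
///        : vector<16xf32> -> vector<16xf32>
/// ```
/// becomes an llvm.intr.matrix.transpose with the same rows/columns
/// attributes over the converted 1-D LLVM vector type.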
class VectorFlatTransposeOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorFlatTransposeOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::FlatTransposeOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto transOp = cast<vector::FlatTransposeOp>(op);
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter.convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Conversion pattern for a vector.maskedload.
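///
/// Illustrative example (types are made up):
/// ```
///   %l = vector.maskedload %base, %mask, %pass_thru
///        : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// becomes an llvm.intr.masked.load through a pointer to the vector type,
/// with the alignment derived from the data layout of the element type.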
class VectorMaskedLoadOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMaskedLoadOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MaskedLoadOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto load = cast<vector::MaskedLoadOp>(op);
    auto adaptor = vector::MaskedLoadOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, load, align)))
      return failure();

    auto vtype = typeConverter.convertType(load.getResultVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), load.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
        load, vtype, ptr, adaptor.mask(), adaptor.pass_thru(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.maskedstore.
class VectorMaskedStoreOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMaskedStoreOpConversion(MLIRContext *context,
                                         LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MaskedStoreOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto store = cast<vector::MaskedStoreOp>(op);
    auto adaptor = vector::MaskedStoreOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, store, align)))
      return failure();

    auto vtype = typeConverter.convertType(store.getValueVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), store.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
        store, adaptor.value(), ptr, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.gather.
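///
/// A gather such as the following (illustrative syntax and types)
/// ```
///   %g = vector.gather %base, %indices, %mask, %pass_thru
///        : (memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>)
///          -> vector<16xf32>
/// ```
/// becomes an llvm.intr.masked.gather on a vector of pointers obtained from
/// a single getelementptr over the index vector.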
class VectorGatherOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorGatherOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::GatherOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto gather = cast<vector::GatherOp>(op);
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, gather, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    ValueRange v = (llvm::size(adaptor.pass_thru()) == 0) ? ValueRange({})
                                                          : adaptor.pass_thru();
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter.convertType(vType), ptrs, adaptor.mask(), v,
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorScatterOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ScatterOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto scatter = cast<vector::ScatterOp>(op);
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, scatter, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExpandLoadOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExpandLoadOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto expand = cast<vector::ExpandLoadOp>(op);
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), expand.getMemRefType(),
                          ptr)))
      return failure();

    auto vType = expand.getResultVectorType();
    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        op, typeConverter.convertType(vType), ptr, adaptor.mask(),
        adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorCompressStoreOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::CompressStoreOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto compress = cast<vector::CompressStoreOp>(op);
    auto adaptor = vector::CompressStoreOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(),
                          compress.getMemRefType(), ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        op, adaptor.value(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
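///
/// For example, an integer reduction such as (illustrative types)
/// ```
///   %0 = vector.reduction "add", %v : vector<16xi32> into i32
/// ```
/// maps to llvm.intr.experimental.vector.reduce.add, while "add"/"mul" on
/// f32/f64 map to the v2.fadd/v2.fmul variants, which take an explicit
/// accumulator and honor the reassociation flag below.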
class VectorReductionOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorReductionOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter,
                                       bool reassociateFPRed)
      : ConvertToLLVMPattern(vector::ReductionOp::getOperationName(), context,
                             typeConverter),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto reductionOp = cast<vector::ReductionOp>(op);
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter.convertType(eltType);
    if (eltType.isSignlessInteger(32) || eltType.isSignlessInteger(64)) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_add>(
            op, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_mul>(
            op, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smax>(
            op, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_and>(
            op, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_or>(
            op, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_xor>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();

    } else if (eltType.isF32() || eltType.isF64()) {
      // Floating-point reductions: add/mul/min/max.
      if (kind == "add") {
        // Optional accumulator (or zero).
        Value acc = operands.size() > 1 ? operands[1]
                                        : rewriter.create<LLVM::ConstantOp>(
                                              op->getLoc(), llvmType,
                                              rewriter.getZeroAttr(eltType));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fadd>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "mul") {
        // Optional accumulator (or one).
        Value acc = operands.size() > 1
                        ? operands[1]
                        : rewriter.create<LLVM::ConstantOp>(
                              op->getLoc(), llvmType,
                              rewriter.getFloatAttr(eltType, 1.0));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fmul>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmax>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();
    }
    return failure();
  }

private:
  const bool reassociateFPReductions;
};

/// Conversion pattern for a vector.create_mask (1-D only).
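///
/// Illustrative example:
/// ```
///   %m = vector.create_mask %b : vector<4xi1>
/// ```
/// lowers to a comparison of the constant index vector [0, 1, 2, 3] against
/// the splatted bound %b, as built by buildVectorComparison above.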
class VectorCreateMaskOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter,
                                        bool enableIndexOpt)
      : ConvertToLLVMPattern(vector::CreateMaskOp::getOperationName(), context,
                             typeConverter),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op->getResult(0).getType().cast<VectorType>();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};

class VectorShuffleOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ShuffleOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, typeConverter, loc, value, llvmType,
                                 rank, extPos);
      insert = insertOne(rewriter, typeConverter, loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return success();
  }
};

class VectorExtractElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractElementOpConversion(MLIRContext *context,
                                            LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto extractEltOp = cast<vector::ExtractElementOp>(op);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter.convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        op, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of rank n >= 2.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va :
///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
///    -> !llvm<"<8 x float>">
/// ```
class VectorFMAOp1DConversion : public ConvertToLLVMPattern {
public:
  explicit VectorFMAOp1DConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::FMAOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(op, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertElementOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto insertEltOp = cast<vector::InsertElementOp>(op);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter.convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter.convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
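//
// Illustrative example (shapes and offsets are made up):
// ```
//   %0 = vector.insert_strided_slice %src, %dst
//          {offsets = [2, 0], strides = [1]}
//        : vector<2xf32> into vector<4x4xf32>
// ```
// is rewritten, in pseudo-IR, into:
// ```
//   %e = vector.extract %dst[2] : vector<4x4xf32>
//   %i = vector.insert_strided_slice %src, %e
//          {offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
//   %0 = vector.insert %i, %dst[2] : vector<4xf32> into vector<4x4xf32>
// ```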
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, the op is rewritten slice by slice along
// the most major dimension:
//   1. each subvector (or element) of the source is extracted
//   2. for subvectors, the matching subvector is extracted from the
//   destination and a new, rank-reduced InsertStridedSlice op inserts the
//   source slice into it
//   3. the result is inserted back into the destination at the proper offset
// The rank-reduced InsertStridedSlice ops created in step 2. are picked up
// again by this same pattern until rank 1 is reached.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
  /// This pattern creates recursive InsertStridedSliceOp, but the recursion is
  /// bounded as the rank is strictly decreasing.
  bool hasBoundedRewriteRecursion() const final { return true; }
};

/// Returns true if the memory underlying `memRefType` has a contiguous layout.
/// Strides are written to `strides`.
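/// For instance, the default layout of memref<4x8xf32> has strides [8, 1]
/// and is contiguous, whereas a strided view such as
/// memref<4x8xf32, offset: 0, strides: [16, 1]> is not.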
static bool isContiguous(MemRefType memRefType,
                         SmallVectorImpl<int64_t> &strides) {
  int64_t offset;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.empty())
    return false;
  // The most minor stride must be 1, and every other stride must equal the
  // product of the next inner stride and size.
  bool isContiguous = (strides.back() == 1);
  if (isContiguous) {
    auto sizes = memRefType.getShape();
    for (int index = 0, e = static_cast<int>(strides.size()) - 1; index < e;
         ++index) {
      if (strides[index] != strides[index + 1] * sizes[index + 1]) {
        isContiguous = false;
        break;
      }
    }
  }
  return isContiguous;
}

class VectorTypeCastOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTypeCastOpConversion(MLIRContext *context,
                                      LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::TypeCastOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMType>();
    if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter.convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMType>();
    if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
      return failure();

    // Only contiguous source memrefs supported atm.
    SmallVector<int64_t, 4> strides;
    if (!isContiguous(sourceMemRefType, strides))
      return failure();

    auto int64Ty = LLVM::LLVMType::getInt64Ty(rewriter.getContext());

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(op, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
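///
/// Illustrative example:
/// ```
///   %f = vector.transfer_read %A[%i], %f0 : memref<?xf32>, vector<8xf32>
/// ```
/// becomes, for the masked case, roughly the following pseudo-IR:
/// ```
///   %ptr  = bitcast of the address of %A[%i] to !llvm<"<8 x float>*">
///   %mask = [%i + 0 .. %i + 7] < [dim %A .. dim %A]
///   %f    = masked load of %ptr under %mask, padded with splat(%f0)
/// ```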
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTransferConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertToLLVMPattern(ConcreteOp::getOperationName(), context, typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto xferOp = cast<ConcreteOp>(op);
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       op->getContext()))
      return failure();
    // Only contiguous source memrefs supported atm.
    SmallVector<int64_t, 4> strides;
    if (!isContiguous(xferOp.getMemRefType(), strides))
      return failure();

    auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };

    Location loc = op->getLoc();
    MemRefType memRefType = xferOp.getMemRefType();

    if (auto memrefVectorElementType =
            memRefType.getElementType().dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that memref vector type is a suffix of 'vectorType'.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
1253     }
1254 
1255     // 1. Get the source/dst address as an LLVM vector pointer.
1256     //    The vector pointer is always in address space 0, so an
1257     //    addrspacecast is needed when the source/dst memref lives in a
1258     //    different address space.
1259     // TODO: support alignment when possible.
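    // E.g. for an 8 x f32 transfer, the scalar element pointer computed by
    // getDataPtr below is bitcast (or addrspacecast) to a pointer to the
    // LLVM vector type <8 x float>.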
1260     Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
1261                                adaptor.indices(), rewriter);
1262     auto vecTy =
1263         toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
1264     Value vectorDataPtr;
1265     if (memRefType.getMemorySpace() == 0)
1266       vectorDataPtr =
1267           rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
1268     else
1269       vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
1270           loc, vecTy.getPointerTo(), dataPtr);
1271 
1272     if (!xferOp.isMaskedDim(0))
1273       return replaceTransferOpWithLoadOrStore(rewriter, typeConverter, loc,
1274                                               xferOp, operands, vectorDataPtr);
1275 
1276     // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
1277     // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
1278     // 4. Let dim be the memref dimension; compute the vector comparison mask:
1279     //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
1280     //
1281     // TODO: when the leaf transfer rank is k > 1, we need the last `k`
1282     //       dimensions here.
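    // E.g. with vecWidth == 4, an offset %off, and a dimension size %dim,
    // the mask computed below is [ %off + 0 < %dim, .., %off + 3 < %dim ].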
1283     unsigned vecWidth = vecTy.getVectorNumElements();
1284     unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
1285     Value off = *(xferOp.indices().begin() + lastIndex);
1286     Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
1287     Value mask = buildVectorComparison(rewriter, op, enableIndexOptimizations,
1288                                        vecWidth, dim, &off);
1289 
1290     // 5. Rewrite as a masked read / write.
1291     return replaceTransferOpWithMasked(rewriter, typeConverter, loc, xferOp,
1292                                        operands, vectorDataPtr, mask);
1293   }
1294 
1295 private:
1296   const bool enableIndexOptimizations;
1297 };
1298 
1299 class VectorPrintOpConversion : public ConvertToLLVMPattern {
1300 public:
1301   explicit VectorPrintOpConversion(MLIRContext *context,
1302                                    LLVMTypeConverter &typeConverter)
1303       : ConvertToLLVMPattern(vector::PrintOp::getOperationName(), context,
1304                              typeConverter) {}
1305 
1306   // Proof-of-concept lowering implementation that relies on a small
1307   // runtime support library, which only needs to provide a few
1308   // printing methods (single value for all data types, opening/closing
1309   // bracket, comma, newline). The lowering fully unrolls a vector
1310   // in terms of these elementary printing operations. The advantage
1311   // of this approach is that the library can remain unaware of all
1312   // low-level implementation details of vectors while still supporting
1313   // output of vectors of any shape and rank. Due to full unrolling,
1314   // however, this approach is less suited for very large vectors.
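  //
  // For instance, printing a vector<2xf32> value unrolls into the call
  // sequence print_open, print_f32, print_comma, print_f32, print_close,
  // followed by a final print_newline.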
1315   //
1316   // TODO: rely solely on libc in future? something else?
1317   //
1318   LogicalResult
1319   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
1320                   ConversionPatternRewriter &rewriter) const override {
1321     auto printOp = cast<vector::PrintOp>(op);
1322     auto adaptor = vector::PrintOpAdaptor(operands);
1323     Type printType = printOp.getPrintType();
1324 
1325     if (typeConverter.convertType(printType) == nullptr)
1326       return failure();
1327 
1328     // Make sure element type has runtime support (i1, i32, i64, f32, f64).
1329     VectorType vectorType = printType.dyn_cast<VectorType>();
1330     Type eltType = vectorType ? vectorType.getElementType() : printType;
1331     int64_t rank = vectorType ? vectorType.getRank() : 0;
1332     Operation *printer;
1333     if (eltType.isSignlessInteger(1) || eltType.isSignlessInteger(32))
1334       printer = getPrintI32(op);
1335     else if (eltType.isSignlessInteger(64))
1336       printer = getPrintI64(op);
1337     else if (eltType.isF32())
1338       printer = getPrintFloat(op);
1339     else if (eltType.isF64())
1340       printer = getPrintDouble(op);
1341     else
1342       return failure();
1343 
1344     // Unroll vector into elementary print calls.
1345     emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
1346     emitCall(rewriter, op->getLoc(), getPrintNewline(op));
1347     rewriter.eraseOp(op);
1348     return success();
1349   }
1350 
1351 private:
1352   void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
1353                  Value value, VectorType vectorType, Operation *printer,
1354                  int64_t rank) const {
1355     Location loc = op->getLoc();
1356     if (rank == 0) {
1357       if (value.getType() == LLVM::LLVMType::getInt1Ty(rewriter.getContext())) {
1358         // Convert i1 (bool) to i32 so we can use the print_i32 method.
1359         // This avoids the need for a print_i1 method with an unclear ABI.
1360         auto i32Type = LLVM::LLVMType::getInt32Ty(rewriter.getContext());
1361         auto trueVal = rewriter.create<LLVM::ConstantOp>(
1362             loc, i32Type, rewriter.getI32IntegerAttr(1));
1363         auto falseVal = rewriter.create<LLVM::ConstantOp>(
1364             loc, i32Type, rewriter.getI32IntegerAttr(0));
1365         value = rewriter.create<LLVM::SelectOp>(loc, value, trueVal, falseVal);
1366       }
1367       emitCall(rewriter, loc, printer, value);
1368       return;
1369     }
1370 
1371     emitCall(rewriter, loc, getPrintOpen(op));
1372     Operation *printComma = getPrintComma(op);
1373     int64_t dim = vectorType.getDimSize(0);
1374     for (int64_t d = 0; d < dim; ++d) {
1375       auto reducedType =
1376           rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
1377       auto llvmType = typeConverter.convertType(
1378           rank > 1 ? reducedType : vectorType.getElementType());
1379       Value nestedVal =
1380           extractOne(rewriter, typeConverter, loc, value, llvmType, rank, d);
1381       emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
1382       if (d != dim - 1)
1383         emitCall(rewriter, loc, printComma);
1384     }
1385     emitCall(rewriter, loc, getPrintClose(op));
1386   }
1387 
1388   // Helper to emit a call.
1389   static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
1390                        Operation *ref, ValueRange params = ValueRange()) {
1391     rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
1392                                   rewriter.getSymbolRefAttr(ref), params);
1393   }
1394 
1395   // Helper for printer method declaration (on first use) and lookup.
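  // E.g. the first print_f32 request declares, at module scope, a function
  // with signature void(float); later requests reuse that declaration.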
1396   static Operation *getPrint(Operation *op, StringRef name,
1397                              ArrayRef<LLVM::LLVMType> params) {
1398     auto module = op->getParentOfType<ModuleOp>();
1399     auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
1400     if (func)
1401       return func;
1402     OpBuilder moduleBuilder(module.getBodyRegion());
1403     return moduleBuilder.create<LLVM::LLVMFuncOp>(
1404         op->getLoc(), name,
1405         LLVM::LLVMType::getFunctionTy(
1406             LLVM::LLVMType::getVoidTy(op->getContext()), params,
1407             /*isVarArg=*/false));
1408   }
1409 
1410   // Helpers for method names.
1411   Operation *getPrintI32(Operation *op) const {
1412     return getPrint(op, "print_i32",
1413                     LLVM::LLVMType::getInt32Ty(op->getContext()));
1414   }
1415   Operation *getPrintI64(Operation *op) const {
1416     return getPrint(op, "print_i64",
1417                     LLVM::LLVMType::getInt64Ty(op->getContext()));
1418   }
1419   Operation *getPrintFloat(Operation *op) const {
1420     return getPrint(op, "print_f32",
1421                     LLVM::LLVMType::getFloatTy(op->getContext()));
1422   }
1423   Operation *getPrintDouble(Operation *op) const {
1424     return getPrint(op, "print_f64",
1425                     LLVM::LLVMType::getDoubleTy(op->getContext()));
1426   }
1427   Operation *getPrintOpen(Operation *op) const {
1428     return getPrint(op, "print_open", {});
1429   }
1430   Operation *getPrintClose(Operation *op) const {
1431     return getPrint(op, "print_close", {});
1432   }
1433   Operation *getPrintComma(Operation *op) const {
1434     return getPrint(op, "print_comma", {});
1435   }
1436   Operation *getPrintNewline(Operation *op) const {
1437     return getPrint(op, "print_newline", {});
1438   }
1439 };
1440 
1441 /// Progressive lowering of ExtractStridedSliceOp to either:
1442 ///   1. express single offset extract as a direct shuffle.
1443 ///   2. extract + lower rank strided_slice + insert for the n-D case.
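/// For example, in case 1, extracting with offsets = [2], sizes = [4], and
/// strides = [1] from a vector<8xf32> is rewritten as a vector.shuffle of the
/// source with itself under the mask [2, 3, 4, 5].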
1444 class VectorExtractStridedSliceOpConversion
1445     : public OpRewritePattern<ExtractStridedSliceOp> {
1446 public:
1447   using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;
1448 
1449   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1450                                 PatternRewriter &rewriter) const override {
1451     auto dstType = op.getResult().getType().cast<VectorType>();
1452 
1453     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1454 
1455     int64_t offset =
1456         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1457     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1458     int64_t stride =
1459         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1460 
1461     auto loc = op.getLoc();
1462     auto elemType = dstType.getElementType();
1463     assert(elemType.isSignlessIntOrIndexOrFloat());
1464 
1465     // Single offset can be more efficiently shuffled.
1466     if (op.offsets().getValue().size() == 1) {
1467       SmallVector<int64_t, 4> offsets;
1468       offsets.reserve(size);
1469       for (int64_t off = offset, e = offset + size * stride; off < e;
1470            off += stride)
1471         offsets.push_back(off);
1472       rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
1473                                              op.vector(),
1474                                              rewriter.getI64ArrayAttr(offsets));
1475       return success();
1476     }
1477 
1478     // Extract/insert on a lower ranked extract strided slice op.
1479     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1480                                              rewriter.getZeroAttr(elemType));
1481     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1482     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1483          off += stride, ++idx) {
1484       Value one = extractOne(rewriter, loc, op.vector(), off);
1485       Value extracted = rewriter.create<ExtractStridedSliceOp>(
1486           loc, one, getI64SubArray(op.offsets(), /* dropFront=*/1),
1487           getI64SubArray(op.sizes(), /* dropFront=*/1),
1488           getI64SubArray(op.strides(), /* dropFront=*/1));
1489       res = insertOne(rewriter, loc, extracted, res, idx);
1490     }
1491     rewriter.replaceOp(op, res);
1492     return success();
1493   }
1494   /// This pattern recursively creates ExtractStridedSliceOps, but the
1495   /// recursion is bounded because the rank strictly decreases at each step.
1496   bool hasBoundedRewriteRecursion() const final { return true; }
1497 };
1498 
1499 } // namespace
1500 
1501 /// Populate the given list with patterns that convert from Vector to LLVM.
1502 void mlir::populateVectorToLLVMConversionPatterns(
1503     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1504     bool reassociateFPReductions, bool enableIndexOptimizations) {
1505   MLIRContext *ctx = converter.getDialect()->getContext();
1506   // clang-format off
1507   patterns.insert<VectorFMAOpNDRewritePattern,
1508                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1509                   VectorInsertStridedSliceOpSameRankRewritePattern,
1510                   VectorExtractStridedSliceOpConversion>(ctx);
1511   patterns.insert<VectorReductionOpConversion>(
1512       ctx, converter, reassociateFPReductions);
1513   patterns.insert<VectorCreateMaskOpConversion,
1514                   VectorTransferConversion<TransferReadOp>,
1515                   VectorTransferConversion<TransferWriteOp>>(
1516       ctx, converter, enableIndexOptimizations);
1517   patterns
1518       .insert<VectorShuffleOpConversion,
1519               VectorExtractElementOpConversion,
1520               VectorExtractOpConversion,
1521               VectorFMAOp1DConversion,
1522               VectorInsertElementOpConversion,
1523               VectorInsertOpConversion,
1524               VectorPrintOpConversion,
1525               VectorTypeCastOpConversion,
1526               VectorMaskedLoadOpConversion,
1527               VectorMaskedStoreOpConversion,
1528               VectorGatherOpConversion,
1529               VectorScatterOpConversion,
1530               VectorExpandLoadOpConversion,
1531               VectorCompressStoreOpConversion>(ctx, converter);
1532   // clang-format on
1533 }
1534 
1535 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1536     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1537   MLIRContext *ctx = converter.getDialect()->getContext();
1538   patterns.insert<VectorMatmulOpConversion>(ctx, converter);
1539   patterns.insert<VectorFlatTransposeOpConversion>(ctx, converter);
1540 }
1541 
1542 namespace {
1543 struct LowerVectorToLLVMPass
1544     : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
1545   LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1546     this->reassociateFPReductions = options.reassociateFPReductions;
1547     this->enableIndexOptimizations = options.enableIndexOptimizations;
1548   }
1549   void runOnOperation() override;
1550 };
1551 } // namespace
1552 
1553 void LowerVectorToLLVMPass::runOnOperation() {
1554   // Perform progressive lowering of operations on slices and
1555   // all contraction operations. Also applies folding and DCE.
1556   {
1557     OwningRewritePatternList patterns;
1558     populateVectorToVectorCanonicalizationPatterns(patterns, &getContext());
1559     populateVectorSlicesLoweringPatterns(patterns, &getContext());
1560     populateVectorContractLoweringPatterns(patterns, &getContext());
1561     applyPatternsAndFoldGreedily(getOperation(), patterns);
1562   }
1563 
1564   // Convert to the LLVM IR dialect.
1565   LLVMTypeConverter converter(&getContext());
1566   OwningRewritePatternList patterns;
1567   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1568   populateVectorToLLVMConversionPatterns(
1569       converter, patterns, reassociateFPReductions, enableIndexOptimizations);
1571   populateStdToLLVMConversionPatterns(converter, patterns);
1572 
1573   LLVMConversionTarget target(getContext());
1574   if (failed(applyPartialConversion(getOperation(), target, patterns)))
1575     signalPassFailure();
1576 }
1577 
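/// Create an instance of the pass. A typical usage sketch (assuming the
/// public option fields read by the pass constructor above):
///   LowerVectorToLLVMOptions options;
///   options.reassociateFPReductions = true;
///   pm.addPass(createConvertVectorToLLVMPass(options));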
1578 std::unique_ptr<OperationPass<ModuleOp>>
1579 mlir::createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1580   return std::make_unique<LowerVectorToLLVMPass>(options);
1581 }
1582