//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
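// For example, arrayAttr = [0, 1, 2, 3] with dropFront = 1 and dropBack = 1
// yields {1, 2}.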
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact IR for this operation, but LLVM eventually
//       generates more elaborate instructions for it since it is very
//       conservative about the boundary conditions.
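//
// For example, with n = 4, offset o = 1 and bound b = 3, this computes
//     mask = [0,1,2,3] + [1,1,1,1] < [3,3,3,3] = [1,1,0,0],
// i.e. only the first two lanes are enabled.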
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI32VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
    idxType = rewriter.getI32Type();
  } else {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI64VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = rewriter.create<IndexCastOp>(loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = rewriter.create<IndexCastOp>(loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}

// Helper that returns the data layout alignment of the element type of a
// memref-bearing operation.
template <typename T>
LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, T op,
                                 unsigned &align) {
  Type elementTy =
      typeConverter.convertType(op.getMemRefType().getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy.cast<LLVM::LLVMType>(),
                                     typeConverter.getDataLayout());
  return success();
}

// Helper that returns the base address of a memref.
static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, MemRefType memRefType, Value &base) {
  // Inspect stride and offset structure.
  //
  // TODO: flat memory only for now, generalize
  //
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
      offset != 0 || memRefType.getMemorySpace() != 0)
    return failure();
  base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
  return success();
}

// Helper that returns a pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns a bit-casted pointer given a memref base.
static LogicalResult getBasePtr(ConversionPatternRewriter &rewriter,
                                Location loc, Value memref,
                                MemRefType memRefType, Type type, Value &ptr) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = type.template cast<LLVM::LLVMType>().getPointerTo();
  base = rewriter.create<LLVM::BitcastOp>(loc, pType, base);
  ptr = rewriter.create<LLVM::GEPOp>(loc, pType, base);
  return success();
}

// Helper that returns vector of pointers given a memref base and an index
// vector.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value indices,
                                    MemRefType memRefType, VectorType vType,
                                    Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::LLVMType::getVectorTy(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
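///
/// Example (illustrative; a 2x3 times 3x2 multiply on flattened operands):
/// ```
///  %c = vector.matrix_multiply %a, %b
///         {lhs_rows = 2 : i32, lhs_columns = 3 : i32, rhs_columns = 2 : i32}
///       : (vector<6xf32>, vector<6xf32>) -> vector<4xf32>
/// ```
/// becomes an llvm.intr.matrix.multiply carrying the same shape attributes.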
class VectorMatmulOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMatmulOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MatmulOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto matmulOp = cast<vector::MatmulOp>(op);
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        op, typeConverter.convertType(matmulOp.res().getType()), adaptor.lhs(),
        adaptor.rhs(), matmulOp.lhs_rows(), matmulOp.lhs_columns(),
        matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
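///
/// Example (illustrative; a 2x4 matrix stored row-major in a flat vector):
/// ```
///  %t = vector.flat_transpose %m {rows = 2 : i32, columns = 4 : i32}
///     : (vector<8xf32>) -> vector<8xf32>
/// ```
/// becomes an llvm.intr.matrix.transpose carrying the same shape attributes.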
class VectorFlatTransposeOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorFlatTransposeOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::FlatTransposeOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto transOp = cast<vector::FlatTransposeOp>(op);
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter.convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Conversion pattern for a vector.maskedload.
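///
/// A sketch of the lowering, assuming the 1-D form of the op at this revision
/// (types abbreviated):
/// ```
///  %l = vector.maskedload %base, %mask, %pass_thru
///     : memref<16xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
/// ```
/// becomes an llvm.intr.masked.load through the bit-casted base pointer, with
/// the alignment implied by the memref element type.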
class VectorMaskedLoadOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMaskedLoadOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MaskedLoadOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto load = cast<vector::MaskedLoadOp>(op);
    auto adaptor = vector::MaskedLoadOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, load, align)))
      return failure();

    auto vtype = typeConverter.convertType(load.getResultVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), load.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
        load, vtype, ptr, adaptor.mask(), adaptor.pass_thru(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.maskedstore.
class VectorMaskedStoreOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorMaskedStoreOpConversion(MLIRContext *context,
                                         LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::MaskedStoreOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto store = cast<vector::MaskedStoreOp>(op);
    auto adaptor = vector::MaskedStoreOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, store, align)))
      return failure();

    auto vtype = typeConverter.convertType(store.getValueVectorType());
    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), store.getMemRefType(),
                          vtype, ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
        store, adaptor.value(), ptr, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.gather.
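///
/// A sketch of the lowering (types abbreviated):
/// ```
///  %g = vector.gather %base, %indices, %mask, %pass_thru
///     : (memref<?xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32>)
///       -> vector<4xf32>
/// ```
/// becomes an llvm.intr.masked.gather on the vector of pointers obtained by a
/// single GEP of the base pointer with the index vector.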
class VectorGatherOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorGatherOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::GatherOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto gather = cast<vector::GatherOp>(op);
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, gather, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter.convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorScatterOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ScatterOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto scatter = cast<vector::ScatterOp>(op);
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, scatter, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExpandLoadOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExpandLoadOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto expand = cast<vector::ExpandLoadOp>(op);
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(), expand.getMemRefType(),
                          ptr)))
      return failure();

    auto vType = expand.getResultVectorType();
    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        op, typeConverter.convertType(vType), ptr, adaptor.mask(),
        adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorCompressStoreOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::CompressStoreOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto compress = cast<vector::CompressStoreOp>(op);
    auto adaptor = vector::CompressStoreOpAdaptor(operands);

    Value ptr;
    if (failed(getBasePtr(rewriter, loc, adaptor.base(),
                          compress.getMemRefType(), ptr)))
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        op, adaptor.value(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
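///
/// Example (illustrative):
/// ```
///  %s = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// becomes the corresponding LLVM vector-reduction intrinsic, seeded for the
/// floating-point case with the optional accumulator (or a neutral constant)
/// and honoring `reassociateFPReductions`.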
class VectorReductionOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorReductionOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter,
                                       bool reassociateFPRed)
      : ConvertToLLVMPattern(vector::ReductionOp::getOperationName(), context,
                             typeConverter),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto reductionOp = cast<vector::ReductionOp>(op);
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter.convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            op, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            op, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            op, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            op, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            op, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            op, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            op, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();

    } else if (eltType.isa<FloatType>()) {
      // Floating-point reductions: add/mul/min/max.
      if (kind == "add") {
        // Optional accumulator (or zero).
        Value acc = operands.size() > 1 ? operands[1]
                                        : rewriter.create<LLVM::ConstantOp>(
                                              op->getLoc(), llvmType,
                                              rewriter.getZeroAttr(eltType));
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "mul") {
        // Optional accumulator (or one).
        Value acc = operands.size() > 1
                        ? operands[1]
                        : rewriter.create<LLVM::ConstantOp>(
                              op->getLoc(), llvmType,
                              rewriter.getFloatAttr(eltType, 1.0));
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
            op, llvmType, operands[0]);
      else
        return failure();
      return success();
    }
    return failure();
  }

private:
  const bool reassociateFPReductions;
};

/// Conversion pattern for a vector.create_mask (1-D only).
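///
/// Example (illustrative):
/// ```
///  %m = vector.create_mask %b : vector<4xi1>
/// ```
/// becomes, via buildVectorComparison, a compare of the constant vector
/// [0, 1, 2, 3] against the splatted bound %b.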
class VectorCreateMaskOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        LLVMTypeConverter &typeConverter,
                                        bool enableIndexOpt)
      : ConvertToLLVMPattern(vector::CreateMaskOp::getOperationName(), context,
                             typeConverter),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op->getResult(0).getType().cast<VectorType>();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};

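/// Conversion pattern for a vector.shuffle. A rank-1 shuffle of identically
/// typed operands maps directly onto llvm.shufflevector; all other cases are
/// expanded into individual extract/insert pairs.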
class VectorShuffleOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ShuffleOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return success();
    }

    // For all other cases, extract and insert the values one at a time.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, typeConverter, loc, value, llvmType,
                                 rank, extPos);
      insert = insertOne(rewriter, typeConverter, loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return success();
  }
};

class VectorExtractElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractElementOpConversion(MLIRContext *context,
                                            LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto extractEltOp = cast<vector::ExtractElementOp>(op);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter.convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        op, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of rank >= 2.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
///    -> !llvm<"<8 x float>">
/// ```
class VectorFMAOp1DConversion : public ConvertToLLVMPattern {
public:
  explicit VectorFMAOp1DConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::FMAOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(op, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertElementOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertElementOp::getOperationName(),
                             context, typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto insertEltOp = cast<vector::InsertElementOp>(op);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter.convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(rewriter.getContext());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter.convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. The pattern only extracts a properly ranked subvector
// from the destination and forwards the rest of the conversion to another
// pattern that handles operands of the same rank:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
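//
// For example, inserting a vector<4xf32> into a vector<2x4xf32> at offsets
// [1, 0]: the subvector %dst[1] is extracted, a same-rank InsertStridedSlice
// with offsets [0] inserts the source into it, and the result is inserted
// back into %dst at position [1].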
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. The pattern peels off the most major dimension: for each
// slice of the source along that dimension, it
//   1. extracts the corresponding subvectors from source and destination,
//   2. creates a new, lower-rank InsertStridedSlice op to insert the source
//      subvector into the destination subvector,
//   3. inserts the result back into the destination at the strided position.
// The recursive InsertStridedSlice from step 2. is picked up again by this
// same pattern until the element level is reached.
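//
// For example, inserting a vector<2x4xf32> into a vector<8x16xf32> at offsets
// [2, 3] with strides [1, 1]: for idx = 0, 1 the slice %src[idx]
// (a vector<4xf32>) is inserted into %dst[2 + idx] (a vector<16xf32>) by a
// rank-1 InsertStridedSlice at offset [3], and the result is inserted back at
// position 2 + idx of the destination.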
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from
        // destination. Otherwise we are at the element level and no need to
        // recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
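/// For example, memref<2x3x4xf32> yields strides [12, 4, 1]; a memref with a
/// non-identity layout is only accepted when its static sizes and strides
/// multiply out to a row-major contiguous buffer.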
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified: it cannot
  // represent contiguous dynamic shapes in any way other than the
  // empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

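/// Conversion pattern for a vector.type_cast, e.g. (illustrative):
/// ```
///  %v = vector.type_cast %m : memref<8x8xf32> to memref<vector<8x8xf32>>
/// ```
/// The lowering rebuilds the target memref descriptor over the same buffer,
/// bit-casting the allocated and aligned pointers to the vector type.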
class VectorTypeCastOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTypeCastOpConversion(MLIRContext *context,
                                      LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::TypeCastOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMType>();
    if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter.convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMType>();
    if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = LLVM::LLVMType::getInt64Ty(rewriter.getContext());

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(op, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
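///
/// A sketch for vector.transfer_read (the write case is symmetric):
/// ```
///  %v = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<8xf32>
/// ```
/// becomes, roughly: a bitcast (or addrspacecast) of the element pointer at
/// %A[%i] to a vector pointer, a mask [%i .. %i+7] < dim(%A, 0), and an
/// llvm.intr.masked.load with a splat of %pad as pass-through (or a plain
/// load when the dimension is not masked).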
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTransferConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertToLLVMPattern(ConcreteOp::getOperationName(), context, typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto xferOp = cast<ConcreteOp>(op);
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       op->getContext()))
      return failure();
    // Only contiguous source memrefs supported atm.
    auto strides = computeContiguousStrides(xferOp.getMemRefType());
    if (!strides)
      return failure();

    auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };

    Location loc = op->getLoc();
    MemRefType memRefType = xferOp.getMemRefType();

    if (auto memrefVectorElementType =
            memRefType.getElementType().dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
1264       // Check that the memref vector type is a suffix of 'vectorType'.
1265       unsigned memrefVecEltRank = memrefVectorElementType.getRank();
1266       unsigned resultVecRank = xferOp.getVectorType().getRank();
1267       assert(memrefVecEltRank <= resultVecRank);
1268       // TODO: Move this to isSuffix in Vector/Utils.h.
1269       unsigned rankOffset = resultVecRank - memrefVecEltRank;
1270       auto memrefVecEltShape = memrefVectorElementType.getShape();
1271       auto resultVecShape = xferOp.getVectorType().getShape();
1272       for (unsigned i = 0; i < memrefVecEltRank; ++i)
1273         assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
1274                "memref vector element shape should match suffix of vector "
1275                "result shape.");
1276 #endif // ifndef NDEBUG
1277     }
1278 
1279     // 1. Get the source/dst address as an LLVM vector pointer.
1280     //    The vector pointer is always in address space 0, so an
1281     //    addrspacecast is needed when the source/dst memrefs are not in
1282     //    address space 0.
1283     // TODO: support alignment when possible.
1284     Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
1285                                adaptor.indices(), rewriter);
1286     auto vecTy =
1287         toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
1288     Value vectorDataPtr;
1289     if (memRefType.getMemorySpace() == 0)
1290       vectorDataPtr =
1291           rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
1292     else
1293       vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
1294           loc, vecTy.getPointerTo(), dataPtr);
1295 
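    // If the transfer is not masked along the (single) vector dimension, the
    // access is known to be in-bounds and a plain vector load/store suffices.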
1296     if (!xferOp.isMaskedDim(0))
1297       return replaceTransferOpWithLoadOrStore(rewriter, typeConverter, loc,
1298                                               xferOp, operands, vectorDataPtr);
1299 
1300     // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
1301     // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
1302     // 4. Let dim be the memref dimension; compute the vector comparison mask:
1303     //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
1304     //
1305     // TODO: when the leaf transfer rank is k > 1, we need the last `k`
1306     //       dimensions here.
1307     unsigned vecWidth = vecTy.getVectorNumElements();
1308     unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
1309     Value off = xferOp.indices()[lastIndex];
1310     Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
1311     Value mask = buildVectorComparison(rewriter, op, enableIndexOptimizations,
1312                                        vecWidth, dim, &off);
1313 
1314     // 5. Rewrite as a masked read / write.
1315     return replaceTransferOpWithMasked(rewriter, typeConverter, loc, xferOp,
1316                                        operands, vectorDataPtr, mask);
1317   }
1318 
1319 private:
1320   const bool enableIndexOptimizations;
1321 };
1322 
1323 class VectorPrintOpConversion : public ConvertToLLVMPattern {
1324 public:
1325   explicit VectorPrintOpConversion(MLIRContext *context,
1326                                    LLVMTypeConverter &typeConverter)
1327       : ConvertToLLVMPattern(vector::PrintOp::getOperationName(), context,
1328                              typeConverter) {}
1329 
1330   // Proof-of-concept lowering implementation that relies on a small
1331   // runtime support library, which only needs to provide a few
1332   // printing methods (single value for all data types, opening/closing
1333   // bracket, comma, newline). The lowering fully unrolls a vector
1334   // into these elementary printing operations. The advantage of this
1335   // approach is that the library can remain unaware of all low-level
1336   // implementation details of vectors while still supporting output
1337   // of vectors of any shape and rank. Due to the full unrolling,
1338   // however, this approach is less suited for very large vectors.
1339   //
1340   // TODO: rely solely on libc in future? something else?
1341   //
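  // As an illustrative sketch (not verbatim from a test), printing a
  // vector<2xf32> value unrolls into a runtime call sequence along the
  // lines of:
  //
  //   printOpen()
  //   printF32(<element 0>)
  //   printComma()
  //   printF32(<element 1>)
  //   printClose()
  //   printNewline()
  //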
1342   LogicalResult
1343   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
1344                   ConversionPatternRewriter &rewriter) const override {
1345     auto printOp = cast<vector::PrintOp>(op);
1346     auto adaptor = vector::PrintOpAdaptor(operands);
1347     Type printType = printOp.getPrintType();
1348 
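    // Bail out on types the LLVM type converter cannot handle.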
1349     if (typeConverter.convertType(printType) == nullptr)
1350       return failure();
1351 
1352     // Make sure element type has runtime support.
1353     PrintConversion conversion = PrintConversion::None;
1354     VectorType vectorType = printType.dyn_cast<VectorType>();
1355     Type eltType = vectorType ? vectorType.getElementType() : printType;
1356     Operation *printer;
1357     if (eltType.isF32()) {
1358       printer = getPrintFloat(op);
1359     } else if (eltType.isF64()) {
1360       printer = getPrintDouble(op);
1361     } else if (eltType.isIndex()) {
1362       printer = getPrintU64(op);
1363     } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
1364       // Integers need a zero or sign extension on the operand
1365       // (depending on the source type) as well as a signed or
1366       // unsigned print method. Up to 64-bit is supported.
1367       unsigned width = intTy.getWidth();
1368       if (intTy.isUnsigned()) {
1369         if (width <= 64) {
1370           if (width < 64)
1371             conversion = PrintConversion::ZeroExt64;
1372           printer = getPrintU64(op);
1373         } else {
1374           return failure();
1375         }
1376       } else {
1377         assert(intTy.isSignless() || intTy.isSigned());
1378         if (width <= 64) {
1379           // Note that we *always* zero extend booleans (1-bit integers),
1380           // so that true/false is printed as 1/0 rather than -1/0.
1381           if (width == 1)
1382             conversion = PrintConversion::ZeroExt64;
1383           else if (width < 64)
1384             conversion = PrintConversion::SignExt64;
1385           printer = getPrintI64(op);
1386         } else {
1387           return failure();
1388         }
1389       }
1390     } else {
1391       return failure();
1392     }
1393 
1394     // Unroll vector into elementary print calls.
1395     int64_t rank = vectorType ? vectorType.getRank() : 0;
1396     emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank,
1397               conversion);
1398     emitCall(rewriter, op->getLoc(), getPrintNewline(op));
1399     rewriter.eraseOp(op);
1400     return success();
1401   }
1402 
1403 private:
1404   enum class PrintConversion {
1405     // clang-format off
1406     None,
1407     ZeroExt64,
1408     SignExt64
1409     // clang-format on
1410   };
1411 
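  // Recursively unrolls `value` along its leading dimension: emits an opening
  // bracket, the comma-separated elements (via recursive calls of decreasing
  // rank), and a closing bracket. At rank 0, applies the requested sign/zero
  // extension and emits a single elementary print call.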
1412   void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
1413                  Value value, VectorType vectorType, Operation *printer,
1414                  int64_t rank, PrintConversion conversion) const {
1415     Location loc = op->getLoc();
1416     if (rank == 0) {
1417       switch (conversion) {
1418       case PrintConversion::ZeroExt64:
1419         value = rewriter.create<ZeroExtendIOp>(
1420             loc, value, LLVM::LLVMType::getInt64Ty(rewriter.getContext()));
1421         break;
1422       case PrintConversion::SignExt64:
1423         value = rewriter.create<SignExtendIOp>(
1424             loc, value, LLVM::LLVMType::getInt64Ty(rewriter.getContext()));
1425         break;
1426       case PrintConversion::None:
1427         break;
1428       }
1429       emitCall(rewriter, loc, printer, value);
1430       return;
1431     }
1432 
1433     emitCall(rewriter, loc, getPrintOpen(op));
1434     Operation *printComma = getPrintComma(op);
1435     int64_t dim = vectorType.getDimSize(0);
1436     for (int64_t d = 0; d < dim; ++d) {
1437       auto reducedType =
1438           rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
1439       auto llvmType = typeConverter.convertType(
1440           rank > 1 ? reducedType : vectorType.getElementType());
1441       Value nestedVal =
1442           extractOne(rewriter, typeConverter, loc, value, llvmType, rank, d);
1443       emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
1444                 conversion);
1445       if (d != dim - 1)
1446         emitCall(rewriter, loc, printComma);
1447     }
1448     emitCall(rewriter, loc, getPrintClose(op));
1449   }
1450 
1451   // Helper to emit a call.
1452   static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
1453                        Operation *ref, ValueRange params = ValueRange()) {
1454     rewriter.create<LLVM::CallOp>(loc, TypeRange(),
1455                                   rewriter.getSymbolRefAttr(ref), params);
1456   }
1457 
1458   // Helper that declares a printer function on first use and looks it up.
1459   static Operation *getPrint(Operation *op, StringRef name,
1460                              ArrayRef<LLVM::LLVMType> params) {
1461     auto module = op->getParentOfType<ModuleOp>();
1462     auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
1463     if (func)
1464       return func;
1465     OpBuilder moduleBuilder(module.getBodyRegion());
1466     return moduleBuilder.create<LLVM::LLVMFuncOp>(
1467         op->getLoc(), name,
1468         LLVM::LLVMType::getFunctionTy(
1469             LLVM::LLVMType::getVoidTy(op->getContext()), params,
1470             /*isVarArg=*/false));
1471   }
1472 
1473   // Helpers for method names.
1474   Operation *getPrintI64(Operation *op) const {
1475     return getPrint(op, "printI64",
1476                     LLVM::LLVMType::getInt64Ty(op->getContext()));
1477   }
1478   Operation *getPrintU64(Operation *op) const {
1479     return getPrint(op, "printU64",
1480                     LLVM::LLVMType::getInt64Ty(op->getContext()));
1481   }
1482   Operation *getPrintFloat(Operation *op) const {
1483     return getPrint(op, "printF32",
1484                     LLVM::LLVMType::getFloatTy(op->getContext()));
1485   }
1486   Operation *getPrintDouble(Operation *op) const {
1487     return getPrint(op, "printF64",
1488                     LLVM::LLVMType::getDoubleTy(op->getContext()));
1489   }
1490   Operation *getPrintOpen(Operation *op) const {
1491     return getPrint(op, "printOpen", {});
1492   }
1493   Operation *getPrintClose(Operation *op) const {
1494     return getPrint(op, "printClose", {});
1495   }
1496   Operation *getPrintComma(Operation *op) const {
1497     return getPrint(op, "printComma", {});
1498   }
1499   Operation *getPrintNewline(Operation *op) const {
1500     return getPrint(op, "printNewline", {});
1501   }
1502 };
1503 
1504 /// Progressive lowering of ExtractStridedSliceOp to either:
1505 ///   1. express single offset extract as a direct shuffle.
1506 ///   2. extract + lower rank strided_slice + insert for the n-D case.
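///
/// As an illustrative sketch (not verbatim from a test), a single-offset
/// extract such as
///
///   vector.extract_strided_slice %v
///       {offsets = [2], sizes = [4], strides = [1]}
///     : vector<8xf32> to vector<4xf32>
///
/// lowers directly to a vector.shuffle of %v with itself using mask
/// [2, 3, 4, 5].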
1507 class VectorExtractStridedSliceOpConversion
1508     : public OpRewritePattern<ExtractStridedSliceOp> {
1509 public:
1510   VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
1511       : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
1512     // This pattern creates recursive ExtractStridedSliceOp, but the recursion
1513     // is bounded as the rank is strictly decreasing.
1514     setHasBoundedRewriteRecursion();
1515   }
1516 
1517   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1518                                 PatternRewriter &rewriter) const override {
1519     auto dstType = op.getResult().getType().cast<VectorType>();
1520 
1521     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1522 
1523     int64_t offset =
1524         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1525     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1526     int64_t stride =
1527         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1528 
1529     auto loc = op.getLoc();
1530     auto elemType = dstType.getElementType();
1531     assert(elemType.isSignlessIntOrIndexOrFloat());
1532 
1533     // A single-offset extract can be expressed more efficiently as a shuffle.
1534     if (op.offsets().getValue().size() == 1) {
1535       SmallVector<int64_t, 4> offsets;
1536       offsets.reserve(size);
1537       for (int64_t off = offset, e = offset + size * stride; off < e;
1538            off += stride)
1539         offsets.push_back(off);
1540       rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
1541                                              op.vector(),
1542                                              rewriter.getI64ArrayAttr(offsets));
1543       return success();
1544     }
1545 
1546     // Otherwise, extract/insert around a lower-rank extract strided slice op.
1547     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1548                                              rewriter.getZeroAttr(elemType));
1549     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1550     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1551          off += stride, ++idx) {
1552       Value one = extractOne(rewriter, loc, op.vector(), off);
1553       Value extracted = rewriter.create<ExtractStridedSliceOp>(
1554           loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
1555           getI64SubArray(op.sizes(), /*dropFront=*/1),
1556           getI64SubArray(op.strides(), /*dropFront=*/1));
1557       res = insertOne(rewriter, loc, extracted, res, idx);
1558     }
1559     rewriter.replaceOp(op, res);
1560     return success();
1561   }
1562 };
1563 
1564 } // namespace
1565 
1566 /// Populate the given list with patterns that convert from Vector to LLVM.
1567 void mlir::populateVectorToLLVMConversionPatterns(
1568     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1569     bool reassociateFPReductions, bool enableIndexOptimizations) {
1570   MLIRContext *ctx = converter.getDialect()->getContext();
1571   // clang-format off
1572   patterns.insert<VectorFMAOpNDRewritePattern,
1573                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1574                   VectorInsertStridedSliceOpSameRankRewritePattern,
1575                   VectorExtractStridedSliceOpConversion>(ctx);
1576   patterns.insert<VectorReductionOpConversion>(
1577       ctx, converter, reassociateFPReductions);
1578   patterns.insert<VectorCreateMaskOpConversion,
1579                   VectorTransferConversion<TransferReadOp>,
1580                   VectorTransferConversion<TransferWriteOp>>(
1581       ctx, converter, enableIndexOptimizations);
1582   patterns
1583       .insert<VectorShuffleOpConversion,
1584               VectorExtractElementOpConversion,
1585               VectorExtractOpConversion,
1586               VectorFMAOp1DConversion,
1587               VectorInsertElementOpConversion,
1588               VectorInsertOpConversion,
1589               VectorPrintOpConversion,
1590               VectorTypeCastOpConversion,
1591               VectorMaskedLoadOpConversion,
1592               VectorMaskedStoreOpConversion,
1593               VectorGatherOpConversion,
1594               VectorScatterOpConversion,
1595               VectorExpandLoadOpConversion,
1596               VectorCompressStoreOpConversion>(ctx, converter);
1597   // clang-format on
1598 }
1599 
1600 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1601     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1602   MLIRContext *ctx = converter.getDialect()->getContext();
1603   patterns.insert<VectorMatmulOpConversion>(ctx, converter);
1604   patterns.insert<VectorFlatTransposeOpConversion>(ctx, converter);
1605 }
1606 
1607 namespace {
1608 struct LowerVectorToLLVMPass
1609     : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
1610   LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1611     this->reassociateFPReductions = options.reassociateFPReductions;
1612     this->enableIndexOptimizations = options.enableIndexOptimizations;
1613   }
1614   void runOnOperation() override;
1615 };
1616 } // namespace
1617 
1618 void LowerVectorToLLVMPass::runOnOperation() {
1619   // Perform progressive lowering of operations on slices and
1620   // all contraction operations. Also applies folding and DCE.
1621   {
1622     OwningRewritePatternList patterns;
1623     populateVectorToVectorCanonicalizationPatterns(patterns, &getContext());
1624     populateVectorSlicesLoweringPatterns(patterns, &getContext());
1625     populateVectorContractLoweringPatterns(patterns, &getContext());
1626     applyPatternsAndFoldGreedily(getOperation(), patterns);
1627   }
1628 
1629   // Convert to the LLVM IR dialect.
1630   LLVMTypeConverter converter(&getContext());
1631   OwningRewritePatternList patterns;
1632   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1633   populateVectorToLLVMConversionPatterns(
1634       converter, patterns, reassociateFPReductions, enableIndexOptimizations);
1636   populateStdToLLVMConversionPatterns(converter, patterns);
1637 
1638   LLVMConversionTarget target(getContext());
1639   if (failed(applyPartialConversion(getOperation(), target, patterns)))
1640     signalPassFailure();
1641 }
1642 
1643 std::unique_ptr<OperationPass<ModuleOp>>
1644 mlir::createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1645   return std::make_unique<LowerVectorToLLVMPass>(options);
1646 }
1647